x183 * x226 + x223
A[3, 0] = -r_xx * x43 + r_yx * x46 - r_zx * x42 + x113 + x131 * x187 + x188 * x92
A[3, 1] = -r_xx * x71 + r_yx * x74 - r_zx * x67 + x131 * x195 + x196 * x92 + x203
A[3, 2] = -r_xx * x87 + r_yx * x88 - r_zx * x86 + x131 * x199 + x200 * x92 + x221
A[3, 3] = -r_xx * x132 + r_yx * x134 - r_zx * x130 + self.p.m1 * x233 + self.p.m1 * x234 + self.p.m2 * x233 + self.p.m2 * x234 + self.p.m3 * \
x104**2 + self.p.m3 * x112**2 + self.p.m3 * x123**2 + self.p.theta2 + self.p.theta3x * x227 + x131 * x204 + x205 * x92 + x228 * x230 + x228 * x232
A[3, 4] = -r_xx * x155 + r_yx * x157 - r_zx * x153 + x131 * x210 + x211 * x92 + x241
A[3, 5] = -r_xx * x173 + r_yx * x175 - r_zx * x171 + x131 * x213 + x214 * x92 + x244
A[3, 6] = -r_xx * x181 + r_yx * x182 - r_zx * x180 + self.p.theta3x * \
x95 + x103 * x126 + x111 * x128 + x123 * x245 + x131 * x216 - x215 * x92
A[3, 7] = -r_xx * x185 + r_yx * x186 - r_zx * x184 - x126 * x140 - \
x128 * x142 + x131 * x218 - x217 * x92 + x236 + x238 + x240
A[4, 0] = -r_xy * x43 + r_yy * x46 - r_zy * x42 + x138 * x188 + x144 + x154 * x187
A[4, 1] = -r_xy * x71 + r_yy * x74 - r_zy * x67 + x138 * x196 + x154 * x195 + x209
A[4, 2] = -r_xy * x87 + r_yy * x88 - r_zy * x86 + x138 * x200 + x154 * x199 + x224
A[4, 3] = -r_xy * x132 + r_yy * x134 - r_zy * x130 + x138 * x205 + x154 * x204 + x241
A[4, 4] = -r_xy * x155 + r_yy * x157 - r_zy * x153 + self.p.m1 * x248 + self.p.m1 * x249 + self.p.m2 * x248 + \
self.p.m2 * x249 + self.p.m3 * x141**2 + self.p.m3 * x143**2 + self.p.theta2 + x138 * x211 + x154 * x210 + x247
A[4, 5] = -r_xy * x173 + r_yy * x175 - r_zy * x171 + x138 * x214 + x154 * x213 + x253
A[4, 6] = -r_xy * x181 + r_yy * x182 - r_zy * x180 - x100 * x117 * \
x121 * x246 + x103 * x149 + x111 * x151 - x138 * x215 + x154 * x216
A[4, 7] = -r_xy * x185 + r_yy * x186 - r_zy * x184 - x138 * \
x217 - x140 * x149 - x142 * x151 + x154 * x218 + x247
A[5, 0] = -r_xz * x43 + r_yz * x46 - r_zz * x42 + x161 * x188 + x164 + x172 * x187
A[5, 1] = -r_xz * x71 + r_yz * x74 - r_zz * x67 + x161 * x196 + x172 * x195 + x212
A[5, 2] = -r_xz * x87 + r_yz * x88 - r_zz * x86 + x161 * x200 + x172 * x199 + x225
A[5, 3] = -r_xz * x132 + r_yz * x134 - r_zz * x130 + x161 * x205 + x172 * x204 + x244
A[5, 4] = -r_xz * x155 + r_yz * x157 - r_zz * x153 + x161 * x211 + x172 * x210 + x253
A[5, 5] = -r_xz * x173 + r_yz * x175 - r_zz * x171 + self.p.m1 * x254 + self.p.m1 * x255 + self.p.m2 * x254 + self.p.m2 * x255 + self.p.m3 * \
x162**2 + self.p.m3 * x163**2 + self.p.m3 * x165**2 + self.p.theta2 + self.p.theta3x * x228 + x161 * x214 + x172 * x213 + x227 * x230 + x227 * x232
A[5, 6] = -r_xz * x181 + r_yz * x182 - r_zz * x180 + x103 * x167 + \
x111 * x169 - x161 * x215 + x165 * x245 + x172 * x216 - x242
A[5, 7] = -r_xz * x185 + r_yz * x186 - r_zz * x184 - x140 * x167 - \
x142 * x169 - x161 * x217 + x172 * x218 + x250 + x251 + x252
A[6, 0] = 0
A[6, 1] = 0
A[6, 2] = 0
A[6, 3] = 0
A[6, 4] = 0
A[6, 5] = 0
A[6, 6] = 1
A[6, 7] = 0
A[7, 0] = 0
A[7, 1] = 0
A[7, 2] = 0
A[7, 3] = 0
A[7, 4] = 0
A[7, 5] = 0
A[7, 6] = 0
A[7, 7] = 1
b[0] = -x1 * x347 + x14 * x280 + x14 * x286 + x14 * x336 + x20 * x299 + x20 * \
x303 + x20 * x338 - x340 + x350 * x45 + x59 * (self.p.g * self.p.m1 + x331)
b[1] = -x280 * x49 - x286 * x54 - x299 * x51 - x303 * x56 + x306 * \
x60 + x330 * x60 - x336 * x54 - x338 * x56 - x351 * x69 - x352 * x51
b[2] = -x222 * x350 - x299 * x52 - x303 * x78 + x306 * x80 + x330 * x80 - x338 * x78
b[3] = r_xx * x346 - r_yx * x349 + r_zx * x340 - x104 * x336 - x112 * x338 - x123 * x330 - x131 * x351 - \
x280 * x91 - x286 * x91 - x299 * x92 - x303 * x92 - x352 * x92 - x354 * x95 - x357 * x93 - x360 * x93
b[4] = r_xy * x346 - r_yy * x349 + r_zy * x340 - x100 * x359 - x137 * x280 - x137 * x286 - x138 * \
x299 - x138 * x303 - x138 * x352 - x141 * x336 - x143 * x338 + x145 * x330 - x154 * x351 + x356 * x94
b[5] = r_xz * x346 - r_yz * x349 + r_zz * x340 - x160 * x280 - x160 * x286 - x161 * x299 - x161 * x303 - \
x161 * x352 - x162 * x336 - x163 * x338 - x165 * x330 - x172 * x351 + x354 * x93 - x357 * x95 - x360 * x95
b[6] = x361 * (omega_x_cmd - phi_x_dot)
b[7] = x361 * (omega_y_cmd - phi_y_dot)
omega_dot = np.linalg.solve(A, b)
return omega_dot
def _compute_r_OSi(self, state):
"""computes center of mass locations of all bodies
args:
state (ModelState): current state
Returns: list of x/y/z coordinates of center of mass of lower ball, upper ball, and lever arm.
"""
[r_xx, r_xy, r_xz, r_yx, r_yy, r_yz, r_zx, r_zy, r_zz] = state.R_IB2.reshape(9)
[pos_x, pos_y] = state.pos
[psi_x, psi_y] = state.psi
[phi_x, phi_y] = state.phi
r_OS1 = np.array([pos_x, pos_y, self.p.r1])
r_S1S2 = np.zeros(3)
r_S2S3 = np.zeros(3)
x0 = self.p.r1 + self.p.r2
x1 = x0 * cos(psi_x)
x2 = sin(phi_x)
x3 = cos(phi_x)
x4 = x3 * sin(phi_y)
x5 = x3 * cos(phi_y)
r_S1S2[0] = x1 * sin(psi_y)
r_S1S2[1] = -x0 * sin(psi_x)
r_S1S2[2] = x1 * cos(psi_y)
r_S2S3[0] = -self.p.l * (r_xx * x4 - r_xy * x2 + r_xz * x5)
r_S2S3[1] = -self.p.l * (r_yx * x4 - r_yy * x2 + r_yz * x5)
Returns the maximum possible size of a waveform in microseconds.
...
micros = pi.wave_get_max_micros()
...
"""
return _u2i(_pigpio_command(self.sl, _PI_CMD_WVSM, 2, 0))
def wave_get_pulses(self):
"""
Returns the length in pulses of the current waveform.
...
pulses = pi.wave_get_pulses()
...
"""
return _u2i(_pigpio_command(self.sl, _PI_CMD_WVSP, 0, 0))
def wave_get_max_pulses(self):
"""
Returns the maximum possible size of a waveform in pulses.
...
pulses = pi.wave_get_max_pulses()
...
"""
return _u2i(_pigpio_command(self.sl, _PI_CMD_WVSP, 2, 0))
def wave_get_cbs(self):
"""
Returns the length in DMA control blocks of the current
waveform.
...
cbs = pi.wave_get_cbs()
...
"""
return _u2i(_pigpio_command(self.sl, _PI_CMD_WVSC, 0, 0))
def wave_get_max_cbs(self):
"""
Returns the maximum possible size of a waveform in DMA
control blocks.
...
cbs = pi.wave_get_max_cbs()
...
"""
return _u2i(_pigpio_command(self.sl, _PI_CMD_WVSC, 2, 0))
def i2c_open(self, i2c_bus, i2c_address, i2c_flags=0):
"""
Returns a handle (>=0) for the device at the I2C bus address.
i2c_bus:= >=0.
i2c_address:= 0-0x7F.
i2c_flags:= 0, no flags are currently defined.
Normally you would only use the [*i2c_**] functions if
you are or will be connecting to the Pi over a network. If
you will always run on the local Pi use the standard SMBus
module instead.
Physically buses 0 and 1 are available on the Pi. Higher
numbered buses will be available if a kernel supported bus
multiplexor is being used.
For the SMBus commands the low level transactions are shown
at the end of the function description. The following
abbreviations are used:
. .
S (1 bit) : Start bit
P (1 bit) : Stop bit
Rd/Wr (1 bit) : Read/Write bit. Rd equals 1, Wr equals 0.
A, NA (1 bit) : Accept and not accept bit.
Addr (7 bits): I2C 7 bit address.
reg (8 bits): Command byte, which often selects a register.
Data (8 bits): A data byte.
Count (8 bits): A byte defining the length of a block operation.
[..]: Data sent by the device.
. .
...
h = pi.i2c_open(1, 0x53) # open device at address 0x53 on bus 1
...
"""
# I p1 i2c_bus
# I p2 i2c_addr
# I p3 4
## extension ##
# I i2c_flags
extents = [struct.pack("I", i2c_flags)]
return _u2i(_pigpio_command_ext(
self.sl, _PI_CMD_I2CO, i2c_bus, i2c_address, 4, extents))
def i2c_close(self, handle):
"""
Closes the I2C device associated with handle.
handle:= >=0 (as returned by a prior call to [*i2c_open*]).
...
pi.i2c_close(h)
...
"""
return _u2i(_pigpio_command(self.sl, _PI_CMD_I2CC, handle, 0))
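# Illustrative sketch (not part of pigpio): a typical open/write/read/close
# cycle with the I2C methods below, assuming a connected pigpio.pi() instance
# named `pi` and a device at address 0x53 on bus 1 (register 0x2D is made up).
#
# h = pi.i2c_open(1, 0x53)            # get a handle for the device
# pi.i2c_write_byte_data(h, 0x2D, 8)  # write 8 to register 0x2D
# b = pi.i2c_read_byte_data(h, 0x00)  # read back register 0x00
# pi.i2c_close(h)                     # release the handle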
def i2c_write_quick(self, handle, bit):
"""
Sends a single bit to the device associated with handle.
handle:= >=0 (as returned by a prior call to [*i2c_open*]).
bit:= 0 or 1, the value to write.
SMBus 2.0 5.5.1 - Quick command.
. .
S Addr bit [A] P
. .
...
pi.i2c_write_quick(0, 1) # send 1 to device 0
pi.i2c_write_quick(3, 0) # send 0 to device 3
...
"""
return _u2i(_pigpio_command(self.sl, _PI_CMD_I2CWQ, handle, bit))
def i2c_write_byte(self, handle, byte_val):
"""
Sends a single byte to the device associated with handle.
handle:= >=0 (as returned by a prior call to [*i2c_open*]).
byte_val:= 0-255, the value to write.
SMBus 2.0 5.5.2 - Send byte.
. .
S Addr Wr [A] byte_val [A] P
. .
...
pi.i2c_write_byte(1, 17) # send byte 17 to device 1
pi.i2c_write_byte(2, 0x23) # send byte 0x23 to device 2
...
"""
return _u2i(
_pigpio_command(self.sl, _PI_CMD_I2CWS, handle, byte_val))
def i2c_read_byte(self, handle):
"""
Reads a single byte from the device associated with handle.
handle:= >=0 (as returned by a prior call to [*i2c_open*]).
SMBus 2.0 5.5.3 - Receive byte.
. .
S Addr Rd [A] [Data] NA P
. .
...
b = pi.i2c_read_byte(2) # read a byte from device 2
...
"""
return _u2i(_pigpio_command(self.sl, _PI_CMD_I2CRS, handle, 0))
def i2c_write_byte_data(self, handle, reg, byte_val):
"""
Writes a single byte to the specified register of the device
associated with handle.
handle:= >=0 (as returned by a prior call to [*i2c_open*]).
reg:= >=0, the device register.
byte_val:= 0-255, the value to write.
SMBus 2.0 5.5.4 - Write byte.
. .
S Addr Wr [A] reg [A] byte_val [A] P
. .
...
# send byte 0xC5 to reg 2 of device 1
pi.i2c_write_byte_data(1, 2, 0xC5)
# send byte 9 to reg 4 of device 2
pi.i2c_write_byte_data(2, 4, 9)
...
"""
# I p1 handle
# I p2 reg
# I p3 4
## extension ##
# I byte_val
extents = [struct.pack("I", byte_val)]
return _u2i(_pigpio_command_ext(
self.sl, _PI_CMD_I2CWB, handle, reg, 4, extents))
def i2c_write_word_data(self, handle, reg, word_val):
"""
Writes a single 16 bit word to the specified register of the
device associated with handle.
handle:= >=0 (as returned by a prior call to [*i2c_open*]).
reg:= >=0, the device register.
word_val:= 0-65535, the value to write.
SMBus 2.0 5.5.4 - Write word.
. .
S Addr Wr [A] reg [A] word_val_Low [A] word_val_High [A] P
. .
...
# send word 0xA0C5 to reg 5 of device 4
pi.i2c_write_word_data(4, 5, 0xA0C5)
# send word 23 to reg 2 of device 5
pi.i2c_write_word_data(5, 2, 23)
...
"""
# I p1 handle
# I p2 reg
# I p3 4
## extension ##
# I word_val
extents = [struct.pack("I", word_val)]
return _u2i(_pigpio_command_ext(
self.sl, _PI_CMD_I2CWW, handle, reg, 4, extents))
def i2c_read_byte_data(self, handle, reg):
"""
Reads a single byte from the specified register of the device
associated with handle.
handle:= >=0 (as returned by a prior call to [*i2c_open*]).
reg:= >=0, the device register.
SMBus 2.0 5.5.5 - Read byte.
. .
S Addr Wr [A] reg [A] S Addr Rd [A] [Data] NA P
. .
...
# read byte from reg 17 of device 2
b = pi.i2c_read_byte_data(2, 17)
# read byte from reg 1 of device 0
b = pi.i2c_read_byte_data(0, 1)
...
"""
return _u2i(_pigpio_command(self.sl, _PI_CMD_I2CRB, handle, reg))
def i2c_read_word_data(self, handle, reg):
"""
Reads a single 16 bit word from the specified register of the
device associated with handle.
handle:= >=0 (as returned by a prior call to [*i2c_open*]).
reg:= >=0, the device register.
SMBus 2.0 5.5.5 - Read word.
. .
S Addr Wr [A] reg [A] S Addr Rd [A] [DataLow] A [DataHigh] NA P
. .
...
# read word from reg 2 of device 3
w = pi.i2c_read_word_data(3, 2)
# read word from reg 7 of device 2
w = pi.i2c_read_word_data(2, 7)
...
"""
return _u2i(_pigpio_command(self.sl, _PI_CMD_I2CRW, handle, reg))
def i2c_process_call(self, handle, reg, word_val):
"""
Writes 16 bits of data to the specified register of the device
associated with handle and reads 16 bits of data in return.
handle:= >=0 (as returned by a prior call to [*i2c_open*]).
reg:= >=0, the device register.
word_val:= 0-65535, the value to write.
SMBus 2.0 5.5.6 - Process call.
. .
S Addr Wr [A] reg [A] word_val_Low [A] word_val_High [A]
S Addr Rd [A] [DataLow] A [DataHigh] NA P
. .
...
r = pi.i2c_process_call(h, 4, 0x1231)
r = pi.i2c_process_call(h, 6, 0)
...
"""
# I p1 handle
# I p2 reg
# I p3 4
## extension ##
# I word_val
extents = [struct.pack("I", word_val)]
return _u2i(_pigpio_command_ext(
self.sl, _PI_CMD_I2CPC, handle, reg, 4, extents))
def i2c_write_block_data(self, handle, reg, data):
"""
Writes up to 32 bytes to the specified register of the device
associated with handle.
handle:= >=0 (as returned by a prior call to [*i2c_open*]).
reg:= >=0, the device register.
data:= the bytes to write.
SMBus 2.0 5.5.7 - Block write.
. .
S Addr Wr [A] reg [A] len(data) [A] data0 [A] data1 [A] ... [A]
datan [A] P
. .
...
pi.i2c_write_block_data(4, 5, b'hello')
pi.i2c_write_block_data(4, 5, "data bytes")
pi.i2c_write_block_data(5, 0, b'\\x00\\x01\\x22')
pi.i2c_write_block_data(6, 2, [0, 1, 0x22])
...
"""
# I p1 handle
# I p2 reg
# I p3 len
## extension ##
# s len data bytes
if len(data):
return _u2i(_pigpio_command_ext(
self.sl, _PI_CMD_I2CWK, handle, reg, len(data), [data]))
else:
return 0
def i2c_read_block_data(self, handle, reg):
"""
Reads a block of up to 32 bytes from the specified register of
the device associated with handle.
normalization to apply, if any: one of 'LCN', 'LCN-', 'ZCA', or '-1:1'.
split -- tuple
The (train, valid, test) split fractions [(0.6, 0.2, 0.2)].
num_files -- int
Number of files to load for each person.
'''
ds = None
if dataset == 'olivetti':
from sklearn.datasets import fetch_olivetti_faces
ds = fetch_olivetti_faces()
# TODO: Apply processing options to olivetti
elif dataset == 'lfw':
ds = Datasets(
crop_style=crop_style,
crop_factor=crop_factor,
resolution=resolution,
n_files_per_person=n_files_per_person,
n_min_files_per_person=(n_files_per_person / 2),
b_convert_to_grayscale=b_convert_to_grayscale
)
ds = ds.get_parsed_dataset(dataset=dataset, path_to_data=path_to_data)
elif dataset.__class__ is dict and 'target' in dataset.keys() and 'images' in dataset.keys():
ds = dataset
else:
raise ValueError(
'Dataset should be either olivetti, lfw, or a dict defining images and target from get_parsed_dataset')
# Split up the dataset into unique targets for train/test,
# making sure not to repeat any subjects between train/test
# Should get 32 subjects train, 8 test, with a 0.8 split
y = ds['target']
total = len(np.unique(y))
train_split = int(total * split[0])
valid_split = train_split + int(total * split[1])
test_split = total - int(total * split[2])
unique_train_labels = np.unique(y)[:train_split]
unique_valid_labels = np.unique(y)[train_split:valid_split]
unique_test_labels = np.unique(y)[test_split:]  # last subjects only; no overlap with train/valid
# X = (400, 1, 64, 64); y = (400,), 40 subjects w/ 10 examples each of 64
# x 64 pixels
if b_convert_to_grayscale:
X = np.concatenate([img[np.newaxis, np.newaxis, ...]
for img in ds['images']], axis=0)
else:
X = np.concatenate([img[np.newaxis, ...]
for img in ds['images']], axis=0)
print(X.shape)
if normalization == 'LCN':
lcn = LCN(sigma=round(0.0625 * X.shape[2]), subtractive=False)
lcn.fit(X[:int(len(y) * split[0]), ...])
X = lcn.transform(X)
elif normalization == 'LCN-':
lcn = LCN(sigma=round(0.0625 * X.shape[2]), subtractive=True)
lcn.fit(X[:int(len(y) * split[0]), ...])
X = lcn.transform(X)
elif normalization == 'ZCA':
zca = ZCA(bias=0.1)
zca.fit(X[:int(len(y) * split[0]), ...])
X = zca.transform(X)
elif normalization == '-1:1':
for idx in range(len(X)):
X[idx, ...] = (X[idx, ...] - np.min(X[idx, ...])) / \
(np.max(X[idx, ...]) - np.min(X[idx, ...])) * 2.0 - 1.0
if b_load_idxs_only:
# Make pairs of actual and imposter faces, returning the indexes to
# create them
print('train')
X_train_matched, y_train_matched, X_train_unmatched, y_train_unmatched = make_image_pair_idxs(
y, unique_train_labels)
print('valid')
X_valid_matched, y_valid_matched, X_valid_unmatched, y_valid_unmatched = make_image_pair_idxs(
y, unique_valid_labels)
print('test')
X_test_matched, y_test_matched, X_test_unmatched, y_test_unmatched = make_image_pair_idxs(
y, unique_test_labels)
return {
'X': lasagne.utils.floatX(X),
'y': y.astype(np.int32),
'X_train_matched_idxs': X_train_matched.astype(np.int32),
'y_train_matched_idxs': y_train_matched.astype(np.int32),
'X_train_unmatched_idxs': X_train_unmatched.astype(np.int32),
'y_train_unmatched_idxs': y_train_unmatched.astype(np.int32),
'X_valid_matched_idxs': X_valid_matched.astype(np.int32),
'y_valid_matched_idxs': y_valid_matched.astype(np.int32),
'X_valid_unmatched_idxs': X_valid_unmatched.astype(np.int32),
'y_valid_unmatched_idxs': y_valid_unmatched.astype(np.int32),
'X_test_matched_idxs': X_test_matched.astype(np.int32),
'y_test_matched_idxs': y_test_matched.astype(np.int32),
'X_test_unmatched_idxs': X_test_unmatched.astype(np.int32),
'y_test_unmatched_idxs': y_test_unmatched.astype(np.int32)
}
else:
# Make pairs of actual and imposter faces
X_train_matched, y_train_matched, X_train_unmatched, y_train_unmatched = make_image_pairs(
X, y, unique_train_labels)
X_valid_matched, y_valid_matched, X_valid_unmatched, y_valid_unmatched = make_image_pairs(
X, y, unique_valid_labels)
X_test_matched, y_test_matched, X_test_unmatched, y_test_unmatched = make_image_pairs(
X, y, unique_test_labels)
return {
'X_train_matched': lasagne.utils.floatX(X_train_matched),
'y_train_matched': y_train_matched.astype(np.int32),
'X_train_unmatched': lasagne.utils.floatX(X_train_unmatched),
'y_train_unmatched': y_train_unmatched.astype(np.int32),
'X_valid_matched': lasagne.utils.floatX(X_valid_matched),
'y_valid_matched': y_valid_matched.astype(np.int32),
'X_valid_unmatched': lasagne.utils.floatX(X_valid_unmatched),
'y_valid_unmatched': y_valid_unmatched.astype(np.int32),
'X_test_matched': lasagne.utils.floatX(X_test_matched),
'y_test_matched': y_test_matched.astype(np.int32),
'X_test_unmatched': lasagne.utils.floatX(X_test_unmatched),
'y_test_unmatched': y_test_unmatched.astype(np.int32)
}
def interleave_dataset(X_split, y_split):
'''Take paired observations in the channel dimension and convert them
to alternating batches
----------------------
N x 2 x H x W --> 2*N x 1 x H x W
Args
----
X_split : np.ndarray
Array of image pairs with shape (N, 2, H, W).
y_split : np.ndarray
Labels of length N; each label is duplicated so both images of a pair share it in the interleaved output.
'''
# TODO: account for color images
n_batch, n_channels, n_height, n_width = X_split.shape
n_obs = n_batch * n_channels
n_feats = n_height * n_width
X_interleaved = np.empty((n_obs, n_feats), dtype=theano.config.floatX)
y_interleaved = np.empty((n_obs,), dtype=np.int32)
X_interleaved[0::2] = X_split[:, 0, ...].reshape(n_batch, n_feats)
X_interleaved[1::2] = X_split[:, 1, ...].reshape(n_batch, n_feats)
y_interleaved[0::2] = y_split.copy()
y_interleaved[1::2] = y_split.copy()
return X_interleaved.reshape(n_obs, 1, n_height, n_width), y_interleaved
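# Illustrative sketch (not part of the original module): a tiny self-check of
# the interleaving shape contract, (N, 2, H, W) -> (2*N, 1, H, W). Assumes the
# module-level numpy (np) and theano imports used above.
def _interleave_dataset_shape_demo():
    '''Build a dummy batch of 5 image pairs and verify the interleaved shape.'''
    X_pairs = np.zeros((5, 2, 64, 64), dtype='float32')
    y_pairs = np.ones((5,), dtype=np.int32)
    X_i, y_i = interleave_dataset(X_pairs, y_pairs)
    # Even rows hold the first image of each pair, odd rows the second;
    # labels are duplicated accordingly.
    assert X_i.shape == (10, 1, 64, 64)
    assert y_i.shape == (10,)
    return X_i, y_i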
def shuffle_dataset(X, y):
'''Randomly permute the order of the observations and their associated labels
Parameters
----------
X : np.ndarray
Observations, indexed along the first axis.
y : np.ndarray
Labels with the same length as X.
'''
indices = np.random.permutation(len(y))
return X[indices, ...], y[indices, ...]
def get_balanced_shuffled_dataset(X_matched, y_matched, X_unmatched, y_unmatched):
'''Shuffles dataset, producing training data with similar number of matched
and unmatched observations. There are often much more unmatched
observations, so this method is used to sample from the larger set of
unmatched observations, while using as many matched observations as
there are, but randomly permuting their order.
Parameters
----------
X_matched : np.ndarray
Matched (same-identity) observation pairs.
y_matched : np.ndarray
Labels for the matched pairs.
X_unmatched : np.ndarray
Unmatched (imposter) observation pairs; typically far more numerous.
y_unmatched : np.ndarray
Labels for the unmatched pairs.
'''
npairs = X_matched.shape[0]
# Shuffle order
X_matched, y_matched = shuffle_dataset(X_matched, y_matched)
X_unmatched, y_unmatched = shuffle_dataset(X_unmatched, y_unmatched)
# Sample same number of unmatched data
X_train = np.concatenate((X_matched, X_unmatched[:npairs]))
y_train = np.concatenate((y_matched, y_unmatched[:npairs]))
# Shuffle again so that batches aren't all matched/unmatched
X_train, y_train = shuffle_dataset(X_train, y_train)
return X_train, y_train
def make_shared(X, dtype):
'''Convert `X` to a theano shared variable with the given type.
Parameters
----------
X : array-like
Data to store in the shared variable.
dtype : str or np.dtype
Type to cast the data to, e.g. theano.config.floatX.
'''
return theano.shared(np.asarray(X, dtype=dtype), borrow=True)
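# Illustrative sketch (not part of the original module): wrapping arrays as
# theano shared variables for device-resident training data; X_train/y_train
# are assumed to be numpy arrays produced elsewhere.
#
# X_shared = make_shared(X_train, theano.config.floatX)
# y_shared = make_shared(y_train, 'int32')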
def generate_new_dataset_batch(X_matched, y_matched, X_unmatched, y_unmatched, batch_size):
'''Generator which loops through a randomly permuted ordering of the dataset.
This method requires the generated pairs of the data, which is a much
higher number of observations than the original dataset.
If you cannot fit the entire dataset into memory, use the slower method:
`generate_new_dataset_batch_from_idxs`
Returns X_train, y_train
Parameters
----------
X_matched : np.ndarray
Matched (same-identity) observation pairs.
y_matched : np.ndarray
Labels for the matched pairs.
X_unmatched : np.ndarray
Unmatched (imposter) observation pairs.
y_unmatched : np.ndarray
Labels for the unmatched pairs.
batch_size : int
Number of observations per yielded batch (rounded up to an even number).
'''
# Generate a new shuffled, balanced dataset
X_train, y_train = get_balanced_shuffled_dataset(
X_matched, y_matched, X_unmatched, y_unmatched)
# Interleave pairs into sequential batches which will be used in the
# distance/loss functions appropriately
X_train, y_train = interleave_dataset(X_train, y_train)
nobs = len(X_train)
# Make sure it is even
batch_size = batch_size + (batch_size % 2)
# Loop until we're out of observations
batch_start = 0
batch_end = batch_size
while batch_start < np.min((nobs, (nobs - batch_size))):
yield X_train[batch_start:batch_end, ...], y_train[batch_start:batch_end, ...]
batch_start = batch_end
batch_end = batch_start + batch_size
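# Illustrative sketch (not part of the original module): consuming the
# generator in a training loop. The X_*/y_* names are assumed to be the pair
# arrays produced earlier (e.g. the 'X_train_matched' entries of the dataset
# dict above), and train_fn is a hypothetical compiled training function.
#
# for X_batch, y_batch in generate_new_dataset_batch(
#         X_train_matched, y_train_matched,
#         X_train_unmatched, y_train_unmatched, batch_size=128):
#     train_fn(X_batch, y_batch)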
def generate_new_dataset_batch_from_idxs(
X, y, X_matched_idxs, y_matched_idxs,
X_unmatched_idxs, y_unmatched_idxs, batch_size):
'''Generator which loops through a randomly permuted ordering of the dataset.
This method requires the generated pairs of the data as indexes.
Returns X_train, y_train
Parameters
----------
X : np.ndarray
Full set of images.
y : np.ndarray
Labels for X.
X_matched_idxs : np.ndarray
Index pairs into X for matched (same-identity) observations.
y_matched_idxs : np.ndarray
Labels for the matched index pairs.
X_unmatched_idxs : np.ndarray
Index pairs into X for unmatched (imposter) observations.
y_unmatched_idxs : np.ndarray
Labels for the unmatched index pairs.
batch_size : int
Number of observations per yielded batch (rounded up to an even number).
'''
# Generate a new shuffled, balanced dataset
X_train, y_train = get_balanced_shuffled_dataset(
X_matched_idxs, y_matched_idxs, X_unmatched_idxs, y_unmatched_idxs)
# Interleave pairs into sequential batches which will be used in the distance/loss functions appropriately
# TODO: account for color images
X_train, y_train = interleave_dataset(
X_train[..., np.newaxis, np.newaxis], y_train)
X_train = np.squeeze(X_train).astype(np.int32)
y_train = np.squeeze(y_train).astype(np.int32)
nobs = len(X_train)
# Make sure it is even
batch_size = batch_size + (batch_size % 2)
# Loop until we're out of observations
batch_start = 0
batch_end = batch_size
while batch_start < np.min((nobs, (nobs - batch_size))):
yield X[X_train[batch_start:batch_end, ...], ...], y_train[batch_start:batch_end, ...]
batch_start = batch_end
batch_end = batch_start + batch_size
class SiameseNetPredictor(object):
'''Loads a pre-trained Deep Net for Face Verification which uses a
Siamese Net distance function + LogisticRegression on the final feature
layer. Requires the pretrained model in the directory results
Attributes
----------
clf : sklearn.linear_model.LogisticRegression
Final classifier trained on the L1 distance between the two feature vectors.
fn : theano.function
Compiled function computing the network output for an input batch.
norm : LCN or ZCA
Normalization fitted on the supplied images (if the model used one).
result : dict
Pretrained model parameters and metadata loaded from the pickle file.
'''
def __init__(self, images, filename='./lfw.pkl'):
"""Summary"""
# Load the pretrained model
self.result = pickle.load(open(filename, 'rb'))
print(self.result['params'])
self.grayscale = self.result['params']['b_convert_to_grayscale']
self.normalization = self.result['params']['normalization']
self.net = ConvSiameseNet(
input_channels=(1
if self.grayscale
else 3),
input_width=self.result['params']['resolution'][0],
input_height=self.result['params']['resolution'][1],
n_out=self.result['params']['n_features'],
distance_fn=self.result['params']['distance_fn'],
nonlinearity=self.result['params']['nonlinearity'])
if self.result['params']['model_type'] == 'custom':
self.net.use_custom_model()
elif self.result['params']['model_type'] == 'hani':
self.net.use_hani_model()
elif self.result['params']['model_type'] == 'chopra':
self.net.use_chopra_model()
else:
print('Unrecognized model!')
self.net.set_from_parameters(
pickle.loads(self.result['model_parameters']))
pred = lasagne.layers.get_output(self.net.model, self.net.x,
deterministic=True)
# Compile
self.fn = theano.function([self.net.x], [pred])
# We'll hash functions for every layer if/when user asks for them
self.fns = {}
# Train final regressor on entire dataset
# (cheating, but...¯\_(ツ)_/¯)
Xs = self.result['prediction']['X']
ys = self.result['prediction']['y']
Xs_L1 = np.abs(Xs[:, :self.net.n_out] - Xs[:, self.net.n_out:])
self.clf = LogisticRegression()
self.clf.fit(Xs_L1, ys)
# Load normalization kernel
# (previously created using LCN on the training set)
# self.lcn = pickle.loads(self.result['LCN'])
if self.grayscale:
X = np.concatenate([img[np.newaxis, np.newaxis, ...]
for img in images], axis=0)
else:
X = np.concatenate([img[np.newaxis, ...]
for img in images], axis=0)
print(X.shape)
if self.normalization == 'LCN':
lcn = LCN(
sigma=round(0.0625 * self.result['params']['resolution'][0]),
subtractive=False)
lcn.fit(X)
self.norm = lcn
elif self.normalization == 'LCN-':
lcn = LCN(
sigma=round(0.0625 * self.result['params']['resolution'][0]),
subtractive=True)
lcn.fit(X)
self.norm = lcn
elif self.normalization == 'ZCA':
zca = ZCA(bias=0.1)
zca.fit(X)
self.norm = zca
"""Compatibility interface between dense and sparse polys. """
from sympy.polys.densearith import dup_add_term
from sympy.polys.densearith import dmp_add_term
from sympy.polys.densearith import dup_sub_term
from sympy.polys.densearith import dmp_sub_term
from sympy.polys.densearith import dup_mul_term
from sympy.polys.densearith import dmp_mul_term
from sympy.polys.densearith import dup_add_ground
from sympy.polys.densearith import dmp_add_ground
from sympy.polys.densearith import dup_sub_ground
from sympy.polys.densearith import dmp_sub_ground
from sympy.polys.densearith import dup_mul_ground
from sympy.polys.densearith import dmp_mul_ground
from sympy.polys.densearith import dup_quo_ground
from sympy.polys.densearith import dmp_quo_ground
from sympy.polys.densearith import dup_exquo_ground
from sympy.polys.densearith import dmp_exquo_ground
from sympy.polys.densearith import dup_lshift
from sympy.polys.densearith import dup_rshift
from sympy.polys.densearith import dup_abs
from sympy.polys.densearith import dmp_abs
from sympy.polys.densearith import dup_neg
from sympy.polys.densearith import dmp_neg
from sympy.polys.densearith import dup_add
from sympy.polys.densearith import dmp_add
from sympy.polys.densearith import dup_sub
from sympy.polys.densearith import dmp_sub
from sympy.polys.densearith import dup_add_mul
from sympy.polys.densearith import dmp_add_mul
from sympy.polys.densearith import dup_sub_mul
from sympy.polys.densearith import dmp_sub_mul
from sympy.polys.densearith import dup_mul
from sympy.polys.densearith import dmp_mul
from sympy.polys.densearith import dup_sqr
from sympy.polys.densearith import dmp_sqr
from sympy.polys.densearith import dup_pow
from sympy.polys.densearith import dmp_pow
from sympy.polys.densearith import dup_pdiv
from sympy.polys.densearith import dup_prem
from sympy.polys.densearith import dup_pquo
from sympy.polys.densearith import dup_pexquo
from sympy.polys.densearith import dmp_pdiv
from sympy.polys.densearith import dmp_prem
from sympy.polys.densearith import dmp_pquo
from sympy.polys.densearith import dmp_pexquo
from sympy.polys.densearith import dup_rr_div
from sympy.polys.densearith import dmp_rr_div
from sympy.polys.densearith import dup_ff_div
from sympy.polys.densearith import dmp_ff_div
from sympy.polys.densearith import dup_div
from sympy.polys.densearith import dup_rem
from sympy.polys.densearith import dup_quo
from sympy.polys.densearith import dup_exquo
from sympy.polys.densearith import dmp_div
from sympy.polys.densearith import dmp_rem
from sympy.polys.densearith import dmp_quo
from sympy.polys.densearith import dmp_exquo
from sympy.polys.densearith import dup_max_norm
from sympy.polys.densearith import dmp_max_norm
from sympy.polys.densearith import dup_l1_norm
from sympy.polys.densearith import dmp_l1_norm
from sympy.polys.densearith import dup_l2_norm_squared
from sympy.polys.densearith import dmp_l2_norm_squared
from sympy.polys.densearith import dup_expand
from sympy.polys.densearith import dmp_expand
from sympy.polys.densebasic import dup_LC
from sympy.polys.densebasic import dmp_LC
from sympy.polys.densebasic import dup_TC
from sympy.polys.densebasic import dmp_TC
from sympy.polys.densebasic import dmp_ground_LC
from sympy.polys.densebasic import dmp_ground_TC
from sympy.polys.densebasic import dup_degree
from sympy.polys.densebasic import dmp_degree
from sympy.polys.densebasic import dmp_degree_in
from sympy.polys.densebasic import dmp_to_dict
from sympy.polys.densetools import dup_integrate
from sympy.polys.densetools import dmp_integrate
from sympy.polys.densetools import dmp_integrate_in
from sympy.polys.densetools import dup_diff
from sympy.polys.densetools import dmp_diff
from sympy.polys.densetools import dmp_diff_in
from sympy.polys.densetools import dup_eval
from sympy.polys.densetools import dmp_eval
from sympy.polys.densetools import dmp_eval_in
from sympy.polys.densetools import dmp_eval_tail
from sympy.polys.densetools import dmp_diff_eval_in
from sympy.polys.densetools import dup_trunc
from sympy.polys.densetools import dmp_trunc
from sympy.polys.densetools import dmp_ground_trunc
from sympy.polys.densetools import dup_monic
from sympy.polys.densetools import dmp_ground_monic
from sympy.polys.densetools import dup_content
from sympy.polys.densetools import dmp_ground_content
from sympy.polys.densetools import dup_primitive
from sympy.polys.densetools import dmp_ground_primitive
from sympy.polys.densetools import dup_extract
from sympy.polys.densetools import dmp_ground_extract
from sympy.polys.densetools import dup_real_imag
from sympy.polys.densetools import dup_mirror
from sympy.polys.densetools import dup_scale
from sympy.polys.densetools import dup_shift
from sympy.polys.densetools import dup_transform
from sympy.polys.densetools import dup_compose
from sympy.polys.densetools import dmp_compose
from sympy.polys.densetools import dup_decompose
from sympy.polys.densetools import dmp_lift
from sympy.polys.densetools import dup_sign_variations
from sympy.polys.densetools import dup_clear_denoms
from sympy.polys.densetools import dmp_clear_denoms
from sympy.polys.densetools import dup_revert
from sympy.polys.euclidtools import dup_half_gcdex
from sympy.polys.euclidtools import dmp_half_gcdex
from sympy.polys.euclidtools import dup_gcdex
from sympy.polys.euclidtools import dmp_gcdex
from sympy.polys.euclidtools import dup_invert
from sympy.polys.euclidtools import dmp_invert
from sympy.polys.euclidtools import dup_euclidean_prs
from sympy.polys.euclidtools import dmp_euclidean_prs
from sympy.polys.euclidtools import dup_primitive_prs
from sympy.polys.euclidtools import dmp_primitive_prs
from sympy.polys.euclidtools import dup_inner_subresultants
from sympy.polys.euclidtools import dup_subresultants
from sympy.polys.euclidtools import dup_prs_resultant
from sympy.polys.euclidtools import dup_resultant
from sympy.polys.euclidtools import dmp_inner_subresultants
from sympy.polys.euclidtools import dmp_subresultants
from sympy.polys.euclidtools import dmp_prs_resultant
from sympy.polys.euclidtools import dmp_zz_modular_resultant
from sympy.polys.euclidtools import dmp_zz_collins_resultant
from sympy.polys.euclidtools import dmp_qq_collins_resultant
from sympy.polys.euclidtools import dmp_resultant
from sympy.polys.euclidtools import dup_discriminant
from sympy.polys.euclidtools import dmp_discriminant
from sympy.polys.euclidtools import dup_rr_prs_gcd
from sympy.polys.euclidtools import dup_ff_prs_gcd
from sympy.polys.euclidtools import dmp_rr_prs_gcd
from sympy.polys.euclidtools import dmp_ff_prs_gcd
from sympy.polys.euclidtools import dup_zz_heu_gcd
from sympy.polys.euclidtools import dmp_zz_heu_gcd
from sympy.polys.euclidtools import dup_qq_heu_gcd
from sympy.polys.euclidtools import dmp_qq_heu_gcd
from sympy.polys.euclidtools import dup_inner_gcd
from sympy.polys.euclidtools import dmp_inner_gcd
from sympy.polys.euclidtools import dup_gcd
from sympy.polys.euclidtools import dmp_gcd
from sympy.polys.euclidtools import dup_rr_lcm
from sympy.polys.euclidtools import dup_ff_lcm
from sympy.polys.euclidtools import dup_lcm
from sympy.polys.euclidtools import dmp_rr_lcm
from sympy.polys.euclidtools import dmp_ff_lcm
from sympy.polys.euclidtools import dmp_lcm
from sympy.polys.euclidtools import dmp_content
from sympy.polys.euclidtools import dmp_primitive
from sympy.polys.euclidtools import dup_cancel
from sympy.polys.euclidtools import dmp_cancel
from sympy.polys.factortools import dup_trial_division
from sympy.polys.factortools import dmp_trial_division
from sympy.polys.factortools import dup_zz_mignotte_bound
from sympy.polys.factortools import dmp_zz_mignotte_bound
from sympy.polys.factortools import dup_zz_hensel_step
from sympy.polys.factortools import dup_zz_hensel_lift
from sympy.polys.factortools import dup_zz_zassenhaus
from sympy.polys.factortools import dup_zz_irreducible_p
from sympy.polys.factortools import dup_cyclotomic_p
from sympy.polys.factortools import dup_zz_cyclotomic_poly
from sympy.polys.factortools import dup_zz_cyclotomic_factor
from sympy.polys.factortools import dup_zz_factor_sqf
from sympy.polys.factortools import dup_zz_factor
from sympy.polys.factortools import dmp_zz_wang_non_divisors
from sympy.polys.factortools import dmp_zz_wang_lead_coeffs
from sympy.polys.factortools import dup_zz_diophantine
from sympy.polys.factortools import dmp_zz_diophantine
from sympy.polys.factortools import dmp_zz_wang_hensel_lifting
from sympy.polys.factortools import dmp_zz_wang
from sympy.polys.factortools import dmp_zz_factor
from sympy.polys.factortools import dup_qq_i_factor
from sympy.polys.factortools import dup_zz_i_factor
from sympy.polys.factortools import dmp_qq_i_factor
from sympy.polys.factortools import dmp_zz_i_factor
from sympy.polys.factortools import dup_ext_factor
from sympy.polys.factortools import dmp_ext_factor
from sympy.polys.factortools import dup_gf_factor
from sympy.polys.factortools import dmp_gf_factor
from sympy.polys.factortools import dup_factor_list
from sympy.polys.factortools import dup_factor_list_include
from sympy.polys.factortools import dmp_factor_list
from sympy.polys.factortools import dmp_factor_list_include
from sympy.polys.factortools import dup_irreducible_p
from sympy.polys.factortools import dmp_irreducible_p
from sympy.polys.rootisolation import dup_sturm
from sympy.polys.rootisolation import dup_root_upper_bound
from sympy.polys.rootisolation import dup_root_lower_bound
from sympy.polys.rootisolation import dup_step_refine_real_root
from sympy.polys.rootisolation import dup_inner_refine_real_root
from sympy.polys.rootisolation import dup_outer_refine_real_root
from sympy.polys.rootisolation import dup_refine_real_root
from sympy.polys.rootisolation import dup_inner_isolate_real_roots
from sympy.polys.rootisolation import dup_inner_isolate_positive_roots
from sympy.polys.rootisolation import dup_inner_isolate_negative_roots
from sympy.polys.rootisolation import dup_isolate_real_roots_sqf
from sympy.polys.rootisolation import dup_isolate_real_roots
from sympy.polys.rootisolation import dup_isolate_real_roots_list
from sympy.polys.rootisolation import dup_count_real_roots
from sympy.polys.rootisolation import dup_count_complex_roots
from sympy.polys.rootisolation import dup_isolate_complex_roots_sqf
from sympy.polys.rootisolation import dup_isolate_all_roots_sqf
from sympy.polys.rootisolation import dup_isolate_all_roots
from sympy.polys.sqfreetools import (
dup_sqf_p, dmp_sqf_p, dup_sqf_norm, dmp_sqf_norm, dup_gf_sqf_part, dmp_gf_sqf_part,
dup_sqf_part, dmp_sqf_part, dup_gf_sqf_list, dmp_gf_sqf_list, dup_sqf_list,
dup_sqf_list_include, dmp_sqf_list, dmp_sqf_list_include, dup_gff_list, dmp_gff_list)
from sympy.polys.galoistools import (
gf_degree, gf_LC, gf_TC, gf_strip, gf_from_dict,
gf_to_dict, gf_from_int_poly, gf_to_int_poly, gf_neg, gf_add_ground, gf_sub_ground,
gf_mul_ground, gf_quo_ground, gf_add, gf_sub, gf_mul, gf_sqr, gf_add_mul, gf_sub_mul,
gf_expand, gf_div, gf_rem, gf_quo, gf_exquo, gf_lshift, gf_rshift, gf_pow, gf_pow_mod,
gf_gcd, gf_lcm, gf_cofactors, gf_gcdex, gf_monic, gf_diff, gf_eval, gf_multi_eval,
gf_compose, gf_compose_mod, gf_trace_map, gf_random, gf_irreducible, gf_irred_p_ben_or,
gf_irred_p_rabin, gf_irreducible_p, gf_sqf_p, gf_sqf_part, gf_Qmatrix,
gf_berlekamp, gf_ddf_zassenhaus, gf_edf_zassenhaus, gf_ddf_shoup, gf_edf_shoup,
gf_zassenhaus, gf_shoup, gf_factor_sqf, gf_factor)
from sympy.utilities import public
@public
class IPolys:
symbols = None
ngens = None
domain = None
order = None
gens = None
def drop(self, gen):
pass
def clone(self, symbols=None, domain=None, order=None):
pass
def to_ground(self):
pass
def ground_new(self, element):
pass
def domain_new(self, element):
pass
def from_dict(self, d):
pass
def wrap(self, element):
from sympy.polys.rings import PolyElement
if isinstance(element, PolyElement):
if element.ring == self:
return element
else:
raise NotImplementedError("domain conversions")
else:
return self.ground_new(element)
def to_dense(self, element):
return self.wrap(element).to_dense()
def from_dense(self, element):
return self.from_dict(dmp_to_dict(element, self.ngens-1, self.domain))
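# Illustrative sketch (not part of the original file): the dense/sparse round
# trip these helpers implement, assuming a concrete ring that provides the
# IPolys interface, e.g.
#
#   from sympy.polys.domains import ZZ
#   from sympy.polys.rings import ring
#   R, x, y = ring("x,y", ZZ)
#   R.to_dense(x**2 + y)             # -> [[1], [], [1, 0]]  (dmp list form)
#   R.from_dense([[1], [], [1, 0]])  # -> x**2 + y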
def dup_add_term(self, f, c, i):
return self.from_dense(dup_add_term(self.to_dense(f), c, i, self.domain))
def dmp_add_term(self, f, c, i):
return self.from_dense(dmp_add_term(self.to_dense(f), self.wrap(c).drop(0).to_dense(), i, self.ngens-1, self.domain))
def dup_sub_term(self, f, c, i):
return self.from_dense(dup_sub_term(self.to_dense(f), c, i, self.domain))
def dmp_sub_term(self, f, c, i):
return self.from_dense(dmp_sub_term(self.to_dense(f), self.wrap(c).drop(0).to_dense(), i, self.ngens-1, self.domain))
def dup_mul_term(self, f, c, i):
return self.from_dense(dup_mul_term(self.to_dense(f), c, i, self.domain))
def dmp_mul_term(self, f, c, i):
return self.from_dense(dmp_mul_term(self.to_dense(f), self.wrap(c).drop(0).to_dense(), i, self.ngens-1, self.domain))
def dup_add_ground(self, f, c):
return self.from_dense(dup_add_ground(self.to_dense(f), c, self.domain))
def dmp_add_ground(self, f, c):
return self.from_dense(dmp_add_ground(self.to_dense(f), c, self.ngens-1, self.domain))
def dup_sub_ground(self, f, c):
return self.from_dense(dup_sub_ground(self.to_dense(f), c, self.domain))
def dmp_sub_ground(self, f, c):
return self.from_dense(dmp_sub_ground(self.to_dense(f), c, self.ngens-1, self.domain))
def dup_mul_ground(self, f, c):
return self.from_dense(dup_mul_ground(self.to_dense(f), c, self.domain))
def dmp_mul_ground(self, f, c):
return self.from_dense(dmp_mul_ground(self.to_dense(f), c, self.ngens-1, self.domain))
def dup_quo_ground(self, f, c):
return self.from_dense(dup_quo_ground(self.to_dense(f), c, self.domain))
def dmp_quo_ground(self, f, c):
return self.from_dense(dmp_quo_ground(self.to_dense(f), c, self.ngens-1, self.domain))
def dup_exquo_ground(self, f, c):
return self.from_dense(dup_exquo_ground(self.to_dense(f), c, self.domain))
def dmp_exquo_ground(self, f, c):
return self.from_dense(dmp_exquo_ground(self.to_dense(f), c, self.ngens-1, self.domain))
def dup_lshift(self, f, n):
return self.from_dense(dup_lshift(self.to_dense(f), n, self.domain))
def dup_rshift(self, f, n):
return self.from_dense(dup_rshift(self.to_dense(f), n, self.domain))
def dup_abs(self, f):
return self.from_dense(dup_abs(self.to_dense(f), self.domain))
def dmp_abs(self, f):
return self.from_dense(dmp_abs(self.to_dense(f), self.ngens-1, self.domain))
def dup_neg(self, f):
return self.from_dense(dup_neg(self.to_dense(f), self.domain))
def dmp_neg(self, f):
return self.from_dense(dmp_neg(self.to_dense(f), self.ngens-1, self.domain))
def dup_add(self, f, g):
return self.from_dense(dup_add(self.to_dense(f), self.to_dense(g), self.domain))
def dmp_add(self, f, g):
return self.from_dense(dmp_add(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain))
def dup_sub(self, f, g):
return self.from_dense(dup_sub(self.to_dense(f), self.to_dense(g), self.domain))
def dmp_sub(self, f, g):
return self.from_dense(dmp_sub(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain))
def dup_add_mul(self, f, g, h):
return self.from_dense(dup_add_mul(self.to_dense(f), self.to_dense(g), self.to_dense(h), self.domain))
def dmp_add_mul(self, f, g, h):
return self.from_dense(dmp_add_mul(self.to_dense(f), self.to_dense(g), self.to_dense(h), self.ngens-1, self.domain))
def dup_sub_mul(self, f, g, h):
return self.from_dense(dup_sub_mul(self.to_dense(f), self.to_dense(g), self.to_dense(h), self.domain))
def dmp_sub_mul(self, f, g, h):
return self.from_dense(dmp_sub_mul(self.to_dense(f), self.to_dense(g), self.to_dense(h), self.ngens-1, self.domain))
def dup_mul(self, f, g):
return self.from_dense(dup_mul(self.to_dense(f), self.to_dense(g), self.domain))
def dmp_mul(self, f, g):
return self.from_dense(dmp_mul(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain))
def dup_sqr(self, f):
return self.from_dense(dup_sqr(self.to_dense(f), self.domain))
def dmp_sqr(self, f):
return self.from_dense(dmp_sqr(self.to_dense(f), self.ngens-1, self.domain))
def dup_pow(self, f, n):
return self.from_dense(dup_pow(self.to_dense(f), n, self.domain))
def dmp_pow(self, f, n):
return self.from_dense(dmp_pow(self.to_dense(f), n, self.ngens-1, self.domain))
def dup_pdiv(self, f, g):
q, r = dup_pdiv(self.to_dense(f), self.to_dense(g), self.domain)
return (self.from_dense(q), self.from_dense(r))
def dup_prem(self, f, g):
return self.from_dense(dup_prem(self.to_dense(f), self.to_dense(g), self.domain))
def dup_pquo(self, f, g):
return self.from_dense(dup_pquo(self.to_dense(f), self.to_dense(g), self.domain))
def dup_pexquo(self, f, g):
return self.from_dense(dup_pexquo(self.to_dense(f), self.to_dense(g), self.domain))
def dmp_pdiv(self, f, g):
q, r = dmp_pdiv(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain)
return (self.from_dense(q), self.from_dense(r))
def dmp_prem(self, f, g):
return self.from_dense(dmp_prem(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain))
def dmp_pquo(self, f, g):
return self.from_dense(dmp_pquo(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain))
def dmp_pexquo(self, f, g):
return self.from_dense(dmp_pexquo(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain))
def dup_rr_div(self, f, g):
q, r = dup_rr_div(self.to_dense(f), self.to_dense(g), self.domain)
return (self.from_dense(q), self.from_dense(r))
def dmp_rr_div(self, f, g):
q, r = dmp_rr_div(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain)
return (self.from_dense(q), self.from_dense(r))
def dup_ff_div(self, f, g):
q, r = dup_ff_div(self.to_dense(f), self.to_dense(g), self.domain)
return (self.from_dense(q), self.from_dense(r))
def dmp_ff_div(self, f, g):
q, r = dmp_ff_div(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain)
return (self.from_dense(q), self.from_dense(r))
def dup_div(self, f, g):
q, r = dup_div(self.to_dense(f), self.to_dense(g), self.domain)
return (self.from_dense(q), self.from_dense(r))
def dup_rem(self, f, g):
return self.from_dense(dup_rem(self.to_dense(f), self.to_dense(g), self.domain))
def dup_quo(self, f, g):
return self.from_dense(dup_quo(self.to_dense(f), self.to_dense(g), self.domain))
def dup_exquo(self, f, g):
return self.from_dense(dup_exquo(self.to_dense(f), self.to_dense(g), self.domain))
def dmp_div(self, f, g):
q, r = dmp_div(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain)
return (self.from_dense(q), self.from_dense(r))
def dmp_rem(self, f, g):
return self.from_dense(dmp_rem(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain))
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import re
import collections
import numbers
from contextlib import contextmanager
import numpy as np
import pandas as pd
import skbio.sequence.distance
from skbio._base import SkbioObject
from skbio.metadata._mixin import (MetadataMixin, PositionalMetadataMixin,
IntervalMetadataMixin)
from skbio.metadata import IntervalMetadata
from skbio.sequence._repr import _SequenceReprBuilder
from skbio.util._decorator import (stable, experimental, classonlymethod,
overrides)
class Sequence(MetadataMixin, PositionalMetadataMixin, IntervalMetadataMixin,
collections.Sequence, SkbioObject):
"""Store generic sequence data and optional associated metadata.
``Sequence`` objects do not enforce an alphabet or grammar and are thus the
most generic objects for storing sequence data. ``Sequence`` objects do not
necessarily represent biological sequences. For example, ``Sequence`` can
be used to represent a position in a multiple sequence alignment.
Subclasses ``DNA``, ``RNA``, and ``Protein`` enforce the IUPAC character
set [1]_ for, and provide operations specific to, each respective molecule
type.
``Sequence`` objects consist of the underlying sequence data, as well
as optional metadata and positional metadata. The underlying sequence
is immutable, while the metadata and positional metadata are mutable.
Parameters
----------
sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\\|S1')
Characters representing the sequence itself.
metadata : dict, optional
Arbitrary metadata which applies to the entire sequence. A shallow copy
of the ``dict`` will be made (see Examples section below for details).
positional_metadata : pd.DataFrame consumable, optional
Arbitrary per-character metadata (e.g., sequence read quality
scores). Must be able to be passed directly to ``pd.DataFrame``
constructor. Each column of metadata must be the same length as
`sequence`. A shallow copy of the positional metadata will be made if
necessary (see Examples section below for details).
interval_metadata : IntervalMetadata
Arbitrary metadata which applies to intervals within a sequence to
store interval features (such as genes, ncRNA on the sequence).
lowercase : bool or str, optional
If ``True``, lowercase sequence characters will be converted to
uppercase characters. If ``False``, no characters will be converted.
If a str, it will be treated as a key into the positional metadata of
the object. All lowercase characters will be converted to uppercase,
and a ``True`` value will be stored in a boolean array in the
positional metadata under the key.
See Also
--------
DNA
RNA
Protein
References
----------
.. [1] Nomenclature for incompletely specified bases in nucleic acid
sequences: recommendations 1984.
Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
A Cornish-Bowden
Examples
--------
>>> from pprint import pprint
>>> from skbio import Sequence
>>> from skbio.metadata import IntervalMetadata
**Creating sequences:**
Create a sequence without any metadata:
>>> seq = Sequence('GGUCGUGAAGGA')
>>> seq
Sequence
---------------
Stats:
length: 12
---------------
0 GGUCGUGAAG GA
Create a sequence with metadata and positional metadata:
>>> metadata = {'authors': ['Alice'], 'desc':'seq desc', 'id':'seq-id'}
>>> positional_metadata = {'exons': [True, True, False, True],
... 'quality': [3, 3, 4, 10]}
>>> interval_metadata = IntervalMetadata(4)
>>> interval = interval_metadata.add([(1, 3)], metadata={'gene': 'sagA'})
>>> seq = Sequence('ACGT', metadata=metadata,
... positional_metadata=positional_metadata,
... interval_metadata=interval_metadata)
>>> seq
Sequence
-----------------------------
Metadata:
'authors': <class 'list'>
'desc': 'seq desc'
'id': 'seq-id'
Positional metadata:
'exons': <dtype: bool>
'quality': <dtype: int64>
Interval metadata:
1 interval feature
Stats:
length: 4
-----------------------------
0 ACGT
**Retrieving underlying sequence data:**
Retrieve underlying sequence:
>>> seq.values # doctest: +NORMALIZE_WHITESPACE
array([b'A', b'C', b'G', b'T'],
dtype='|S1')
Underlying sequence immutable:
>>> seq.values = np.array([b'T', b'C', b'G', b'A'], dtype='|S1')
Traceback (most recent call last):
...
AttributeError: can't set attribute
>>> seq.values[0] = b'T'
Traceback (most recent call last):
...
ValueError: assignment destination is read-only
**Retrieving sequence metadata:**
Retrieve metadata:
>>> pprint(seq.metadata) # using pprint to display dict in sorted order
{'authors': ['Alice'], 'desc': 'seq desc', 'id': 'seq-id'}
Retrieve positional metadata:
>>> seq.positional_metadata
exons quality
0 True 3
1 True 3
2 False 4
3 True 10
Retrieve interval metadata:
>>> seq.interval_metadata # doctest: +ELLIPSIS
1 interval feature
------------------
Interval(interval_metadata=<...>, bounds=[(1, 3)], \
fuzzy=[(False, False)], metadata={'gene': 'sagA'})
**Updating sequence metadata:**
.. warning:: Be aware that a shallow copy of ``metadata`` and
``positional_metadata`` is made for performance. Since a deep copy is
not made, changes made to mutable Python objects stored as metadata may
affect the metadata of other ``Sequence`` objects or anything else that
shares a reference to the object. The following examples illustrate this
behavior.
First, let's create a sequence and update its metadata:
>>> metadata = {'id':'seq-id', 'desc':'seq desc', 'authors': ['Alice']}
>>> seq = Sequence('ACGT', metadata=metadata)
>>> seq.metadata['id'] = 'new-id'
>>> seq.metadata['pubmed'] = 12345
>>> pprint(seq.metadata)
{'authors': ['Alice'], 'desc': 'seq desc', 'id': 'new-id', 'pubmed': 12345}
Note that the original metadata dictionary (stored in variable
``metadata``) hasn't changed because a shallow copy was made:
>>> pprint(metadata)
{'authors': ['Alice'], 'desc': 'seq desc', 'id': 'seq-id'}
>>> seq.metadata == metadata
False
Note however that since only a *shallow* copy was made, updates to mutable
objects will also change the original metadata dictionary:
>>> seq.metadata['authors'].append('Bob')
>>> seq.metadata['authors']
['Alice', 'Bob']
>>> metadata['authors']
['Alice', 'Bob']
This behavior can also occur when manipulating a sequence that has been
derived from another sequence:
>>> subseq = seq[1:3]
>>> subseq
Sequence
-----------------------------
Metadata:
'authors': <class 'list'>
'desc': 'seq desc'
'id': 'new-id'
'pubmed': 12345
Stats:
length: 2
-----------------------------
0 CG
>>> pprint(subseq.metadata)
{'authors': ['Alice', 'Bob'],
'desc': 'seq desc',
'id': 'new-id',
'pubmed': 12345}
The subsequence has inherited the metadata of its parent sequence. If we
update the subsequence's author list, we see the changes propagated in the
parent sequence and original metadata dictionary:
>>> subseq.metadata['authors'].append('Carol')
>>> subseq.metadata['authors']
['Alice', 'Bob', 'Carol']
>>> seq.metadata['authors']
['Alice', 'Bob', 'Carol']
>>> metadata['authors']
['Alice', 'Bob', 'Carol']
The behavior for updating positional metadata is similar. Let's create a
new sequence with positional metadata that is already stored in a
``pd.DataFrame``:
>>> positional_metadata = pd.DataFrame(
... {'list': [[], [], [], []], 'quality': [3, 3, 4, 10]})
>>> seq = Sequence('ACGT', positional_metadata=positional_metadata)
>>> seq
Sequence
-----------------------------
Positional metadata:
'list': <dtype: object>
'quality': <dtype: int64>
Stats:
length: 4
-----------------------------
0 ACGT
>>> seq.positional_metadata
list quality
0 [] 3
1 [] 3
2 [] 4
3 [] 10
Now let's update the sequence's positional metadata by adding a new column
and changing a value in another column:
>>> seq.positional_metadata['gaps'] = [False, False, False, False]
>>> seq.positional_metadata.loc[0, 'quality'] = 999
>>> seq.positional_metadata
list quality gaps
0 [] 999 False
1 [] 3 False
2 [] 4 False
3 [] 10 False
Note that the original positional metadata (stored in variable
``positional_metadata``) hasn't changed because a shallow copy was made:
>>> positional_metadata
list quality
0 [] 3
1 [] 3
2 [] 4
3 [] 10
>>> seq.positional_metadata.equals(positional_metadata)
False
Next let's create a sequence that has been derived from another sequence:
>>> subseq = seq[1:3]
>>> subseq
Sequence
-----------------------------
Positional metadata:
'list': <dtype: object>
'quality': <dtype: int64>
'gaps': <dtype: bool>
Stats:
length: 2
-----------------------------
0 CG
>>> subseq.positional_metadata
list quality gaps
0 [] 3 False
1 [] 4 False
As described above for metadata, since only a *shallow* copy was made of
the positional metadata, updates to mutable objects will also change the
parent sequence's positional metadata and the original positional metadata
``pd.DataFrame``:
>>> subseq.positional_metadata.loc[0, 'list'].append('item')
>>> subseq.positional_metadata
list quality gaps
0 [item] 3 False
1 [] 4 False
>>> seq.positional_metadata
list quality gaps
0 [] 999 False
1 [item] 3 False
2 [] 4 False
3 [] 10 False
>>> positional_metadata
list quality
0 [] 3
1 [item] 3
2 [] 4
3 [] 10
You can also update the interval metadata. Let's re-create a
``Sequence`` object with interval metadata at first:
>>> seq = Sequence('ACGT')
>>> interval = seq.interval_metadata.add(
... [(1, 3)], metadata={'gene': 'foo'})
You can update directly on the ``Interval`` object:
>>> interval # doctest: +ELLIPSIS
Interval(interval_metadata=<...>, bounds=[(1, 3)], \
fuzzy=[(False, False)], metadata={'gene': 'foo'})
>>> interval.bounds = [(0, 2)]
>>> interval # doctest: +ELLIPSIS
Interval(interval_metadata=<...>, bounds=[(0, 2)], \
fuzzy=[(False, False)], metadata={'gene': 'foo'})
You can also query and obtain the interval features you are
interested and then modify them:
>>> intervals = list(seq.interval_metadata.query(metadata={'gene': 'foo'}))
>>> intervals[0].fuzzy = [(True, False)]
# language-learning/src/grammar_learner/clustering.py 80925
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances, silhouette_score
from statistics import mode
from random import randint
from operator import itemgetter
from .utl import UTC, round1, round2, round3
def cluster_id(n,nmax):
def int2az(n,l='ABCDEFGHJKLMNOPQRSTUVWXYZ'):
return (int2az(n//25)+l[n%25]).lstrip("A") if n>0 else "A"
return int2az(n).zfill(len(int2az(nmax))).replace('0','A')
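# Example (sketch, not in the original file): cluster_id maps a 1-based cluster
# index to a fixed-width letter code padded with 'A', so all ids for a run sort
# consistently, e.g. cluster_id(1, 30) -> 'AB', cluster_id(2, 30) -> 'AC',
# cluster_id(26, 30) -> 'BB'.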
def cluster_words_kmeans(words_df, n_clusters, init='k-means++', n_init=10):
# words_df - pandas DataFrame
# init: 'k-means++', 'random', ndarray with random seed
# n_init - number of initializations (runs), default 10
words_list = words_df['word'].tolist()
df = words_df.copy()
del df['word']
#-fails? = KMeans(init='random', n_clusters=n_clusters, n_init=30) #80617 #F?!
#-kmeans_model = KMeans(init='k-means++', n_clusters=n_clusters, n_init=10)
kmeans_model = KMeans(init=init, n_clusters=n_clusters, n_init=n_init)
kmeans_model.fit(df)
labels = kmeans_model.labels_
inertia = kmeans_model.inertia_
centroids = np.asarray(kmeans_model.cluster_centers_[:(max(labels)+1)])
silhouette = silhouette_score(df, labels, metric ='euclidean')
cdf = pd.DataFrame(centroids)
cdf = cdf.applymap(lambda x: x if abs(x) > 1e-12 else 0.)
cdf.columns = [x+1 if type(x)==int else x for x in cdf.columns]
cols = cdf.columns.tolist()
def cluster_word_list(i):
return [words_list[j] for j,x in enumerate(labels) if x==i]
cdf['cluster'] = cdf.index
cdf['cluster_words'] = cdf['cluster'].apply(cluster_word_list)
#-cdf = cdf.sort_values(by=[1,2], ascending=[True,True]) # 81020: [x]
#-cdf.index = range(1, len(cdf)+1)
cdf['cluster'] = cdf['cluster'].apply(lambda x: cluster_id(x+1, len(cdf)))
cols = ['cluster', 'cluster_words'] + cols
cdf = cdf[cols]
return cdf, silhouette, inertia
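# Illustrative sketch (not part of the original file): the expected input is a
# DataFrame with a 'word' column plus integer-named vector dimensions.
#
#   words_df = pd.DataFrame({'word': ['dog', 'cat', 'sun', 'moon'],
#                            1: [0.9, 0.8, 0.1, 0.2],
#                            2: [0.1, 0.2, 0.9, 0.8]})
#   cdf, silhouette, inertia = cluster_words_kmeans(words_df, n_clusters=2)
#   # cdf columns: ['cluster', 'cluster_words', 1, 2], one row per centroid.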
def number_of_clusters(vdf, **kwargs): #80809
def kwa(v,k): return kwargs[k] if k in kwargs else v
algorithm = kwa('kmeans', 'clustering')
criteria = kwa('silhouette', 'cluster_criteria')
level = kwa(1.0, 'cluster_level')
verbose = kwa('none', 'verbose')
crange = kwa((2,48,3), 'cluster_range')
# crange :: cluster range:
# (10) = (10,10) = (10,10,n) :: 10 clusters
# (10,40,5) :: min, max, step
# (10,40,5,n) :: min, max, step, m tests for each step
if len(crange) < 2 or crange[1] == crange[0]:
if verbose in ['max', 'debug']:
print('number_of_clusters:', crange[0], 'from range:', crange)
return crange[0]
elif len(crange) == 4:
attempts = crange[3]
else: attempts = 1
sil_range = pd.DataFrame(columns=['Np','Nc','Silhouette','Inertia'])
if verbose == 'debug':
print('clustering.py number_of_clusters: vdf:\n', \
vdf.applymap(round2).sort_values(by=[1,2], ascending=[True,True]).head(10))
# Check number of clusters <= word vector dimensionality
max_clusters = min(max(crange[0], crange[1]), len(vdf), \
max([x for x in list(vdf) if isinstance(x,int)]))
#?if max([x for x in list(vdf) if isinstance(x,int)]) < cluster_range[0]+1:
#? max_clusters = min(cluster_range[1], len(vdf)) #FIXME: hack 80420!
if max([x for x in list(vdf) if isinstance(x,int)]) == 2:
if verbose in ['max','debug']: print('2 dim word space -- 4 clusters')
return 4 #FIXME: hack 80420!
if verbose in ['max', 'debug']:
print(UTC(),':: number_of_clusters: range:', \
crange[0], max_clusters, crange[2])
n_clusters = max_clusters
lst = []
for k in range(attempts):
for i,j in enumerate(range(crange[0], max_clusters, crange[2])):
cdf, silhouette, inertia = cluster_words_kmeans(vdf, j)
if verbose in ['max','debug']:
print(UTC(),':: number_of_clusters:', j, \
'⇒ silhouette =', round(silhouette,3))
sil_range.loc[i] = [j, len(cdf), round(silhouette,4), round(inertia,2)]
if level > 0.9999: # 1 - max Silhouette index
n_clusters = sil_range.loc[sil_range['Silhouette'].idxmax()]['Nc']
elif level < 0.0001: # 0 - max number of clusters
n_clusters = sil_range.loc[sil_range['Nc'].idxmax()]['Nc']
else:
thresh = level * sil_range.loc[sil_range['Silhouette'].idxmax()]['Silhouette']
n_clusters = min(sil_range.loc[sil_range['Silhouette'] > thresh]['Nc'].tolist())
lst.append(int(n_clusters))
dct = dict()
for n in lst:
if n in dct:
dct[n] += 1
else: dct[n] = 1
n_clusters = int(round(np.mean(lst),0))
n2 = list(dct.keys())[list(dct.values()).index(max(list(dct.values())))]
if n2 != n_clusters:
if len(list(dct.values())) == len(set(list(dct.values()))):
n3 = mode(lst) # FIXME: Might get error?
else: n3 = n_clusters
n_clusters = int(round((n_clusters + n2 + n3)/3.0, 0))
if verbose in ['max', 'debug']:
if len(dct) > 1:
print(UTC(),':: number_of_clusters:', sorted(lst), \
'⇒', n_clusters, 'clusters weighted average')
return int(n_clusters)
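# Usage sketch (kwargs mirror the kwa() defaults above; values are illustrative):
#   n = number_of_clusters(vdf, cluster_range=(2, 12, 2, 3),
#                          cluster_criteria='silhouette',
#                          cluster_level=0.9, verbose='min')
#   # runs k-means for k = 2, 4, ... (3 attempts per k) and picks the smallest k
#   # whose silhouette exceeds 0.9 of the best, combining the per-attempt picks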
def best_clusters(vdf, **kwargs): #80809
def kwa(v,k): return kwargs[k] if k in kwargs else v
#-cluster_range = kwa((2,48,1), 'cluster_range')
algo = kwa('kmeans', 'clustering')
criteria = kwa('silhouette', 'cluster_criteria')
level = kwa(1.0, 'cluster_level')
verbose = kwa('none', 'verbose')
crange = kwa((2,50,2), 'cluster_range')
# crange :: cluster range:
# (10) = (10,10) = (10,10,n) :: 10 clusters, n tests
# (10,40,5) :: min, max, step ⇒ number_of_clusters
# (10,40,5,n) :: min, max, step, n tests for each step ⇒ number_of_clusters
# (40,10,m) :: max, min, optimum: max of m top results with same number of clusters
if verbose in ['max','debug']:
print(UTC(),':: best_clusters started')
if type(algo) is str:
if algo == 'kmeans':
algorithm = 'kmeans'
init = 'k-means++'
n_init = 10
elif type(algo) in [tuple, list]:
if algo[0] == 'kmeans':
algorithm = 'kmeans'
if len(algo) > 1:
if algo[1][0] == 'r':
init = 'random'
else: init = 'k-means++'
else: init = 'k-means++'
if len(algo) > 2:
try: n_init = int(algo[2])
except: n_init = 10
else: n_init = 10
if len(crange) < 2 or crange[0] == crange[1]: #given n_clusters
if verbose in ['max','debug']:
print(UTC(),':: best_clusters:', crange[0], 'clusters from range', crange)
if len(crange) < 3 or crange[2] < 2:
clusters, silhouette, inertia = cluster_words_kmeans(vdf, crange[0])
return clusters, silhouette, inertia
else: # run crange[2] times to define the best
lst = []
for n in range(crange[2]):
try:
c,s,i = cluster_words_kmeans(vdf, crange[0], init, n_init)
lst.append((n, crange[0], c,s,i))
except:
if n == crange[2]-1 and len(lst) == 0:
return 0,0,0
else: continue
lst.sort(key=itemgetter(3), reverse=True)
if len(lst) > 0:
return lst[0][2], lst[0][3], lst[0][4]
else: return 0,0,0
elif crange[1] > crange[0]: #80809 option: legacy search in range
if verbose in ['max','debug']:
print(UTC(),':: best_clusters: range', crange, '⇒ number_of_clusters')
n_clusters = number_of_clusters(vdf, **kwargs)
if len(crange) > 3 and crange[3] > 1:
lst = []
for n in range(crange[3]):
try:
c,s,i = cluster_words_kmeans(vdf, n_clusters, init, n_init)
lst.append((n, n_clusters, c,s,i))
except:
if n == crange[3]-1 and len(lst) == 0:
return 0,0,0
else: continue
lst.sort(key=itemgetter(3), reverse=True)
return lst[0][2], lst[0][3], lst[0][4]
else:
clusters, silhouette, inertia = cluster_words_kmeans(vdf, n_clusters)
return clusters, silhouette, inertia
else: #TODO: elif algorithm == 'kmeans'
# Check number of clusters <= word vector dimensionality
max_clusters = min(max(crange[0],crange[1]), len(vdf), \
max([x for x in list(vdf) if isinstance(x,int)]))
if max([x for x in list(vdf) if isinstance(x,int)]) == 2:
max_clusters = 4 #FIXME: hack 80420: 2D word space ⇒ 4 clusters
c = pd.DataFrame(columns=['cluster','cluster_words'])
s = 0
i = 0
while max_clusters > crange[0]:
try:
c,s,i = cluster_words_kmeans(vdf, max_clusters, init, n_init)
break
except: max_clusters -= 1
if verbose in ['max', 'debug']:
print(UTC(),':: best_clusters: max_clusters =', max_clusters)
n_clusters = max_clusters #80623: cure case max < crange.min
if level < 0.1: return c,s,i #return max possible number of clusters
else:
lst = []
lst.append((0, max_clusters, c,s,i))
min_clusters = min(crange[0], crange[1])
if verbose in ['max', 'debug']:
print(UTC(),':: best_clusters: min_clusters =', min_clusters)
if min_clusters > max_clusters: #overkill?
if verbose in ['max', 'debug']:
print(UTC(),':: best_clusters: min > max:', \
min_clusters, '>', max_clusters, '?')
return c,s,i
else: #check min clusters, find min viable #overkill?
while min_clusters < max_clusters:
try:
print(UTC(),':: best_clusters: test')
c,s,i = cluster_words_kmeans(vdf, min_clusters, init, n_init)
break
except: min_clusters += 1
if verbose in ['max', 'debug']:
print(UTC(),':: best_clusters: checked min_clusters =', min_clusters)
lst.append((1, min_clusters, c,s,i))
middle = int((min_clusters + max_clusters)/2)
c,s,i = cluster_words_kmeans(vdf, middle, init, n_init)
lst.append((2, middle, c,s,i))
if verbose in ['debug']:
print('\nstarting lst:')
for x in lst: print([x[i] for i in [0,1,3,4]])
lst.sort(key=itemgetter(3), reverse=True)
ntop = 1
while ntop < crange[2]:
no = lst[0][1]
c,s,i = cluster_words_kmeans(vdf, no, init, n_init)
lst.append((len(lst), no, c,s,i))
dn = int(round(0.6*abs(no - lst[ntop][1]), 0))
if ntop > crange[2]/2.0:
dn = 1
if no > min_clusters:
nm = max(no - dn, min_clusters)
c,s,i = cluster_words_kmeans(vdf, nm, init, n_init)
lst.append((len(lst), nm, c,s,i))
if no < max_clusters:
nm = min(no + dn, max_clusters)
c,s,i = cluster_words_kmeans(vdf, nm, init, n_init)
lst.append((len(lst), nm, c,s,i))
lst.sort(key=itemgetter(3), reverse=True)
for i,x in enumerate(lst):
ntop = i+1
if x[1] != lst[i+1][1]: break
n_clusters = lst[0][1]
clusters = lst[0][2]
silhouette = lst[0][3]
inertia = lst[0][4]
if verbose in ['max','debug']:
print(UTC(),':: best_clusters: return', n_clusters, 'clusters')
if verbose in ['debug']:
print(clusters.applymap(round1))
if verbose in ['max','debug']:
print('\ntop clusters: test_no, n_clusters, silhouette, inertia')
for x in lst: print([x[i] for i in [0,1,3,4]])
return clusters, silhouette, inertia
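# Usage sketch (same DataFrame layout as cluster_words_kmeans; the clustering
# tuple selects the k-means init method and n_init; values are illustrative):
#   clusters, silhouette, inertia = best_clusters(
#       vdf, cluster_range=(20, 2, 3),
#       clustering=('kmeans', 'k-means++', 10),
#       cluster_level=1.0, verbose='none')
#   # cluster_range=(max, min, m) triggers the optimum-search branch above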
def group_links(links, verbose): # Group ILE # 80925
df = links.copy()
df['links'] = [[x] for x in df['link']]
del df['link']
if verbose in ['max','debug']:
print('\ngroup_links: links:\n')
with pd.option_context('display.max_rows', 6):
print(links.sort_values(by='word', ascending=True))
print('\ngroup_links: df:\n')
with pd.option_context('display.max_rows', 6): print(df)
df = df.groupby('word').agg({'links': 'sum', 'count': 'sum'}).reset_index()
df['words'] = [[x] for x in df['word']]
del df['word']
df2 = df.copy().reset_index()
df2['links'] = df2['links'].apply(lambda x: tuple(sorted(x)))
df3 = df2.groupby('links')['count'].apply(sum).reset_index()
if verbose == 'debug':
with pd.option_context('display.max_rows', 6): print('\ndf3:\n', df3)
df4 = df2.groupby('links')['words'].apply(sum).reset_index()
if df4['links'].tolist() == df3['links'].tolist():
# coding: utf-8
"""
Rumble API
Rumble Network Discovery API # noqa: E501
OpenAPI spec version: 2.11.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Organization(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'created_at': 'int',
'updated_at': 'int',
'client_id': 'str',
'download_token': 'str',
'download_token_created_at': 'int',
'permanent': 'bool',
'name': 'str',
'description': 'str',
'inactive': 'bool',
'deactivated_at': 'int',
'service_count': 'int',
'service_count_tcp': 'int',
'service_count_udp': 'int',
'service_count_arp': 'int',
'service_count_icmp': 'int',
'asset_count': 'int',
'export_token': 'str',
'export_token_created_at': 'int',
'export_token_last_used_at': 'int',
'export_token_last_used_by': 'str',
'export_token_counter': 'int',
'project': 'bool',
'parent_id': 'str',
'expiration_assets_stale': 'int',
'expiration_assets_offline': 'int',
'expiration_scans': 'int'
}
attribute_map = {
'id': 'id',
'created_at': 'created_at',
'updated_at': 'updated_at',
'client_id': 'client_id',
'download_token': 'download_token',
'download_token_created_at': 'download_token_created_at',
'permanent': 'permanent',
'name': 'name',
'description': 'description',
'inactive': 'inactive',
'deactivated_at': 'deactivated_at',
'service_count': 'service_count',
'service_count_tcp': 'service_count_tcp',
'service_count_udp': 'service_count_udp',
'service_count_arp': 'service_count_arp',
'service_count_icmp': 'service_count_icmp',
'asset_count': 'asset_count',
'export_token': 'export_token',
'export_token_created_at': 'export_token_created_at',
'export_token_last_used_at': 'export_token_last_used_at',
'export_token_last_used_by': 'export_token_last_used_by',
'export_token_counter': 'export_token_counter',
'project': 'project',
'parent_id': 'parent_id',
'expiration_assets_stale': 'expiration_assets_stale',
'expiration_assets_offline': 'expiration_assets_offline',
'expiration_scans': 'expiration_scans'
}
def __init__(self, id:str=None, created_at:int=None, updated_at:int=None, client_id:str=None, download_token:str=None, download_token_created_at:int=None, permanent:bool=None, name:str=None, description:str=None, inactive:bool=None, deactivated_at:int=None, service_count:int=None, service_count_tcp:int=None, service_count_udp:int=None, service_count_arp:int=None, service_count_icmp:int=None, asset_count:int=None, export_token:str=None, export_token_created_at:int=None, export_token_last_used_at:int=None, export_token_last_used_by:str=None, export_token_counter:int=None, project:bool=None, parent_id:str=None, expiration_assets_stale:int=None, expiration_assets_offline:int=None, expiration_scans:int=None): # noqa: E501
"""Organization - a model defined in Swagger""" # noqa: E501
self._id = None
self._created_at = None
self._updated_at = None
self._client_id = None
self._download_token = None
self._download_token_created_at = None
self._permanent = None
self._name = None
self._description = None
self._inactive = None
self._deactivated_at = None
self._service_count = None
self._service_count_tcp = None
self._service_count_udp = None
self._service_count_arp = None
self._service_count_icmp = None
self._asset_count = None
self._export_token = None
self._export_token_created_at = None
self._export_token_last_used_at = None
self._export_token_last_used_by = None
self._export_token_counter = None
self._project = None
self._parent_id = None
self._expiration_assets_stale = None
self._expiration_assets_offline = None
self._expiration_scans = None
self.discriminator = None
self.id = id
if created_at is not None:
self.created_at = created_at
if updated_at is not None:
self.updated_at = updated_at
if client_id is not None:
self.client_id = client_id
if download_token is not None:
self.download_token = download_token
if download_token_created_at is not None:
self.download_token_created_at = download_token_created_at
if permanent is not None:
self.permanent = permanent
self.name = name
if description is not None:
self.description = description
if inactive is not None:
self.inactive = inactive
if deactivated_at is not None:
self.deactivated_at = deactivated_at
if service_count is not None:
self.service_count = service_count
if service_count_tcp is not None:
self.service_count_tcp = service_count_tcp
if service_count_udp is not None:
self.service_count_udp = service_count_udp
if service_count_arp is not None:
self.service_count_arp = service_count_arp
if service_count_icmp is not None:
self.service_count_icmp = service_count_icmp
if asset_count is not None:
self.asset_count = asset_count
if export_token is not None:
self.export_token = export_token
if export_token_created_at is not None:
self.export_token_created_at = export_token_created_at
if export_token_last_used_at is not None:
self.export_token_last_used_at = export_token_last_used_at
if export_token_last_used_by is not None:
self.export_token_last_used_by = export_token_last_used_by
if export_token_counter is not None:
self.export_token_counter = export_token_counter
if project is not None:
self.project = project
if parent_id is not None:
self.parent_id = parent_id
if expiration_assets_stale is not None:
self.expiration_assets_stale = expiration_assets_stale
if expiration_assets_offline is not None:
self.expiration_assets_offline = expiration_assets_offline
if expiration_scans is not None:
self.expiration_scans = expiration_scans
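# Usage sketch (illustrative values): only `id` and `name` are validated as
# required by the setters below; every other field may be omitted and stays None.
#   org = Organization(id="org-1", name="Example Org", project=False)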
@property
def id(self):
"""Gets the id of this Organization. # noqa: E501
:return: The id of this Organization. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Organization.
:param id: The id of this Organization. # noqa: E501
:type: str
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def created_at(self):
"""Gets the created_at of this Organization. # noqa: E501
:return: The created_at of this Organization. # noqa: E501
:rtype: int
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this Organization.
:param created_at: The created_at of this Organization. # noqa: E501
:type: int
"""
self._created_at = created_at
@property
def updated_at(self):
"""Gets the updated_at of this Organization. # noqa: E501
:return: The updated_at of this Organization. # noqa: E501
:rtype: int
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this Organization.
:param updated_at: The updated_at of this Organization. # noqa: E501
:type: int
"""
self._updated_at = updated_at
@property
def client_id(self):
"""Gets the client_id of this Organization. # noqa: E501
:return: The client_id of this Organization. # noqa: E501
:rtype: str
"""
return self._client_id
@client_id.setter
def client_id(self, client_id):
"""Sets the client_id of this Organization.
:param client_id: The client_id of this Organization. # noqa: E501
:type: str
"""
self._client_id = client_id
@property
def download_token(self):
"""Gets the download_token of this Organization. # noqa: E501
:return: The download_token of this Organization. # noqa: E501
:rtype: str
"""
return self._download_token
@download_token.setter
def download_token(self, download_token):
"""Sets the download_token of this Organization.
:param download_token: The download_token of this Organization. # noqa: E501
:type: str
"""
self._download_token = download_token
@property
def download_token_created_at(self):
"""Gets the download_token_created_at of this Organization. # noqa: E501
:return: The download_token_created_at of this Organization. # noqa: E501
:rtype: int
"""
return self._download_token_created_at
@download_token_created_at.setter
def download_token_created_at(self, download_token_created_at):
"""Sets the download_token_created_at of this Organization.
:param download_token_created_at: The download_token_created_at of this Organization. # noqa: E501
:type: int
"""
self._download_token_created_at = download_token_created_at
@property
def permanent(self):
"""Gets the permanent of this Organization. # noqa: E501
:return: The permanent of this Organization. # noqa: E501
:rtype: bool
"""
return self._permanent
@permanent.setter
def permanent(self, permanent):
"""Sets the permanent of this Organization.
:param permanent: The permanent of this Organization. # noqa: E501
:type: bool
"""
self._permanent = permanent
@property
def name(self):
"""Gets the name of this Organization. # noqa: E501
:return: The name of this Organization. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Organization.
:param name: The name of this Organization. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def description(self):
"""Gets the description of this Organization. # noqa: E501
:return: The description of this Organization. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this Organization.
:param description: The description of this Organization. # noqa: E501
:type: str
"""
self._description = description
@property
def inactive(self):
"""Gets the inactive of this Organization. # noqa: E501
:return: The inactive of this Organization. # noqa: E501
:rtype: bool
"""
return self._inactive
@inactive.setter
def inactive(self, inactive):
"""Sets the inactive of this Organization.
:param inactive: The inactive of this Organization. # noqa: E501
:type: bool
"""
self._inactive = inactive
@property
def deactivated_at(self):
"""Gets the deactivated_at of this Organization. # noqa: E501
:return: The deactivated_at of this Organization. # noqa: E501
:rtype: int
"""
return self._deactivated_at
@deactivated_at.setter
def deactivated_at(self, deactivated_at):
"""Sets the deactivated_at of this Organization.
:param deactivated_at: The deactivated_at of this Organization. # noqa: E501
:type: int
"""
self._deactivated_at = deactivated_at
@property
def service_count(self):
"""Gets the service_count of this Organization. # noqa: E501
:return: The service_count of this Organization. # noqa: E501
:rtype: int
"""
return self._service_count
@service_count.setter
def service_count(self, service_count):
"""Sets the service_count of this Organization.
:param service_count: The service_count of this Organization. # noqa: E501
:type: int
"""
self._service_count = service_count
@property
def service_count_tcp(self):
"""Gets the service_count_tcp of this Organization. # noqa: E501
:return: The service_count_tcp of this Organization. # noqa: E501
:rtype: int
"""
return self._service_count_tcp
@service_count_tcp.setter
def service_count_tcp(self, service_count_tcp):
"""Sets the service_count_tcp of this Organization.
:param service_count_tcp: The service_count_tcp of this Organization. # noqa: E501
:type: int
"""
self._service_count_tcp = service_count_tcp
@property
def service_count_udp(self):
"""Gets the service_count_udp of this Organization. # noqa: E501
:return: The service_count_udp of this Organization. # noqa: E501
:rtype: int
"""
return self._service_count_udp
@service_count_udp.setter
def service_count_udp(self, service_count_udp):
"""Sets the service_count_udp of this Organization.
:param service_count_udp: The service_count_udp of this Organization. # noqa: E501
if hasattr(self, 'Isotropic'):
self.Isotropic = self.Isotropic[0:-1]
self.vzaDegrees = self.vzaDegrees[0:-1]
self.szaDegrees = self.szaDegrees[0:-1]
self.raaDegrees = self.raaDegrees[0:-1]
self.N = len(self.vzaDegrees)
self.vza = self.vza[0:-1]
self.sza = self.sza[0:-1]
self.raa = self.raa[0:-1]
def __doKernels(self):
"""Private method to run the various kernel methods"""
# the kernels
# self.IsotropicKernel()
self.RossKernel()
self.LiKernel()
def set_angle_info(self, vza, sza, raa):
"""Private method to store and organise the input angle data"""
self.vzaDegrees = np.array([vza]).flatten()
self.szaDegrees = np.array([sza]).flatten()
self.raaDegrees = np.array([raa]).flatten()
self.N = len(self.vzaDegrees)
if (self.N != len(self.szaDegrees) or self.N != len(self.raaDegrees)):
self.error('kernels: inconsistent number of samples in vza, sza and raa data: ' + str(
len(self.vzaDegrees)) + ', ' + str(len(self.szaDegrees)) + ', ' + str(len(self.raaDegrees)),
critical=self.critical)
print(self.vzaDegrees)
print(self.szaDegrees)
print(self.raaDegrees)
return [-1]
if (self.normalise >= 1):
# calculate nadir term by extending array
self.vzaDegrees = np.array(list(self.vzaDegrees) + [0.0]).flatten()
self.szaDegrees = np.array(list(self.szaDegrees) + [self.nbar]).flatten()
self.raaDegrees = np.array(list(self.raaDegrees) + [0.0]).flatten()
# note N is one too many now
self.N = len(self.vzaDegrees)
self.vza = self.dtor(self.vzaDegrees)
self.sza = self.dtor(self.szaDegrees) # -1 to make HS direction for raa = 0
self.raa = self.dtor(self.raaDegrees)
w = np.where(self.vza < 0)[0]
self.vza[w] = -self.vza[w]
self.raa[w] = self.raa[w] + self.__M_PI
w = np.where(self.sza < 0)[0]
self.sza[w] = -self.sza[w]
self.raa[w] = self.raa[w] + self.__M_PI
def __integrateKernels(self, BSAangles=[]):
"""
Private method to call integration functions for the kernels
NB - this overwrites all kernel info ... so be careful how/where you call it
@option: BSAangles=[] allows the user to set the sza angles at which the directional-hemispherical integral is
calculated, else steps of 1 degree from 0 to 89 (though I wouldn't trust it down to 90)
This function can be rather slow, so using fewer samples or an approximate function may be a good idea
"""
import scipy.integrate
if BSAangles == []:
BSAangles = np.array(range(90)) * 1.0
self.BSAangles = np.array(BSAangles).flatten()
# isotropic integral
self.BSA_Isotropic = np.zeros(len(self.BSAangles)) + 1.0
self.BSA_Ross = np.zeros(len(self.BSAangles))
self.BSA_Li = np.zeros(len(self.BSAangles))
self.BSA_Isotropic_error = np.zeros(len(self.BSAangles))
self.BSA_Ross_error = np.zeros(len(self.BSAangles))
self.BSA_Li_error = np.zeros(len(self.BSAangles))
i = 0
mu = np.cos(self.BSAangles * self.__M_PI / 180.)
for sza in self.BSAangles:
# ross integral
self.BSA_Ross[i], self.BSA_Ross_error[i] = scipy.integrate.dblquad(RossFunctionForIntegral, 0.0, 1.0,
__gfun, __hfun, args=(sza, self))
self.BSA_Li[i], self.BSA_Li_error[i] = scipy.integrate.dblquad(LiFunctionForIntegral, 0.0, 1.0, __gfun,
__hfun, args=(sza, self))
i = i + 1
self.WSA_Ross = -2.0 * scipy.integrate.simps(self.BSA_Ross * mu, mu)
self.WSA_Li = -2.0 * scipy.integrate.simps(self.BSA_Li * mu, mu)
return
def __GetPhaang(self):
"""Private method to calculate Phase angle component of kernel"""
self.__cosphaang = self.__cos1 * self.__cos2 + self.__sin1 * self.__sin2 * self.__cos3
# better check the bounds before arccos ... just to be safe
w = np.where(self.__cosphaang < -1)[0]
self.__cosphaang[w] = -1.0
w = np.where(self.__cosphaang > 1)[0]
self.__cosphaang[w] = 1.0
self.__phaang = np.arccos(self.__cosphaang)
self.__sinphaang = np.sin(self.__phaang)
def __RossKernelPart(self):
"""Private method to calculate main part of Ross kernel"""
self.__cos1 = np.cos(self.vza)
self.__cos2 = np.cos(self.sza)
self.__sin1 = np.sin(self.vza)
self.__sin2 = np.sin(self.sza)
self.__cos3 = np.cos(self.raa)
self.__GetPhaang()
self.rosselement = (self.__M_PI_2 - self.__phaang) * self.__cosphaang + self.__sinphaang
def GetDistance(self):
"""Private method to get distance component of Li kernels"""
temp = self.__tan1 * self.__tan1 + self.__tan2 * self.__tan2 - 2. * self.__tan1 * self.__tan2 * self.__cos3;
w = np.where(temp < 0)[0]
temp[w] = 0.0
self.__temp = temp # used by other functions ??
distance = np.sqrt(temp)
return distance
def GetpAngles(self, tan1):
"""Private method to do B/R transformation for ellipse shape"""
t = self.BR * tan1
w = np.where(t < 0.)[0]
t[w] = 0.0
angp = np.arctan(t)
s = np.sin(angp)
c = np.cos(angp)
# have to make sure c isn't 0
w = np.where(c == 0)[0]
c[w] = self.__NEARLYZERO
return c, s, t
def GetOverlap(self):
"""Private method to do HB ratio transformation"""
self.__temp = 1. / self.__cos1 + 1. / self.__cos2
self.__cost = self.HB * np.sqrt(
self.__distance * self.__distance + self.__tan1 * self.__tan1 * self.__tan2 * self.__tan2 * self.__sin3 * self.__sin3) / self.__temp;
w = np.where(self.__cost < -1)[0]
self.__cost[w] = -1.0
w = np.where(self.__cost > 1.0)[0]
self.__cost[w] = 1.0
self.__tvar = np.arccos(self.__cost)
self.__sint = np.sin(self.__tvar)
self.__overlap = self.__M_1_PI * (self.__tvar - self.__sint * self.__cost) * self.__temp
w = np.where(self.__overlap < 0)[0]
self.__overlap[w] = 0.0
return
def RoujeanKernel(self):
"""Private method - call to calculate Roujean shadowing kernel"""
# first make sure it's in range 0 to 2 pi
self.__phi = np.abs((self.raa % (2. * self.__M_PI)))
self.__cos3 = np.cos(self.__phi)
self.__sin3 = np.sin(self.__phi)
self.__tan1 = np.tan(self.sza)
self.__tan2 = np.tan(self.vza)
self.__distance = self.GetDistance()
self.Li = 0.5 * self.__M_1_PI * (
(self.__M_PI - self.__phi) * self.__cos3 + self.__sin3) * self.__tan1 * self.__tan2 - self.__M_1_PI * (
self.__tan1 + self.__tan2 + self.__distance);
return
def LiKernel(self):
"""Private method - call to calculate Li Kernel"""
# at some point add in LiGround kernel & LiTransit
if self.LiType == 'Roujean':
return self.RoujeanKernel()
# first make sure it's in range 0 to 2 pi
self.__phi = np.abs((self.raa % (2. * self.__M_PI)))
self.__cos3 = np.cos(self.__phi)
self.__sin3 = np.sin(self.__phi)
self.__tanti = np.tan(self.sza)
self.__tantv = np.tan(self.vza)
self.__cos1, self.__sin1, self.__tan1 = self.GetpAngles(self.__tantv);
self.__cos2, self.__sin2, self.__tan2 = self.GetpAngles(self.__tanti);
self.__GetPhaang(); # sets cos & sin phase angle terms
self.__distance = self.GetDistance(); # sets self.temp
self.GetOverlap(); # also sets self.temp
if self.LiType == 'Sparse':
if self.RecipFlag == True:
self.Li = self.__overlap - self.__temp + 0.5 * (1. + self.__cosphaang) / self.__cos1 / self.__cos2;
else:
self.Li = self.__overlap - self.__temp + 0.5 * (1. + self.__cosphaang) / self.__cos1;
else:
if self.LiType == 'Dense':
if self.RecipFlag:
self.Li = (1.0 + self.__cosphaang) / (
self.__cos1 * self.__cos2 * (self.__temp - self.__overlap)) - 2.0;
else:
self.Li = (1.0 + self.__cosphaang) / (self.__cos1 * (self.__temp - self.__overlap)) - 2.0;
else:
B = self.__temp - self.__overlap
w = np.where(B <= 2.0)
self.Li = B * 0.0
if self.RecipFlag == True:
Li = self.__overlap - self.__temp + 0.5 * (1. + self.__cosphaang) / self.__cos1 / self.__cos2;
else:
Li = self.__overlap - self.__temp + 0.5 * (1. + self.__cosphaang) / self.__cos1;
self.Li[w] = Li[w]
w = np.where(B > 2.0)
if self.RecipFlag:
Li = (1.0 + self.__cosphaang) / (self.__cos1 * self.__cos2 * (self.__temp - self.__overlap)) - 2.0;
else:
Li = (1.0 + self.__cosphaang) / (self.__cos1 * (self.__temp - self.__overlap)) - 2.0;
self.Li[w] = Li[w]
return
def IsotropicKernel(self):
"""Public method - call to calculate Isotropic kernel"""
# default behaviour
self.Isotropic = np.zeros(self.N) + 1.0
return
def RossThin(self):
"""Public method - call to calculate RossThin kernel"""
self.__RossKernelPart()
self.rosselement = self.rosselement / (self.__cos1 * self.__cos2)
return
def RossThick(self):
"""Public method - call to calculate RossThick kernel"""
self.__RossKernelPart()
self.rosselement = self.rosselement / (self.__cos1 + self.__cos2)
return
def RossKernel(self):
"""Public method - call to calculate Ross Kernel"""
if self.RossType == 'Thin':
self.RossThin()
else:
self.RossThick()
self.Ross = self.rosselement
if self.RossHS:
self.Ross = self.Ross * (1. + 1. / (1. + self.__phaang / .25))
def dtor(self, x):
"""Public method to convert degrees to radians"""
return x * self.__M_PI / 180.
def rtod(self, x):
"""Public method to convert radians to degrees"""
return x * 180. / self.__M_PI
def error(self, msg, critical=0, newline=1, code=-1):
"""
Public method to do Class error reporting
@param msg: error message
@param critical: set to 1 if exit is required (default critical=0)
@param newline: set to 0 if a newline is not required (default newline=1)
@param code: error code reported on exit if critical error (default code=-1)
"""
if newline == 1:
nl = '\n'
else:
nl = ''
print(msg + nl)
if critical == 1:
logger.exception([code])
def printIntegrals(self, header=True, reflectance=False):
"""
Public method to print kernel integrals (to stdout only at present)
"""
if (header == True):
self.printer(
'# ' + str(self.N) + ' samples Ross: ' + self.RossType + ' Li: ' + self.LiType + ' Reciprocal: ' + str(
self.RecipFlag) + ' normalisation: ' + str(self.normalise) + ' HB ' + str(self.HB) + ' BR ' + str(
self.BR) + '\n');
self.printer('# WSA: Isotropic 1.0 Ross ' + str(self.WSA_Ross) + ' Li ' + str(self.WSA_Li))
self.printer('# 1: SZA (degrees) 2: BSA Isotropic 3: BSA Ross 4: BSA Li')
if (reflectance == True):
self.printer(' ');
self.printer('\n');
for i in range(len(self.BSAangles)):
self.printer(
str(self.BSAangles[i]) + ' ' + str(self.BSA_Isotropic[i]) + '
end_time: Optional[datetime.datetime] = None,
since_id: Optional[ID] = None,
until_id: Optional[ID] = None,
sort_by_relevancy: bool = False,
) -> List[Tweet]:
"""Searches tweet from the last seven days that match a search query.
Parameters
------------
query: :class:`str`
One query for matching Tweets.
max_results: :class:`int`
The maximum number of search results to be returned by a request. A number between 10 and 100. By default, the method returns 10 results.
start_time: Optional[:class:`datetime.datetime`]
This will make sure the tweets created datetime is after that specific time.
end_time: Optional[:class:`datetime.datetime`]
This will make sure the tweets created datetime is before that specific time.
since_id: Optional[`ID`]
Returns results with a Tweet ID greater than (that is, more recent than) the specified 'since' Tweet ID. Only the 3200 most recent Tweets are available. The result will exclude the since_id. If the limit of Tweets has occurred since the since_id, the since_id will be forced to the oldest ID available.
until_id: Optional[`ID`]
Returns results with a Tweet ID less than (that is, older than) the specified 'until' Tweet ID. Only the 3200 most recent Tweets are available. The result will exclude the until_id. If the limit of Tweets has occurred since the until_id, the until_id will be forced to the most recent ID available.
sort_by_relevancy: :class:`bool`
This parameter is used to specify the order in which you want the Tweets returned. If set to True, tweets will be ordered by relevancy; otherwise they are ordered by recency. Defaults to False.
Returns
---------
Union[:class:`TweetPagination`, :class:`list`]
This method returns a list of :class:`Tweet` objects.
.. versionadded:: 1.5.0
"""
if (
not isinstance(start_time, datetime.datetime)
and start_time
or not isinstance(end_time, datetime.datetime)
and end_time
):
raise ValueError("start_time or end_time must be a datetime object!")
params = {
"expansions": TWEET_EXPANSION,
"user.fields": USER_FIELD,
"media.fields": MEDIA_FIELD,
"place.fields": PLACE_FIELD,
"poll.fields": POLL_FIELD,
"tweet.fields": TWEET_FIELD,
"query": query,
"max_results": max_results,
}
if start_time:
params["start_time"] = start_time.isoformat()
if end_time:
params["end_time"] = end_time.isoformat()
if since_id:
params["since_id"] = str(since_id)
if until_id:
params["until_id"] = str(until_id)
if sort_by_relevancy:
params["sort_order"] = "relevancy"
res = self.http.request("GET", "2", "/tweets/search/recent", params=params)
return [Tweet(data, http_client=self.http) for data in res.get("data")]
def search_all_tweet(
self,
query: str,
*,
max_results: int = 10,
start_time: Optional[datetime.datetime] = None,
end_time: Optional[datetime.datetime] = None,
since_id: Optional[ID] = None,
until_id: Optional[ID] = None,
sort_by_relevancy: bool = False,
) -> List[Tweet]:
"""Searches all tweet from the complete history of public Tweets matching a search query; since the first Tweet was created March 26, 2006. Only available to those users who have been approved for Academic Research access.
Parameters
------------
query: :class:`str`
One query for matching Tweets.
max_results: :class:`int`
The maximum number of search results to be returned by a request. A number between 10 and 100. By default, the method returns 10 results.
start_time: Optional[:class:`datetime.datetime`]
This will make sure the tweets created datetime is after that specific time.
end_time: Optional[:class:`datetime.datetime`]
This will make sure the tweets created datetime is before that specific time.
since_id: Optional[`ID`]
Returns results with a Tweet ID greater than (that is, more recent than) the specified 'since' Tweet ID. Only the 3200 most recent Tweets are available. The result will exclude the since_id. If the limit of Tweets has occurred since the since_id, the since_id will be forced to the oldest ID available.
until_id: Optional[`ID`]
Returns results with a Tweet ID less than (that is, older than) the specified 'until' Tweet ID. Only the 3200 most recent Tweets are available. The result will exclude the until_id. If the limit of Tweets has occurred since the until_id, the until_id will be forced to the most recent ID available.
sort_by_relevancy: :class:`bool`
This parameter is used to specify the order in which you want the Tweets returned. If set to True, tweets will be ordered by relevancy; otherwise they are ordered by recency. Defaults to False.
Returns
---------
Union[:class:`TweetPagination`, :class:`list`]
This method returns a list of :class:`Tweet` objects.
.. versionadded:: 1.5.0
"""
if (
not isinstance(start_time, datetime.datetime)
and start_time
or not isinstance(end_time, datetime.datetime)
and end_time
):
raise ValueError("start_time or end_time must be a datetime object!")
params = {
"expansions": TWEET_EXPANSION,
"user.fields": USER_FIELD,
"media.fields": MEDIA_FIELD,
"place.fields": PLACE_FIELD,
"poll.fields": POLL_FIELD,
"tweet.fields": TWEET_FIELD,
"query": query,
"max_results": max_results,
}
if start_time:
params["start_time"] = start_time.isoformat()
if end_time:
params["end_time"] = end_time.isoformat()
if since_id:
params["since_id"] = str(since_id)
if until_id:
params["until_id"] = str(until_id)
if sort_by_relevancy:
params["sort_order"] = "relevancy"
res = self.http.request("GET", "2", "/tweets/search/all", params=params)
return [Tweet(data, http_client=self.http) for data in res.get("data")]
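# Usage sketch (assumes an authenticated Client instance named `client`;
# the /2/tweets/search/all route requires Academic Research access):
#   import datetime
#   tweets = client.search_all_tweet(
#       "solar energy", max_results=50,
#       start_time=datetime.datetime(2020, 1, 1),
#       sort_by_relevancy=True)
#   for tweet in tweets:
#       print(tweet)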
def get_user(self, user_id: ID) -> Optional[User]:
"""Gets a user through the client internal user cache. Return None if the user is not in the cache.
.. note::
Users get cached under several conditions:
* Users returned from a method such as :meth:`Client.fetch_user`.
* The client interacts with other users, e.g. by sending them a direct message, triggering the typing animation, or liking the client's tweets (this condition only applies if you use :meth:`Client.listen` at the very end of the file).
Parameters
------------
user_id: :class:`ID`
The ID of a user that you want to get.
Raises
--------
ValueError:
Raised when the user_id argument is not an integer or a string of digits.
Returns
---------
:class:`User`
This method returns a :class:`User` object or None if the user was not found.
.. versionadded:: 1.5.0
"""
try:
user_id = int(user_id)
except ValueError:
raise ValueError("user_id must be an integer or a string of digits.")
return self.http.user_cache.get(user_id)
def get_tweet(self, tweet_id: ID) -> Optional[Tweet]:
"""Gets a tweet through the client internal tweet cache. Return None if the tweet is not in the cache.
.. note::
Tweets will get cache with several conditions:
* Tweets send by the client.
* Tweets send by the subscription users.
* Tweets return from a method such as: :meth:`Client.fetch_tweet`
Parameters
------------
tweet_id: :class:`ID`
The ID of a tweet that you want to get.
Raises
--------
ValueError:
Raised when the tweet_id argument is not an integer or a string of digits.
Returns
---------
:class:`Tweet`
This method returns a :class:`Tweet` object or None if the tweet was not found.
.. versionadded:: 1.2.0
"""
try:
tweet_id = int(tweet_id)
except ValueError:
raise ValueError("tweet_id must be an integer or a string of digits.")
return self.http.tweet_cache.get(tweet_id)
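# Usage sketch: cache lookups return None on a miss, so guard accordingly
# (fetch_tweet is the API fallback mentioned in the docstring above):
#   tweet = client.get_tweet(1234567890)
#   if tweet is None:
#       tweet = client.fetch_tweet(1234567890)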
def get_direct_message(self, event_id: ID) -> Optional[DirectMessage]:
"""Get a direct message through the client message cache. Returns None if the message is not in the cache.
.. note::
Messages will get cache with several conditions:
* Messages send by the client.
* Messages send by the subscription users.
* Messages return from a method such as: :meth:`Client.fetch_direct_message`
Parameters
------------
event_id: :class:`ID`
The event ID of the Direct Message event that you want to get.
Returns
---------
:class:`DirectMessage`
This method returns a :class:`DirectMessage` object.
.. versionadded:: 1.2.0
"""
try:
event_id = int(event_id)
except ValueError:
raise ValueError("Event id must be an integer or a :class:`str`ing of digits.")
return self.http.message_cache.get(event_id)
def stream(self, *, dry_run: bool = False) -> None:
"""Stream realtime in twitter for tweets! This method use the stream argument in :meth:`request.get` for streaming in one of the stream endpoint that twitter api provides. If you want to use this method, make sure to provides the stream kwarg in your :class:`Client` instance and make an on_stream event to get the stream's tweet data and connection,
example:
.. code-block:: py
import pytweet
stream = pytweet.Stream()
stream.add_rule("pytweet") # this makes sure to only return tweets that have the pytweet keyword in them.
client = pytweet.Client(
...
stream=stream
)
@client.event
def on_stream(tweet, connection):
... # Do what you want with the tweet and the stream connection you got.
client.stream()
You can also add rules and specify which tweets should be retrieved based on the tweets' characteristic features.
Parameters
------------
dry_run: :class:`bool`
Indicates if you want to debug your rule's operator syntax.
.. versionadded:: 1.3.5
"""
if not self.http.stream:
raise TypeError("'stream' argument is missing in client!")
try:
self.http.stream.connect(dry_run=dry_run)
except KeyboardInterrupt:
print("\nKeyboardInterrupt: Exit stream.")
def listen(
self,
app: Flask,
*,
url: str,
env_label: str,
sleep_for: Union[int, float] = 0.50,
ngrok: bool =
if e.params else e.message)
return self.get(request, *args, **kwargs)
for (i, v), (c, price) in selected.items():
data.append({
'addon_to': f['cartpos'].pk,
'item': i.pk,
'variation': v.pk if v else None,
'count': c,
'price': price,
})
return self.do(self.request.event.id, data, get_or_create_cart_id(self.request),
invoice_address=self.invoice_address.pk, locale=get_language(),
sales_channel=request.sales_channel.identifier)
class QuestionsStep(QuestionsViewMixin, CartMixin, TemplateFlowStep):
priority = 50
identifier = "questions"
template_name = "pretixpresale/event/checkout_questions.html"
label = pgettext_lazy('checkoutflow', 'Your information')
def is_applicable(self, request):
return True
@cached_property
def all_optional(self):
for recv, resp in checkout_all_optional.send(sender=self.request.event, request=self.request):
if resp:
return True
return False
@cached_property
def _contact_override_sets(self):
return [
resp for recv, resp in contact_form_fields_overrides.send(
self.request.event,
request=self.request,
order=None,
)
]
@cached_property
def contact_form(self):
wd = self.cart_session.get('widget_data', {})
initial = {
'email': (
self.cart_session.get('email', '') or
wd.get('email', '')
),
'phone': wd.get('phone', None)
}
initial.update(self.cart_session.get('contact_form_data', {}))
override_sets = self._contact_override_sets
for overrides in override_sets:
initial.update({
k: v['initial'] for k, v in overrides.items() if 'initial' in v
})
f = ContactForm(data=self.request.POST if self.request.method == "POST" else None,
event=self.request.event,
request=self.request,
initial=initial, all_optional=self.all_optional)
if wd.get('email', '') and wd.get('fix', '') == "true":
f.fields['email'].disabled = True
for overrides in override_sets:
for fname, val in overrides.items():
if 'disabled' in val and fname in f.fields:
f.fields[fname].disabled = val['disabled']
return f
def get_question_override_sets(self, cart_position):
return [
resp for recv, resp in question_form_fields_overrides.send(
self.request.event,
position=cart_position,
request=self.request
)
]
@cached_property
def eu_reverse_charge_relevant(self):
return any([p.item.tax_rule and (p.item.tax_rule.eu_reverse_charge or p.item.tax_rule.custom_rules)
for p in self.positions])
@cached_property
def invoice_form(self):
wd = self.cart_session.get('widget_data', {})
if not self.invoice_address.pk:
wd_initial = {
'name_parts': {
k[21:].replace('-', '_'): v
for k, v in wd.items()
if k.startswith('invoice-address-name-')
},
'company': wd.get('invoice-address-company', ''),
'is_business': bool(wd.get('invoice-address-company', '')),
'street': wd.get('invoice-address-street', ''),
'zipcode': wd.get('invoice-address-zipcode', ''),
'city': wd.get('invoice-address-city', ''),
'country': wd.get('invoice-address-country', ''),
}
else:
wd_initial = {}
initial = dict(wd_initial)
override_sets = self._contact_override_sets
for overrides in override_sets:
initial.update({
k: v['initial'] for k, v in overrides.items() if 'initial' in v
})
if not self.address_asked and self.request.event.settings.invoice_name_required:
f = InvoiceNameForm(data=self.request.POST if self.request.method == "POST" else None,
event=self.request.event,
request=self.request,
instance=self.invoice_address,
initial=initial,
validate_vat_id=False, all_optional=self.all_optional)
else:
f = InvoiceAddressForm(data=self.request.POST if self.request.method == "POST" else None,
event=self.request.event,
request=self.request,
initial=initial,
instance=self.invoice_address,
validate_vat_id=self.eu_reverse_charge_relevant, all_optional=self.all_optional)
for name, field in f.fields.items():
if wd_initial.get(name) and wd.get('fix', '') == 'true':
field.disabled = True
for overrides in override_sets:
for fname, val in overrides.items():
if 'disabled' in val and fname in f.fields:
f.fields[fname].disabled = val['disabled']
return f
@cached_property
def address_asked(self):
return (
self.request.event.settings.invoice_address_asked
and (not self.request.event.settings.invoice_address_not_asked_free or not get_cart_is_free(self.request))
)
def post(self, request):
self.request = request
failed = not self.save() or not self.contact_form.is_valid()
if self.address_asked or self.request.event.settings.invoice_name_required:
failed = failed or not self.invoice_form.is_valid()
if failed:
messages.error(request,
_("We had difficulties processing your input. Please review the errors below."))
return self.render()
self.cart_session['email'] = self.contact_form.cleaned_data['email']
d = dict(self.contact_form.cleaned_data)
if d.get('phone'):
d['phone'] = str(d['phone'])
self.cart_session['contact_form_data'] = d
if self.address_asked or self.request.event.settings.invoice_name_required:
addr = self.invoice_form.save()
try:
diff = update_tax_rates(
event=request.event,
cart_id=get_or_create_cart_id(request),
invoice_address=addr
)
except TaxRule.SaleNotAllowed:
messages.error(request,
_("Unfortunately, based on the invoice address you entered, we're not able to sell you "
"the selected products for tax-related legal reasons."))
return self.render()
self.cart_session['invoice_address'] = addr.pk
if abs(diff) > Decimal('0.001'):
messages.info(request, _('Due to the invoice address you entered, we need to apply a different tax '
'rate to your purchase and the price of the products in your cart has '
'changed accordingly.'))
return redirect(self.get_next_url(request) + '?open_cart=true')
return redirect(self.get_next_url(request))
def is_completed(self, request, warn=False):
self.request = request
try:
emailval = EmailValidator()
if not self.cart_session.get('email') and not self.all_optional:
if warn:
messages.warning(request, _('Please enter a valid email address.'))
return False
if self.cart_session.get('email'):
emailval(self.cart_session.get('email'))
except ValidationError:
if warn:
messages.warning(request, _('Please enter a valid email address.'))
return False
if not self.all_optional:
if self.address_asked:
if request.event.settings.invoice_address_required and (not self.invoice_address or not self.invoice_address.street):
messages.warning(request, _('Please enter your invoicing address.'))
return False
if request.event.settings.invoice_name_required and (not self.invoice_address or not self.invoice_address.name):
messages.warning(request, _('Please enter your name.'))
return False
for cp in self._positions_for_questions:
answ = {
aw.question_id: aw for aw in cp.answerlist
}
question_cache = {
q.pk: q for q in cp.item.questions_to_ask
}
def question_is_visible(parentid, qvals):
if parentid not in question_cache:
return False
parentq = question_cache[parentid]
if parentq.dependency_question_id and not question_is_visible(parentq.dependency_question_id, parentq.dependency_values):
return False
if parentid not in answ:
return False
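# The parent's answer makes the child visible if it matches one of the
# dependency values: boolean answers compare against the strings 'True'/'False',
# choice answers against the selected options' identifiers.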
return (
('True' in qvals and answ[parentid].answer == 'True')
or ('False' in qvals and answ[parentid].answer == 'False')
or (any(qval in [o.identifier for o in answ[parentid].options.all()] for qval in qvals))
)
def question_is_required(q):
return (
q.required and
(not q.dependency_question_id or question_is_visible(q.dependency_question_id, q.dependency_values))
)
for q in cp.item.questions_to_ask:
if question_is_required(q) and q.id not in answ:
if warn:
messages.warning(request, _('Please fill in answers to all required questions.'))
return False
if cp.item.admission and self.request.event.settings.get('attendee_names_required', as_type=bool) \
and not cp.attendee_name_parts:
if warn:
messages.warning(request, _('Please fill in answers to all required questions.'))
return False
if cp.item.admission and self.request.event.settings.get('attendee_emails_required', as_type=bool) \
and cp.attendee_email is None:
if warn:
messages.warning(request, _('Please fill in answers to all required questions.'))
return False
if cp.item.admission and self.request.event.settings.get('attendee_company_required', as_type=bool) \
and cp.company is None:
if warn:
messages.warning(request, _('Please fill in answers to all required questions.'))
return False
if cp.item.admission and self.request.event.settings.get('attendee_attendees_required', as_type=bool) \
and (cp.street is None or cp.city is None or cp.country is None):
if warn:
messages.warning(request, _('Please fill in answers to all required questions.'))
return False
responses = question_form_fields.send(sender=self.request.event, position=cp)
form_data = cp.meta_info_data.get('question_form_data', {})
for r, response in sorted(responses, key=lambda r: str(r[0])):
for key, value in response.items():
if value.required and not form_data.get(key):
return False
return True
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['formgroups'] = self.formdict.items()
ctx['contact_form'] = self.contact_form
ctx['invoice_form'] = self.invoice_form
ctx['reverse_charge_relevant'] = self.eu_reverse_charge_relevant
ctx['cart'] = self.get_cart()
ctx['cart_session'] = self.cart_session
ctx['invoice_address_asked'] = self.address_asked
return ctx
class PaymentStep(CartMixin, TemplateFlowStep):
priority = 200
identifier = "payment"
template_name = "pretixpresale/event/checkout_payment.html"
label = pgettext_lazy('checkoutflow', 'Payment')
icon = 'credit-card'
@cached_property
def _total_order_value(self):
cart = get_cart(self.request)
total = get_cart_total(self.request)
total += sum([f.value for f in get_fees(self.request.event, self.request, total, self.invoice_address, None,
cart)])
return Decimal(total)
@cached_property
def provider_forms(self):
providers = []
for provider in sorted(self.request.event.get_payment_providers().values(), key=lambda p: str(p.public_name)):
if not provider.is_enabled or not self._is_allowed(provider, self.request):
continue
fee = provider.calculate_fee(self._total_order_value)
if 'total' in inspect.signature(provider.payment_form_render).parameters:
form = provider.payment_form_render(self.request, self._total_order_value + fee)
else:
form = provider.payment_form_render(self.request)
providers.append({
'provider': provider,
'fee': fee,
'total': self._total_order_value + fee,
'form': form
})
return providers
def post(self, request):
self.request = request
for p in self.provider_forms:
if p['provider'].identifier == request.POST.get('payment', ''):
self.cart_session['payment'] = p['provider'].identifier
resp = p['provider'].checkout_prepare(
request,
self.get_cart()
)
if isinstance(resp, str):
return redirect(resp)
elif resp is True:
return redirect(self.get_next_url(request))
else:
return self.render()
messages.error(self.request, _("Please select a payment method."))
return self.render()
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['providers'] = self.provider_forms
ctx['show_fees'] = any(p['fee'] for p in self.provider_forms)
ctx['selected'] = self.request.POST.get('payment', self.cart_session.get('payment', ''))
if len(self.provider_forms) == 1:
ctx['selected'] = self.provider_forms[0]['provider'].identifier
ctx['cart'] = self.get_cart()
return ctx
@cached_property
def payment_provider(self):
return self.request.event.get_payment_providers().get(self.cart_session['payment'])
def _is_allowed(self, prov, request):
return prov.is_allowed(request, total=self._total_order_value)
def is_completed(self, request, warn=False):
self.request = request
if 'payment' not in self.cart_session or not self.payment_provider:
if warn:
messages.error(request, _('The payment information you entered was incomplete.'))
return False
if not self.payment_provider.payment_is_valid_session(request) or \
not self.payment_provider.is_enabled or \
not self._is_allowed(self.payment_provider, request):
if warn:
messages.error(request, _('The payment information you entered was incomplete.'))
return False
return True
def is_applicable(self, request):
self.request = request
for cartpos in get_cart(self.request):
if cartpos.item.require_approval:
if 'payment' in self.cart_session:
del self.cart_session['payment']
return False
for p in self.request.event.get_payment_providers().values():
if p.is_implicit(request) if callable(p.is_implicit) else p.is_implicit:
if self._is_allowed(p, request):
self.cart_session['payment'] = p.identifier
return False
elif self.cart_session.get('payment') == p.identifier:
# is_allowed might have changed, e.g. after add-on selection
del self.cart_session['payment']
return True
class ConfirmStep(CartMixin, AsyncAction, TemplateFlowStep):
priority = 1001
identifier = "confirm"
template_name = "pretixpresale/event/checkout_confirm.html"
task = perform_order
known_errortypes = ['OrderError']
label = pgettext_lazy('checkoutflow', 'Review order')
icon = 'eye'
def is_applicable(self, request):
return True
def is_completed(self, request, warn=False):
pass
@cached_property
def address_asked(self):
return (
self.request.event.settings.invoice_address_asked
and (not self.request.event.settings.invoice_address_not_asked_free or not get_cart_is_free(self.request))
)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['cart'] = self.get_cart(answers=True)
if self.payment_provider:
ctx['payment'] = self.payment_provider.checkout_confirm_render(self.request)
ctx['payment_provider'] = self.payment_provider
ctx['require_approval'] = any(cp.item.require_approval for cp in ctx['cart']['positions'])
ctx['addr'] = self.invoice_address
ctx['confirm_messages'] = self.confirm_messages
ctx['cart_session'] = self.cart_session
ctx['invoice_address_asked'] = self.address_asked
self.cart_session['shown_total'] = str(ctx['cart']['total'])
email = self.cart_session.get('contact_form_data', {}).get('email')
if email != settings.PRETIX_EMAIL_NONE_VALUE:
ctx['contact_info'] = [
(_('E-mail'), email),
]
else:
ctx['contact_info'] = []
phone = self.cart_session.get('contact_form_data', {}).get('phone')
if phone:
ctx['contact_info'].append((_('Phone number'), phone))
responses = contact_form_fields.send(self.event, request=self.request)
for r, response in
"site_id": "d2018f1d-82b1-422a-8ec4-4e8b3fe92a4a",
"variable": "ghi",
"interval_value_type": "interval_mean",
"interval_label": "beginning",
"interval_length": 5,
"uncertainty": 0.10,
"created_at": pytz.utc.localize(dt.datetime(2019, 3, 1, 12, 1, 55)),
"modified_at": pytz.utc.localize(dt.datetime(2019, 3, 1, 12, 1, 55))
},
"b1dfe2cb-9c8e-43cd-afcf-c5a6feaf81e2": {
"extra_parameters": (
'{"instrument": "Kipp & Zonen CMP 22 Pyranometer",'
' "network": "NOAA"}'
),
"name": "<NAME>",
"observation_id": "b1dfe2cb-9c8e-43cd-afcf-c5a6feaf81e2",
"provider": "Organization 1",
"site_id": "d2018f1d-82b1-422a-8ec4-4e8b3fe92a4a",
"variable": "ghi",
"interval_value_type": "interval_mean",
"interval_label": "beginning",
"interval_length": 5,
"uncertainty": 0.10,
"created_at": pytz.utc.localize(dt.datetime(2019, 3, 1, 12, 2, 38)),
"modified_at": pytz.utc.localize(dt.datetime(2019, 3, 1, 12, 2, 38))
},
'991d15ce-7f66-11ea-96ae-0242ac150002': {
'name': 'Weather Station Event Observation',
'variable': 'event',
'interval_value_type': 'instantaneous',
'interval_length': 5.0,
'interval_label': 'event',
'site_id': '123e4567-e89b-12d3-a456-426655440001',
'uncertainty': 1.0,
'observation_id': '991d15ce-7f66-11ea-96ae-0242ac150002',
'provider': 'Organization 1',
'created_at': pytz.utc.localize(dt.datetime(2019, 4, 14, 7, 00, 00)),
'modified_at': pytz.utc.localize(dt.datetime(2019, 4, 14, 7, 00, 00)),
'extra_parameters': ''}
}
demo_forecasts = {
'11c20780-76ae-4b11-bef1-7a75bdc784e3': {
"extra_parameters": "",
"forecast_id": "11c20780-76ae-4b11-bef1-7a75bdc784e3",
"name": "<NAME>",
"provider": "Organization 1",
"site_id": "123e4567-e89b-12d3-a456-426655440001",
"aggregate_id": None,
"variable": "ghi",
"issue_time_of_day": "06:00",
"interval_length": 5,
"run_length": 1440,
"interval_label": "beginning",
"lead_time_to_start": 60,
"interval_value_type": "interval_mean",
"created_at": pytz.utc.localize(dt.datetime(2019, 3, 1, 11, 55, 37)),
"modified_at": pytz.utc.localize(dt.datetime(2019, 3, 1, 11, 55, 37))
},
'f8dd49fa-23e2-48a0-862b-ba0af6dec276': {
"extra_parameters": "",
"forecast_id": "f8dd49fa-23e2-48a0-862b-ba0af6dec276",
"name": "<NAME>",
"provider": "Organization 1",
"site_id": "123e4567-e89b-12d3-a456-426655440002",
"aggregate_id": None,
"variable": "ac_power",
"issue_time_of_day": "12:00",
"run_length": 60,
"interval_length": 1,
"interval_label": "beginning",
"lead_time_to_start": 60,
"interval_value_type": "interval_mean",
"created_at": pytz.utc.localize(dt.datetime(2019, 3, 1, 11, 55, 38)),
"modified_at": pytz.utc.localize(dt.datetime(2019, 3, 1, 11, 55, 38))
},
'39220780-76ae-4b11-bef1-7a75bdc784e3': {
"extra_parameters": "",
"forecast_id": "39220780-76ae-4b11-bef1-7a75bdc784e3",
"name": "GHI Aggregate FX",
"provider": "Organization 1",
"site_id": None,
"aggregate_id": "458ffc27-df0b-11e9-b622-62adb5fd6af0",
"variable": "ghi",
"issue_time_of_day": "06:00",
"run_length": 1440,
"interval_length": 5,
"interval_label": "beginning",
"lead_time_to_start": 60,
"interval_value_type": "interval_mean",
"created_at": pytz.utc.localize(dt.datetime(2019, 3, 1, 11, 55, 37)),
"modified_at": pytz.utc.localize(dt.datetime(2019, 3, 1, 11, 55, 37))
},
'49220780-76ae-4b11-bef1-7a75bdc784e3': {
"extra_parameters": "",
"forecast_id": "49220780-76ae-4b11-bef1-7a75bdc784e3",
"name": "GHI Aggregate FX 60",
"provider": "Organization 1",
"site_id": None,
"aggregate_id": "458ffc27-df0b-11e9-b622-62adb5fd6af0",
"variable": "ghi",
"issue_time_of_day": "00:00",
"run_length": 1440,
"interval_length": 60,
"interval_label": "beginning",
"lead_time_to_start": 0,
"interval_value_type": "interval_mean",
"created_at": pytz.utc.localize(dt.datetime(2019, 3, 1, 11, 55, 37)),
"modified_at": pytz.utc.localize(dt.datetime(2019, 3, 1, 11, 55, 37))
},
'24cbae4e-7ea6-11ea-86b1-0242ac150002': {
'name': 'Weather Station Event Forecast',
'issue_time_of_day': '05:00',
'lead_time_to_start': 60.0,
'interval_length': 5.0,
'run_length': 60.0,
'interval_label': 'event',
'interval_value_type': 'instantaneous',
'variable': 'event',
'forecast_id': '24cbae4e-7ea6-11ea-86b1-0242ac150002',
'site_id': '123e4567-e89b-12d3-a456-426655440001',
'aggregate_id': None,
'provider': 'Organization 1',
'extra_parameters': '',
'created_at': pytz.utc.localize(dt.datetime(2019, 4, 14, 7, 00, 00)),
'modified_at': pytz.utc.localize(dt.datetime(2019, 4, 14, 7, 00, 00)),
}
}
demo_single_cdf = {
'633f9396-50bb-11e9-8647-d663bd873d93': {
"forecast_id": '633f9396-50bb-11e9-8647-d663bd873d93',
"constant_value": 5.0,
"parent": 'ef51e87c-50b9-11e9-8647-d663bd873d93',
},
'633f9864-50bb-11e9-8647-d663bd873d93': {
"forecast_id": '633f9864-50bb-11e9-8647-d663bd873d93',
"constant_value": 20.0,
"parent": 'ef51e87c-50b9-11e9-8647-d663bd873d93',
},
'633f9b2a-50bb-11e9-8647-d663bd873d93': {
"forecast_id": '633f9b2a-50bb-11e9-8647-d663bd873d93',
"constant_value": 50.0,
"parent": 'ef51e87c-50b9-11e9-8647-d663bd873d93',
},
'633f9d96-50bb-11e9-8647-d663bd873d93': {
"forecast_id": '633f9d96-50bb-11e9-8647-d663bd873d93',
"constant_value": 80.0,
"parent": 'ef51e87c-50b9-11e9-8647-d663bd873d93',
},
'633fa548-50bb-11e9-8647-d663bd873d93': {
"forecast_id": '633fa548-50bb-11e9-8647-d663bd873d93',
"constant_value": 95.0,
"parent": 'ef51e87c-50b9-11e9-8647-d663bd873d93',
},
'633fa94e-50bb-11e9-8647-d663bd873d93': {
"forecast_id": '633fa94e-50bb-11e9-8647-d663bd873d93',
"constant_value": 0.0,
"parent": '058b182a-50ba-11e9-8647-d663bd873d93',
},
'633fabec-50bb-11e9-8647-d663bd873d93': {
"forecast_id": '633fabec-50bb-11e9-8647-d663bd873d93',
"constant_value": 5.0,
"parent": '058b182a-50ba-11e9-8647-d663bd873d93',
},
'633fae62-50bb-11e9-8647-d663bd873d93': {
"forecast_id": '633fae62-50bb-11e9-8647-d663bd873d93',
"constant_value": 10.0,
"parent": '058b182a-50ba-11e9-8647-d663bd873d93',
},
'633fb114-50bb-11e9-8647-d663bd873d93': {
"forecast_id": '633fb114-50bb-11e9-8647-d663bd873d93',
"constant_value": 15.0,
"parent": '058b182a-50ba-11e9-8647-d663bd873d93',
},
'633fb3a8-50bb-11e9-8647-d663bd873d93': {
"forecast_id": '633fb3a8-50bb-11e9-8647-d663bd873d93',
"constant_value": 20.0,
"parent": '058b182a-50ba-11e9-8647-d663bd873d93',
},
'733f9396-50bb-11e9-8647-d663bd873d93': {
"forecast_id": '733f9396-50bb-11e9-8647-d663bd873d93',
"constant_value": 10.0,
"parent": 'f6b620ca-f743-11e9-a34f-f4939feddd82'
},
'733f9864-50bb-11e9-8647-d663bd873d93': {
"forecast_id": '733f9864-50bb-11e9-8647-d663bd873d93',
"constant_value": 20.0,
"parent": 'f6b620ca-f743-11e9-a34f-f4939feddd82'
},
'733f9b2a-50bb-11e9-8647-d663bd873d93': {
"forecast_id": '733f9b2a-50bb-11e9-8647-d663bd873d93',
"constant_value": 50.0,
"parent": 'f6b620ca-f743-11e9-a34f-f4939feddd82'
},
'733f9d96-50bb-11e9-8647-d663bd873d93': {
"forecast_id": '733f9d96-50bb-11e9-8647-d663bd873d93',
"constant_value": 80.0,
"parent": 'f6b620ca-f743-11e9-a34f-f4939feddd82'
},
'733fa548-50bb-11e9-8647-d663bd873d93': {
"forecast_id": '733fa548-50bb-11e9-8647-d663bd873d93',
"constant_value": 100.0,
"parent": 'f6b620ca-f743-11e9-a34f-f4939feddd82'
},
}
def _get_constant_values(fxid):
out = demo_single_cdf[fxid].copy()
del out['parent']
return out
demo_group_cdf = {
'ef51e87c-50b9-11e9-8647-d663bd873d93': {
"forecast_id": "ef51e87c-50b9-11e9-8647-d663bd873d93",
"name": "<NAME>",
"extra_parameters": "",
"provider": "Organization 1",
"site_id": "123e4567-e89b-12d3-a456-426655440001",
"aggregate_id": None,
"variable": "ghi",
"issue_time_of_day": "06:00",
"interval_length": 5,
"run_length": 1440,
"interval_label": "beginning",
"lead_time_to_start": 60,
"interval_value_type": "interval_mean",
"axis": "y",
"constant_values": [
_get_constant_values('633f9396-50bb-11e9-8647-d663bd873d93'),
_get_constant_values('633f9864-50bb-11e9-8647-d663bd873d93'),
_get_constant_values('633f9b2a-50bb-11e9-8647-d663bd873d93'),
_get_constant_values('633f9d96-50bb-11e9-8647-d663bd873d93'),
_get_constant_values('633fa548-50bb-11e9-8647-d663bd873d93')],
"created_at": pytz.utc.localize(dt.datetime(2019, 3, 2, 14, 55, 37)),
"modified_at": pytz.utc.localize(dt.datetime(2019, 3, 2, 14, 55, 37))
},
'058b182a-50ba-11e9-8647-d663bd873d93': {
"forecast_id": "058b182a-50ba-11e9-8647-d663bd873d93",
"name": "<NAME>",
"extra_parameters": "",
"provider": "Organization 1",
"site_id": "123e4567-e89b-12d3-a456-426655440002",
"aggregate_id": None,
"variable": "ac_power",
"issue_time_of_day": "12:00",
"run_length": 60,
"interval_length": 1,
"interval_label": "beginning",
"lead_time_to_start": 60,
"interval_value_type": "interval_mean",
"axis": "x",
"constant_values": [
_get_constant_values('633fb3a8-50bb-11e9-8647-d663bd873d93'),
_get_constant_values('633fb114-50bb-11e9-8647-d663bd873d93'),
_get_constant_values('633fae62-50bb-11e9-8647-d663bd873d93'),
_get_constant_values('633fabec-50bb-11e9-8647-d663bd873d93'),
_get_constant_values('633fa94e-50bb-11e9-8647-d663bd873d93')],
"created_at": pytz.utc.localize(dt.datetime(2019, 3, 2, 14, 55, 38)),
"modified_at": pytz.utc.localize(dt.datetime(2019, 3, 2, 14, 55, 38))
},
'f6b620ca-f743-11e9-a34f-f4939feddd82': {
"forecast_id": "f6b620ca-f743-11e9-a34f-f4939feddd82",
"name": "GHI Aggregate CDF FX",
"extra_parameters": "",
"provider": "Organization 1",
"site_id": None,
"aggregate_id": "458ffc27-df0b-11e9-b622-62adb5fd6af0",
"variable": "ghi",
"issue_time_of_day": "06:00",
"interval_length": 5,
"run_length": 1440,
"interval_label": "beginning",
"lead_time_to_start": 60,
"interval_value_type": "interval_mean",
"axis": "y",
"constant_values": [
_get_constant_values('733f9396-50bb-11e9-8647-d663bd873d93'),
_get_constant_values('733f9864-50bb-11e9-8647-d663bd873d93'),
_get_constant_values('733f9b2a-50bb-11e9-8647-d663bd873d93'),
_get_constant_values('733f9d96-50bb-11e9-8647-d663bd873d93'),
_get_constant_values('733fa548-50bb-11e9-8647-d663bd873d93')],
"created_at": pytz.utc.localize(dt.datetime(2019, 3, 2, 14, 55, 38)),
"modified_at": pytz.utc.localize(dt.datetime(2019, 3, 2, 14, 55, 38))
}
}
ca = dt.datetime(2019, 9, 25, 0, 0, tzinfo=dt.timezone.utc)
ef = dt.datetime(2019, 1, 1, 0, 0, tzinfo=dt.timezone.utc)
demo_aggregates = {
"458ffc27-df0b-11e9-b622-62adb5fd6af0": {
"aggregate_id": "458ffc27-df0b-11e9-b622-62adb5fd6af0",
"name": "Test Aggregate ghi",
"provider": "Organization 1",
"variable": "ghi",
"interval_label": "ending",
"interval_length": 60,
"interval_value_type": "interval_mean",
"aggregate_type": "mean",
"extra_parameters": "extra",
"description": "ghi agg",
"timezone": "America/Denver",
"created_at": dt.datetime(2019, 9, 24, 12, 0, tzinfo=dt.timezone.utc),
"modified_at": dt.datetime(2019, 9, 24, 12, 0, tzinfo=dt.timezone.utc),
"observations": [
{"observation_id": "123e4567-e89b-12d3-a456-426655440000",
"created_at": ca,
"effective_from": ef,
"observation_deleted_at": None,
"effective_until": None},
{"observation_id": "e0da0dea-9482-4073-84de-f1b12c304d23",
"created_at": ca,
"effective_from": ef,
"observation_deleted_at": None,
"effective_until": None},
{"observation_id": "b1dfe2cb-9c8e-43cd-afcf-c5a6feaf81e2",
"created_at": ca,
"effective_from": ef,
"observation_deleted_at": None,
"effective_until": None},
]
},
"d3d1e8e5-df1b-11e9-b622-62adb5fd6af0": {
"aggregate_id": "d3d1e8e5-df1b-11e9-b622-62adb5fd6af0",
"name": "Test Aggregate dni",
"provider": "Organization 1",
"variable": "dni",
"interval_label": "ending",
"interval_length": 60,
"interval_value_type": "interval_mean",
"aggregate_type": "mean",
"extra_parameters": "extra",
"description": "dni agg",
"timezone": "America/Denver",
"created_at": dt.datetime(2019, 9, 24, 12, 0, tzinfo=dt.timezone.utc),
"modified_at": dt.datetime(2019, 9, 24, 12, 0, tzinfo=dt.timezone.utc),
"observations": [
{"observation_id": "95890740-824f-11e9-a81f-54bf64606445",
"created_at": ca,
"observation_deleted_at": None,
"effective_from": ef,
"effective_until": None},
{"observation_id": "9ce9715c-bd91-47b7-989f-50bb558f1eb9",
"created_at": ca,
"observation_deleted_at": None,
"effective_from": ef,
"effective_until": None}
]
}
}
def generate_randoms(freq):
"""Generates two days worth of random noisy data.
Parameters
----------
freq: int
The "interval length" of the data to produce in minutes.
options: 1 or 5
Returns
-------
Dataframe
Dataframe with datetimeindex, values and quality_flag
columns.
Notes
-----
Won't throw an error if you try to use a freq of other
than 1 or 5 but will provide you values as though
you selected 5.
"""
if freq == 1:
length = 4320
elif freq == 5:
length = 864
else:
# assume 60 min
length = 72
index = pd.date_range(start=pd.Timestamp('20190414T07:00'),
periods=length, freq=f'{freq}min', tz='UTC')
values = np.random.normal(50, 5, size=length)
quality_flags = np.random.randint(11, size=length)
return index, values, quality_flags
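# A minimal usage sketch (the names below are illustrative, not part of the
# fixtures): the helper returns a tuple, so callers typically wrap it in a
# DataFrame, as the obs_vals/fx_vals fixtures further down do.
#
#   index, values, quality = generate_randoms(freq=1)
#   df = pd.DataFrame({'value': values, 'quality_flag': quality}, index=index)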
demo_file_variables = ['ghi', 'dni', 'dhi']
@pytest.fixture()
def obs_vals():
index, values, quality = generate_randoms(freq=5)
data = {
'value': values,
'quality_flag': quality}
obs_df = pd.DataFrame(index=index, data=data)
obs_df.index.name = 'timestamp'
return obs_df
@pytest.fixture()
def fx_vals():
index, values, quality = generate_randoms(freq=5)
data = {
'value': values}
fx_df = pd.DataFrame(index=index, data=data)
fx_df.index.name = 'timestamp'
return fx_df
@pytest.fixture(scope="module")
def ghi_obs_vals():
ghi_csv = """
timestamp,value,quality_flag
2019-04-14T07:00:00+00:00,-2.58981,0
2019-04-14T07:05:00+00:00,-2.59598,0
2019-04-14T07:10:00+00:00,-2.95363,0
2019-04-14T07:15:00+00:00,-2.58983,0
2019-04-14T07:20:00+00:00,-2.5897900000000003,0
2019-04-14T07:25:00+00:00,-2.65143,0
2019-04-14T07:30:00+00:00,-2.58363,0
2019-04-14T07:35:00+00:00,-2.9351,0
2019-04-14T07:40:00+00:00,-2.5897799999999997,0
2019-04-14T07:45:00+00:00,-2.58975,0
2019-04-14T07:50:00+00:00,-2.58976,0
2019-04-14T07:55:00+00:00,-2.5836,0
2019-04-14T08:00:00+00:00,-2.63912,0
2019-04-14T08:05:00+00:00,-2.87963,0
2019-04-14T08:10:00+00:00,-2.58983,0
2019-04-14T08:15:00+00:00,-2.5897799999999997,0
2019-04-14T08:20:00+00:00,-2.21981,0
2019-04-14T08:25:00+00:00,-2.0163599999999997,0
2019-04-14T08:30:00+00:00,-2.5897900000000003,0
2019-04-14T08:35:00+00:00,-2.58986,0
2019-04-14T08:40:00+00:00,-2.5899,0
2019-04-14T08:45:00+00:00,-2.46659,0
2019-04-14T08:50:00+00:00,-2.54677,0
2019-04-14T08:55:00+00:00,-2.33095,0
2019-04-14T09:00:00+00:00,-2.07193,0
2019-04-14T09:05:00+00:00,-2.3679200000000002,0
2019-04-14T09:10:00+00:00,-2.2137599999999997,0
2019-04-14T09:15:00+00:00,-2.5899799999999997,0
2019-04-14T09:20:00+00:00,-2.368,0
2019-04-14T09:25:00+00:00,-2.59004,0
2019-04-14T09:30:00+00:00,-2.5838900000000002,0
2019-04-14T09:35:00+00:00,-2.59002,0
2019-04-14T09:40:00+00:00,-2.59003,0
2019-04-14T09:45:00+00:00,-2.46049,0
2019-04-14T09:50:00+00:00,-2.21995,0
2019-04-14T09:55:00+00:00,-2.2384,0
2019-04-14T10:00:00+00:00,-2.15828,0
2019-04-14T10:05:00+00:00,-2.59001,0
2019-04-14T10:10:00+00:00,-2.22008,0
2019-04-14T10:15:00+00:00,-2.22011,0
2019-04-14T10:20:00+00:00,-2.34346,0
2019-04-14T10:25:00+00:00,-2.22012,0
2019-04-14T10:30:00+00:00,-2.2200900000000003,0
2019-04-14T10:35:00+00:00,-2.26943,0
2019-04-14T10:40:00+00:00,-2.22627,0
2019-04-14T10:45:00+00:00,-1.94259,0
2019-04-14T10:50:00+00:00,-2.21394,0
2019-04-14T10:55:00+00:00,-2.20161,0
2019-04-14T11:00:00+00:00,-2.18928,0
2019-04-14T11:05:00+00:00,-2.25712,0
2019-04-14T11:10:00+00:00,-2.3496200000000003,0
2019-04-14T11:15:00+00:00,-2.01661,0
2019-04-14T11:20:00+00:00,-2.20778,0
2019-04-14T11:25:00+00:00,-1.99194,0
2019-04-14T11:30:00+00:00,-1.8501,0
2019-04-14T11:35:00+00:00,-1.86243,0
2019-04-14T11:40:00+00:00,-2.1461099999999997,0
2019-04-14T11:45:00+00:00,-1.88093,0
2019-04-14T11:50:00+00:00,-1.8501,0
2019-04-14T11:55:00+00:00,-2.21395,0
2019-04-14T12:00:00+00:00,-2.07211,0
2019-04-14T12:05:00+00:00,-1.8562599999999998,0
2019-04-14T12:10:00+00:00,-2.33112,0
2019-04-14T12:15:00+00:00,-2.56547,0
2019-04-14T12:20:00+00:00,-2.5593,0
2019-04-14T12:25:00+00:00,-2.18928,0
2019-04-14T12:30:00+00:00,-2.1461099999999997,0
2019-04-14T12:35:00+00:00,-1.8501,0
2019-04-14T12:40:00+00:00,-1.8501,0
2019-04-14T12:45:00+00:00,-1.11006,0
2019-04-14T12:50:00+00:00,0.370021,0
2019-04-14T12:55:00+00:00,2.81833,0
2019-04-14T13:00:00+00:00,6.01286,0
2019-04-14T13:05:00+00:00,14.8625,0
2019-04-14T13:10:00+00:00,25.0627,0
2019-04-14T13:15:00+00:00,36.0769,0
2019-04-14T13:20:00+00:00,49.3668,0
2019-04-14T13:25:00+00:00,64.1798,0
2019-04-14T13:30:00+00:00,80.7814,0
2019-04-14T13:35:00+00:00,98.1722,0
2019-04-14T13:40:00+00:00,116.131,0
2019-04-14T13:45:00+00:00,134.749,0
2019-04-14T13:50:00+00:00,152.97799999999998,0
2019-04-14T13:55:00+00:00,161.322,0
2019-04-14T14:00:00+00:00,185.75,0
2019-04-14T14:05:00+00:00,212.2,0
2019-04-14T14:10:00+00:00,232.768,0
2019-04-14T14:15:00+00:00,253.32299999999998,0
2019-04-14T14:20:00+00:00,271.79200000000003,0
2019-04-14T14:25:00+00:00,292.827,0
2019-04-14T14:30:00+00:00,314.072,0
2019-04-14T14:35:00+00:00,331.245,0
2019-04-14T14:40:00+00:00,354.673,0
2019-04-14T14:45:00+00:00,374.912,0
2019-04-14T14:50:00+00:00,395.663,0
2019-04-14T14:55:00+00:00,416.967,0
2019-04-14T15:00:00+00:00,437.421,0
2019-04-14T15:05:00+00:00,458.991,0
2019-04-14T15:10:00+00:00,482.281,0
2019-04-14T15:15:00+00:00,479.139,0
2019-04-14T15:20:00+00:00,508.588,0
2019-04-14T15:25:00+00:00,535.8969999999999,0
2019-04-14T15:30:00+00:00,553.289,0
2019-04-14T15:35:00+00:00,571.726,0
2019-04-14T15:40:00+00:00,590.8,0
2019-04-14T15:45:00+00:00,597.552,0
2019-04-14T15:50:00+00:00,625.388,0
2019-04-14T15:55:00+00:00,642.813,0
2019-04-14T16:00:00+00:00,659.5160000000001,0
2019-04-14T16:05:00+00:00,676.591,0
2019-04-14T16:10:00+00:00,693.842,0
2019-04-14T16:15:00+00:00,710.203,0
2019-04-14T16:20:00+00:00,725.822,0
2019-04-14T16:25:00+00:00,742.3810000000001,0
2019-04-14T16:30:00+00:00,758.559,0
2019-04-14T16:35:00+00:00,774.2180000000001,0
2019-04-14T16:40:00+00:00,789.38,0
2019-04-14T16:45:00+00:00,804.44,0
2019-04-14T16:50:00+00:00,819.814,0
2019-04-14T16:55:00+00:00,834.556,0
2019-04-14T17:00:00+00:00,848.2639999999999,0
2019-04-14T17:05:00+00:00,861.556,0
2019-04-14T17:10:00+00:00,874.9169999999999,0
2019-04-14T17:15:00+00:00,887.265,0
2019-04-14T17:20:00+00:00,899.135,0
2019-04-14T17:25:00+00:00,914.666,0
2019-04-14T17:30:00+00:00,928.618,0
2019-04-14T17:35:00+00:00,951.102,0
2019-04-14T17:40:00+00:00,890.1210000000001,0
2019-04-14T17:45:00+00:00,728.257,0
2019-04-14T17:50:00+00:00,661.2189999999999,0
2019-04-14T17:55:00+00:00,991.305,0
2019-04-14T18:00:00+00:00,974.33,0
2019-04-14T18:05:00+00:00,999.362,0
2019-04-14T18:10:00+00:00,944.8510000000001,0
2019-04-14T18:15:00+00:00,715.2760000000001,0
2019-04-14T18:20:00+00:00,945.5980000000001,0
2019-04-14T18:25:00+00:00,967.4939999999999,0
2019-04-14T18:30:00+00:00,1038.75,0
2019-04-14T18:35:00+00:00,1035.99,0
2019-04-14T18:40:00+00:00,1038.4,0
2019-04-14T18:45:00+00:00,1039.57,0
2019-04-14T18:50:00+00:00,929.263,0
2019-04-14T18:55:00+00:00,947.2260000000001,0
2019-04-14T19:00:00+00:00,952.812,0
2019-04-14T19:05:00+00:00,1056.66,0
2019-04-14T19:10:00+00:00,1084.52,0
2019-04-14T19:15:00+00:00,1080.25,0
2019-04-14T19:20:00+00:00,1067.63,0
2019-04-14T19:25:00+00:00,1063.64,0
2019-04-14T19:30:00+00:00,1046.98,0
2019-04-14T19:35:00+00:00,1049.83,0
2019-04-14T19:40:00+00:00,983.454,0
2019-04-14T19:45:00+00:00,953.0,0
2019-04-14T19:50:00+00:00,973.1189999999999,0
2019-04-14T19:55:00+00:00,1031.86,0
2019-04-14T20:00:00+00:00,893.164,0
2019-04-14T20:05:00+00:00,966.245,0
2019-04-14T20:10:00+00:00,1019.2,0
2019-04-14T20:15:00+00:00,976.79,0
2019-04-14T20:20:00+00:00,901.305,0
2019-04-14T20:25:00+00:00,945.3939999999999,0
2019-04-14T20:30:00+00:00,989.505,0
2019-04-14T20:35:00+00:00,1043.08,0
2019-04-14T20:40:00+00:00,843.58,0
2019-04-14T20:45:00+00:00,943.299,0
2019-04-14T20:50:00+00:00,893.836,0
2019-04-14T20:55:00+00:00,866.0110000000001,0
2019-04-14T21:00:00+00:00,811.8969999999999,0
2019-04-14T21:05:00+00:00,840.812,0
2019-04-14T21:10:00+00:00,757.7389999999999,0
2019-04-14T21:15:00+00:00,899.6519999999999,0
2019-04-14T21:20:00+00:00,909.35,0
2019-04-14T21:25:00+00:00,954.7180000000001,0
2019-04-14T21:30:00+00:00,894.877,0
2019-04-14T21:35:00+00:00,891.1339999999999,0
2019-04-14T21:40:00+00:00,871.166,0
2019-04-14T21:45:00+00:00,872.8610000000001,0
2019-04-14T21:50:00+00:00,844.846,0
2019-04-14T21:55:00+00:00,837.497,0
2019-04-14T22:00:00+00:00,766.3919999999999,0
2019-04-14T22:05:00+00:00,810.61,0
2019-04-14T22:10:00+00:00,776.055,0
2019-04-14T22:15:00+00:00,745.7360000000001,0
2019-04-14T22:20:00+00:00,722.574,0
2019-04-14T22:25:00+00:00,752.806,0
2019-04-14T22:30:00+00:00,721.926,0
2019-04-14T22:35:00+00:00,694.8430000000001,0
2019-04-14T22:40:00+00:00,684.0169999999999,0
2019-04-14T22:45:00+00:00,666.4830000000001,0
2019-04-14T22:50:00+00:00,632.129,0
2019-04-14T22:55:00+00:00,580.055,0
2019-04-14T23:00:00+00:00,458.814,0
2019-04-14T23:05:00+00:00,615.59,0
2019-04-14T23:10:00+00:00,619.446,0
2019-04-14T23:15:00+00:00,592.008,0
2019-04-14T23:20:00+00:00,437.16900000000004,0
2019-04-14T23:25:00+00:00,514.895,0
2019-04-14T23:30:00+00:00,342.56,0
2019-04-14T23:35:00+00:00,567.229,0
2019-04-14T23:40:00+00:00,521.059,0
2019-04-14T23:45:00+00:00,475.625,0
2019-04-14T23:50:00+00:00,286.94,0
2019-04-14T23:55:00+00:00,430.19,0
2019-04-15T00:00:00+00:00,181.178,0
2019-04-15T00:05:00+00:00,246.452,0
2019-04-15T00:10:00+00:00,123.838,0
2019-04-15T00:15:00+00:00,134.411,0
2019-04-15T00:20:00+00:00,178.725,0
2019-04-15T00:25:00+00:00,222.75099999999998,0
2019-04-15T00:30:00+00:00,122.815,0
2019-04-15T00:35:00+00:00,120.95200000000001,0
2019-04-15T00:40:00+00:00,215.28599999999997,0
2019-04-15T00:45:00+00:00,182.082,0
2019-04-15T00:50:00+00:00,112.78399999999999,0
2019-04-15T00:55:00+00:00,99.9878,0
2019-04-15T01:00:00+00:00,95.6318,0
2019-04-15T01:05:00+00:00,98.37,0
2019-04-15T01:10:00+00:00,65.9311,0
2019-04-15T01:15:00+00:00,54.4334,0
2019-04-15T01:20:00+00:00,69.2061,0
2019-04-15T01:25:00+00:00,57.6223,0
2019-04-15T01:30:00+00:00,27.152,0
2019-04-15T01:35:00+00:00,15.2969,0
2019-04-15T01:40:00+00:00,7.52508,0
2019-04-15T01:45:00+00:00,3.06547,0
2019-04-15T01:50:00+00:00,0.370076,0
2019-04-15T01:55:00+00:00,-1.11022,0
2019-04-15T02:00:00+00:00,-1.8503,0
2019-04-15T02:05:00+00:00,-2.6026700000000003,0
2019-04-15T02:10:00+00:00,-2.9603,0
2019-04-15T02:15:00+00:00,-3.31795,0
2019-04-15T02:20:00+00:00,-3.3302300000000002,0
2019-04-15T02:25:00+00:00,-3.3302099999999997,0
2019-04-15T02:30:00+00:00,-3.3302300000000002,0
2019-04-15T02:35:00+00:00,-3.19462,0
2019-04-15T02:40:00+00:00,-3.70025,0
2019-04-15T02:45:00+00:00,-3.8913900000000003,0
2019-04-15T02:50:00+00:00,-3.5152099999999997,0
2019-04-15T02:55:00+00:00,-3.54601,0
2019-04-15T03:00:00+00:00,-3.48432,0
2019-04-15T03:05:00+00:00,-3.33015,0
2019-04-15T03:10:00+00:00,-3.33015,0
2019-04-15T03:15:00+00:00,-3.1760200000000003,0
2019-04-15T03:20:00+00:00,-3.3302400000000003,0
2019-04-15T03:25:00+00:00,-2.9725599999999996,0
2019-04-15T03:30:00+00:00,-2.9664,0
2019-04-15T03:35:00+00:00,-2.59022,0
2019-04-15T03:40:00+00:00,-2.96026,0
2019-04-15T03:45:00+00:00,-3.0836,0
2019-04-15T03:50:00+00:00,-3.33029,0
2019-04-15T03:55:00+00:00,-3.3302199999999997,0
2019-04-15T04:00:00+00:00,-3.33016,0
2019-04-15T04:05:00+00:00,-3.2933,0
2019-04-15T04:10:00+00:00,-2.96029,0
2019-04-15T04:15:00+00:00,-2.96038,0
2019-04-15T04:20:00+00:00,-2.91717,0
2019-04-15T04:25:00+00:00,-2.59643,0
2019-04-15T04:30:00+00:00,-2.59025,0
2019-04-15T04:35:00+00:00,-2.59023,0
2019-04-15T04:40:00+00:00,-2.59016,0
2019-04-15T04:45:00+00:00,-2.7258299999999998,0
2019-04-15T04:50:00+00:00,-2.99723,0
2019-04-15T04:55:00+00:00,-2.96025,0
2019-04-15T05:00:00+00:00,-3.07743,0
2019-04-15T05:05:00+00:00,-3.39195,0
2019-04-15T05:10:00+00:00,-3.36734,0
2019-04-15T05:15:00+00:00,-3.3303599999999998,0
2019-04-15T05:20:00+00:00,-3.3303800000000003,0
2019-04-15T05:25:00+00:00,-3.18854,0
2019-04-15T05:30:00+00:00,-3.3242300000000005,0
2019-04-15T05:35:00+00:00,-2.9603599999999997,0
2019-04-15T05:40:00+00:00,-2.96037,0
2019-04-15T05:45:00+00:00,-2.95419,0
2019-04-15T05:50:00+00:00,-2.96035,0
2019-04-15T05:55:00+00:00,-2.9603599999999997,0
2019-04-15T06:00:00+00:00,-3.09606,0
2019-04-15T06:05:00+00:00,-2.9603599999999997,0
2019-04-15T06:10:00+00:00,-2.9603200000000003,0
2019-04-15T06:15:00+00:00,-2.96029,0
2019-04-15T06:20:00+00:00,-2.96028,0
2019-04-15T06:25:00+00:00,-2.59024,0
2019-04-15T06:30:00+00:00,-2.59023,0
2019-04-15T06:35:00+00:00,-2.59017,0
2019-04-15T06:40:00+00:00,-2.5901400000000003,0
2019-04-15T06:45:00+00:00,-2.59009,0
2019-04-15T06:50:00+00:00,-2.59008,0
2019-04-15T06:55:00+00:00,-2.59001,0
2019-04-15T07:00:00+00:00,-2.5899900000000002,0
2019-04-15T07:05:00+00:00,-2.62702,0
2019-04-15T07:10:00+00:00,-2.5899900000000002,0
2019-04-15T07:15:00+00:00,-2.58996,0
2019-04-15T07:20:00+00:00,-2.5899400000000004,0
2019-04-15T07:25:00+00:00,-2.58995,0
2019-04-15T07:30:00+00:00,-2.5899099999999997,0
2019-04-15T07:35:00+00:00,-2.5899,0
2019-04-15T07:40:00+00:00,-2.58992,0
2019-04-15T07:45:00+00:00,-2.58993,0
2019-04-15T07:50:00+00:00,-2.58995,0
2019-04-15T07:55:00+00:00,-2.58995,0
2019-04-15T08:00:00+00:00,-2.9291,0
2019-04-15T08:05:00+00:00,-2.95992,0
2019-04-15T08:10:00+00:00,-2.95991,0
2019-04-15T08:15:00+00:00,-3.07707,0
2019-04-15T08:20:00+00:00,-2.9599,0
2019-04-15T08:25:00+00:00,-2.68859,0
2019-04-15T08:30:00+00:00,-2.60227,0
2019-04-15T08:35:00+00:00,-2.67009,0
2019-04-15T08:40:00+00:00,-2.58993,0
2019-04-15T08:45:00+00:00,-2.58992,0
2019-04-15T08:50:00+00:00,-2.58989,0
2019-04-15T08:55:00+00:00,-2.58988,0
2019-04-15T09:00:00+00:00,-2.58988,0
2019-04-15T09:05:00+00:00,-2.58989,0
2019-04-15T09:10:00+00:00,-2.9598400000000002,0
2019-04-15T09:15:00+00:00,-2.91663,0
2019-04-15T09:20:00+00:00,-2.58977,0
2019-04-15T09:25:00+00:00,-2.54041,0
2019-04-15T09:30:00+00:00,-2.25062,0
2019-04-15T09:35:00+00:00,-2.30618,0
2019-04-15T09:40:00+00:00,-2.58982,0
2019-04-15T09:45:00+00:00,-2.58986,0
2019-04-15T09:50:00+00:00,-2.58987,0
2019-04-15T09:55:00+00:00,-2.57755,0
2019-04-15T10:00:00+00:00,-2.58989,0
2019-04-15T10:05:00+00:00,-2.58985,0
2019-04-15T10:10:00+00:00,-2.5897900000000003,0
2019-04-15T10:15:00+00:00,-2.31846,0
2019-04-15T10:20:00+00:00,-2.5897799999999997,0
2019-04-15T10:25:00+00:00,-2.58976,0
2019-04-15T10:30:00+00:00,-2.58974,0
2019-04-15T10:35:00+00:00,-2.44176,0
2019-04-15T10:40:00+00:00,-2.57742,0
2019-04-15T10:45:00+00:00,-2.56506,0
2019-04-15T10:50:00+00:00,-2.42942,0
2019-04-15T10:55:00+00:00,-2.2938,0
2019-04-15T11:00:00+00:00,-2.5342599999999997,0
2019-04-15T11:05:00+00:00,-2.3122700000000003,0
2019-04-15T11:10:00+00:00,-2.21975,0
2019-04-15T11:15:00+00:00,-2.20126,0
2019-04-15T11:20:00+00:00,-2.35545,0
2019-04-15T11:25:00+00:00,-2.58973,0
2019-04-15T11:30:00+00:00,-2.58974,0
2019-04-15T11:35:00+00:00,-2.58976,0
2019-04-15T11:40:00+00:00,-2.7069400000000003,0
2019-04-15T11:45:00+00:00,-2.58983,0
2019-04-15T11:50:00+00:00,-2.58986,0
2019-04-15T11:55:00+00:00,-2.58986,0
2019-04-15T12:00:00+00:00,-2.58982,0
2019-04-15T12:05:00+00:00,-2.5898,0
2019-04-15T12:10:00+00:00,-2.34317,0
2019-04-15T12:15:00+00:00,-2.2199,0
2019-04-15T12:20:00+00:00,-2.13352,0
2019-04-15T12:25:00+00:00,-2.2938099999999997,0
2019-04-15T12:30:00+00:00,-2.27527,0
2019-04-15T12:35:00+00:00,-2.55277,0
2019-04-15T12:40:00+00:00,-2.24453,0
2019-04-15T12:45:00+00:00,-1.2333,0
2019-04-15T12:50:00+00:00,1.51696,0
2019-04-15T12:55:00+00:00,3.3299300000000005,0
2019-04-15T13:00:00+00:00,5.33415,0
2019-04-15T13:05:00+00:00,12.8762,0
2019-04-15T13:10:00+00:00,28.0401,0
2019-04-15T13:15:00+00:00,25.3819,0
2019-04-15T13:20:00+00:00,33.9907,0
2019-04-15T13:25:00+00:00,40.2998,0
2019-04-15T13:30:00+00:00,46.7991,0
2019-04-15T13:35:00+00:00,55.1116,0
2019-04-15T13:40:00+00:00,65.0373,0
2019-04-15T13:45:00+00:00,90.2805,0
2019-04-15T13:50:00+00:00,107.291,0
2019-04-15T13:55:00+00:00,91.751,0
2019-04-15T14:00:00+00:00,93.0265,0
2019-04-15T14:05:00+00:00,119.853,0
2019-04-15T14:10:00+00:00,275.908,0
2019-04-15T14:15:00+00:00,259.889,0
2019-04-15T14:20:00+00:00,297.738,0
2019-04-15T14:25:00+00:00,320.35900000000004,0
2019-04-15T14:30:00+00:00,330.24800000000005,0
2019-04-15T14:35:00+00:00,340.918,0
2019-04-15T14:40:00+00:00,315.414,0
2019-04-15T14:45:00+00:00,323.96299999999997,0
2019-04-15T14:50:00+00:00,391.41,0
2019-04-15T14:55:00+00:00,428.966,0
2019-04-15T15:00:00+00:00,452.981,0
2019-04-15T15:05:00+00:00,436.892,0
2019-04-15T15:10:00+00:00,501.43800000000005,0
2019-04-15T15:15:00+00:00,361.515,0
2019-04-15T15:20:00+00:00,517.174,0
2019-04-15T15:25:00+00:00,553.139,0
2019-04-15T15:30:00+00:00,532.0880000000001,0
2019-04-15T15:35:00+00:00,602.265,0
2019-04-15T15:40:00+00:00,542.089,0
2019-04-15T15:45:00+00:00,619.884,0
2019-04-15T15:50:00+00:00,640.026,0
2019-04-15T15:55:00+00:00,615.5459999999999,0
2019-04-15T16:00:00+00:00,535.669,0
2019-04-15T16:05:00+00:00,663.263,0
2019-04-15T16:10:00+00:00,696.176,0
2019-04-15T16:15:00+00:00,709.57,0
2019-04-15T16:20:00+00:00,720.297,0
2019-04-15T16:25:00+00:00,741.4789999999999,0
2019-04-15T16:30:00+00:00,756.16,0
2019-04-15T16:35:00+00:00,772.105,0
2019-04-15T16:40:00+00:00,787.586,0
2019-04-15T16:45:00+00:00,803.993,0
2019-04-15T16:50:00+00:00,816.363,0
2019-04-15T16:55:00+00:00,830.944,0
2019-04-15T17:00:00+00:00,848.325,0
2019-04-15T17:05:00+00:00,862.183,0
2019-04-15T17:10:00+00:00,844.135,0
2019-04-15T17:15:00+00:00,876.2289999999999,0
2019-04-15T17:20:00+00:00,889.044,0
2019-04-15T17:25:00+00:00,902.704,0
2019-04-15T17:30:00+00:00,912.7310000000001,0
2019-04-15T17:35:00+00:00,923.09,0
2019-04-15T17:40:00+00:00,933.9680000000001,0
2019-04-15T17:45:00+00:00,943.538,0
2019-04-15T17:50:00+00:00,951.5260000000001,0
2019-04-15T17:55:00+00:00,961.005,0
2019-04-15T18:00:00+00:00,967.9910000000001,0
2019-04-15T18:05:00+00:00,977.7189999999999,0
2019-04-15T18:10:00+00:00,984.003,0
2019-04-15T18:15:00+00:00,991.053,0
2019-04-15T18:20:00+00:00,998.3510000000001,0
2019-04-15T18:25:00+00:00,1011.07,0
2019-04-15T18:30:00+00:00,1023.55,0
2019-04-15T18:35:00+00:00,956.725,0
2019-04-15T18:40:00+00:00,673.852,0
2019-04-15T18:45:00+00:00,869.235,0
2019-04-15T18:50:00+00:00,1059.85,0
2019-04-15T18:55:00+00:00,923.8989999999999,0
2019-04-15T19:00:00+00:00,1023.12,0
2019-04-15T19:05:00+00:00,1054.61,0
2019-04-15T19:10:00+00:00,1021.47,0
2019-04-15T19:15:00+00:00,960.933,0
2019-04-15T19:20:00+00:00,1037.77,0
2019-04-15T19:25:00+00:00,885.221,0
2019-04-15T19:30:00+00:00,1037.18,0
2019-04-15T19:35:00+00:00,1032.81,0
2019-04-15T19:40:00+00:00,1024.12,0
2019-04-15T19:45:00+00:00,1030.9,0
2019-04-15T19:50:00+00:00,1035.2,0
2019-04-15T19:55:00+00:00,1031.66,0
2019-04-15T20:00:00+00:00,1029.01,0
2019-04-15T20:05:00+00:00,1030.1,0
2019-04-15T20:10:00+00:00,1022.15,0
2019-04-15T20:15:00+00:00,1040.45,0
2019-04-15T20:20:00+00:00,1013.47,0
2019-04-15T20:25:00+00:00,1044.57,0
2019-04-15T20:30:00+00:00,1046.76,0
2019-04-15T20:35:00+00:00,994.2539999999999,0
2019-04-15T20:40:00+00:00,1000.12,0
2019-04-15T20:45:00+00:00,979.342,0
2019-04-15T20:50:00+00:00,963.86,0
2019-04-15T20:55:00+00:00,954.6339999999999,0
2019-04-15T21:00:00+00:00,945.7539999999999,0
2019-04-15T21:05:00+00:00,932.436,0
2019-04-15T21:10:00+00:00,926.655,0
2019-04-15T21:15:00+00:00,912.58,0
2019-04-15T21:20:00+00:00,885.745,0
2019-04-15T21:25:00+00:00,890.035,0
2019-04-15T21:30:00+00:00,881.33,0
2019-04-15T21:35:00+00:00,873.1110000000001,0
2019-04-15T21:40:00+00:00,862.566,0
2019-04-15T21:45:00+00:00,848.862,0
2019-04-15T21:50:00+00:00,837.1339999999999,0
2019-04-15T21:55:00+00:00,823.7389999999999,0
2019-04-15T22:00:00+00:00,787.9630000000001,0
2019-04-15T22:05:00+00:00,769.363,0
2019-04-15T22:10:00+00:00,741.2410000000001,0
2019-04-15T22:15:00+00:00,722.4060000000001,0
2019-04-15T22:20:00+00:00,743.5419999999999,0
2019-04-15T22:25:00+00:00,707.926,0
2019-04-15T22:30:00+00:00,598.981,0
2019-04-15T22:35:00+00:00,527.816,0
2019-04-15T22:40:00+00:00,384.231,0
2019-04-15T22:45:00+00:00,394.772,0
2019-04-15T22:50:00+00:00,343.18300000000005,0
2019-04-15T22:55:00+00:00,365.339,0
2019-04-15T23:00:00+00:00,316.932,0
2019-04-15T23:05:00+00:00,311.01,0
2019-04-15T23:10:00+00:00,269.47700000000003,0
2019-04-15T23:15:00+00:00,266.522,0
2019-04-15T23:20:00+00:00,323.63,0
2019-04-15T23:25:00+00:00,260.445,0
2019-04-15T23:30:00+00:00,300.435,0
2019-04-15T23:35:00+00:00,240.81400000000002,0
2019-04-15T23:40:00+00:00,261.992,0
2019-04-15T23:45:00+00:00,248.27200000000002,0
2019-04-15T23:50:00+00:00,373.164,0
2019-04-15T23:55:00+00:00,338.93,0
2019-04-16T00:00:00+00:00,447.551,0
2019-04-16T00:05:00+00:00,425.18,0
2019-04-16T00:10:00+00:00,253.859,0
2019-04-16T00:15:00+00:00,250.486,0
2019-04-16T00:20:00+00:00,182.83,0
2019-04-16T00:25:00+00:00,157.441,0
2019-04-16T00:30:00+00:00,160.778,0
2019-04-16T00:35:00+00:00,136.722,0
2019-04-16T00:40:00+00:00,116.979,0
2019-04-16T00:45:00+00:00,95.4898,0
2019-04-16T00:50:00+00:00,99.1536,0
2019-04-16T00:55:00+00:00,81.5057,0
2019-04-16T01:00:00+00:00,76.2751,0
2019-04-16T01:05:00+00:00,98.7015,0
2019-04-16T01:10:00+00:00,138.364,0
2019-04-16T01:15:00+00:00,79.304,0
2019-04-16T01:20:00+00:00,42.3035,0
2019-04-16T01:25:00+00:00,30.6648,0
2019-04-16T01:30:00+00:00,29.0972,0
2019-04-16T01:35:00+00:00,21.0981,0
2019-04-16T01:40:00+00:00,11.9581,0
2019-04-16T01:45:00+00:00,6.61729,0
2019-04-16T01:50:00+00:00,1.87484,0
2019-04-16T01:55:00+00:00,0.080174,0
2019-04-16T02:00:00+00:00,-1.85017,0
2019-04-16T02:05:00+00:00,-2.59026,0
2019-04-16T02:10:00+00:00,-2.95407,0
2019-04-16T02:15:00+00:00,-2.9601900000000003,0
2019-04-16T02:20:00+00:00,-2.9601599999999997,0
2019-04-16T02:25:00+00:00,-2.9108099999999997,0
2019-04-16T02:30:00+00:00,-2.96015,0
2019-04-16T02:35:00+00:00,-2.9601599999999997,0
2019-04-16T02:40:00+00:00,-2.9601599999999997,0
2019-04-16T02:45:00+00:00,-2.88618,0
2019-04-16T02:50:00+00:00,-2.94785,0
2019-04-16T02:55:00+00:00,-2.96017,0
2019-04-16T03:00:00+00:00,-2.9602,0
2019-04-16T03:05:00+00:00,-2.85538,0
2019-04-16T03:10:00+00:00,-2.59021,0
2019-04-16T03:15:00+00:00,-2.5902,0
2019-04-16T03:20:00+00:00,-2.59022,0
2019-04-16T03:25:00+00:00,-2.59024,0
2019-04-16T03:30:00+00:00,-2.59024,0
2019-04-16T03:35:00+00:00,-2.60258,0
2019-04-16T03:40:00+00:00,-2.65192,0
2019-04-16T03:45:00+00:00,-2.70745,0
2019-04-16T03:50:00+00:00,-2.63959,0
2019-04-16T03:55:00+00:00,-2.84312,0
2019-04-16T04:00:00+00:00,-2.8307700000000002,0
2019-04-16T04:05:00+00:00,-2.82457,0
2019-04-16T04:10:00+00:00,-2.96026,0
2019-04-16T04:15:00+00:00,-2.96027,0
2019-04-16T04:20:00+00:00,-2.96025,0
2019-04-16T04:25:00+00:00,-2.9664,0
2019-04-16T04:30:00+00:00,-2.96017,0
2019-04-16T04:35:00+00:00,-2.96015,0
2019-04-16T04:40:00+00:00,-2.96017,0
2019-04-16T04:45:00+00:00,-2.92315,0
2019-04-16T04:50:00+00:00,-2.83682,0
2019-04-16T04:55:00+00:00,-2.94789,0
2019-04-16T05:00:00+00:00,-2.96027,0
2019-04-16T05:05:00+00:00,-2.96026,0
2019-04-16T05:10:00+00:00,-2.95411,0
2019-04-16T05:15:00+00:00,-2.88632,0
2019-04-16T05:20:00+00:00,-2.83701,0
2019-04-16T05:25:00+00:00,-2.9048599999999998,0
2019-04-16T05:30:00+00:00,-2.9603200000000003,0
2019-04-16T05:35:00+00:00,-2.60875,0
2019-04-16T05:40:00+00:00,-2.59026,0
2019-04-16T05:45:00+00:00,-2.69508,0
2019-04-16T05:50:00+00:00,-2.86777,0
2019-04-16T05:55:00+00:00,-2.69506,0
2019-04-16T06:00:00+00:00,-2.59021,0
2019-04-16T06:05:00+00:00,-2.59023,0
2019-04-16T06:10:00+00:00,-2.59022,0
2019-04-16T06:15:00+00:00,-2.59022,0
2019-04-16T06:20:00+00:00,-2.59024,0
2019-04-16T06:25:00+00:00,-2.59023,0
2019-04-16T06:30:00+00:00,-2.59022,0
2019-04-16T06:35:00+00:00,-2.59022,0
2019-04-16T06:40:00+00:00,-2.59023,0
2019-04-16T06:45:00+00:00,-2.59024,0
2019-04-16T06:50:00+00:00,-2.58405,0
2019-04-16T06:55:00+00:00,-2.59019,0
2019-04-16T07:00:00+00:00,-2.59015,0
2019-04-16T07:05:00+00:00,-2.58393,0
2019-04-16T07:10:00+00:00,-2.47913,0
2019-04-16T07:15:00+00:00,-2.54083,0
2019-04-16T07:20:00+00:00,-2.34964,0
2019-04-16T07:25:00+00:00,-2.26333,0
2019-04-16T07:30:00+00:00,-2.57167,0
2019-04-16T07:35:00+00:00,-2.59013,0
2019-04-16T07:40:00+00:00,-2.22627,0
2019-04-16T07:45:00+00:00,-2.26326,0
2019-04-16T07:50:00+00:00,-2.22625,0
2019-04-16T07:55:00+00:00,-2.59008,0
2019-04-16T08:00:00+00:00,-2.5284,0
2019-04-16T08:05:00+00:00,-2.58391,0
2019-04-16T08:10:00+00:00,-2.59001,0
2019-04-16T08:15:00+00:00,-2.59006,0
2019-04-16T08:20:00+00:00,-2.23239,0
2019-04-16T08:25:00+00:00,-2.28786,0
2019-04-16T08:30:00+00:00,-2.33102,0
2019-04-16T08:35:00+00:00,-2.30017,0
2019-04-16T08:40:00+00:00,-2.33714,0
2019-04-16T08:45:00+00:00,-2.27547,0
2019-04-16T08:50:00+00:00,-2.23227,0
2019-04-16T08:55:00+00:00,-2.25694,0
2019-04-16T09:00:00+00:00,-2.36794,0
2019-04-16T09:05:00+00:00,-2.28163,0
2019-04-16T09:10:00+00:00,-2.5899400000000004,0
2019-04-16T09:15:00+00:00,-2.55295,0
2019-04-16T09:20:00+00:00,-2.58995,0
2019-04-16T09:25:00+00:00,-2.58996,0
2019-04-16T09:30:00+00:00,-2.58995,0
2019-04-16T09:35:00+00:00,-1.86847,0
2019-04-16T09:40:00+00:00,-1.8499700000000001,0
2019-04-16T09:45:00+00:00,-2.58997,0
2019-04-16T09:50:00+00:00,-2.58997,0
2019-04-16T09:55:00+00:00,-2.58995,0
2019-04-16T10:00:00+00:00,-2.58993,0
2019-04-16T10:05:00+00:00,-2.5899,0
2019-04-16T10:10:00+00:00,-2.5899,0
2019-04-16T10:15:00+00:00,-2.54675,0
2019-04-16T10:20:00+00:00,-2.2816099999999997,0
2019-04-16T10:25:00+00:00,-2.58993,0
2019-04-16T10:30:00+00:00,-2.58993,0
2019-04-16T10:35:00+00:00,-2.8551,0
2019-04-16T10:40:00+00:00,-2.58992,0
2019-04-16T10:45:00+00:00,-2.22611,0
2019-04-16T10:50:00+00:00,-2.37412,0
2019-04-16T10:55:00+00:00,-2.58995,0
2019-04-16T11:00:00+00:00,-3.08324,0
2019-04-16T11:05:00+00:00,-3.23739,0
2019-04-16T11:10:00+00:00,-3.08324,0
2019-04-16T11:15:00+00:00,-2.58989,0
2019-04-16T11:20:00+00:00,-2.50968,0
2019-04-16T11:25:00+00:00,-2.51578,0
2019-04-16T11:30:00+00:00,-2.41095,0
2019-04-16T11:35:00+00:00,-2.3185,0
2019-04-16T11:40:00+00:00,-2.1705200000000002,0
2019-04-16T11:45:00+00:00,-2.50964,0
2019-04-16T11:50:00+00:00,-2.5898,0
2019-04-16T11:55:00+00:00,-2.58977,0
2019-04-16T12:00:00+00:00,-2.58973,0
2019-04-16T12:05:00+00:00,-2.58356,0
2019-04-16T12:10:00+00:00,-2.18894,0
2019-04-16T12:15:00+00:00,-2.09028,0
2019-04-16T12:20:00+00:00,-1.84982,0
2019-04-16T12:25:00+00:00,-1.9115099999999998,0
2019-04-16T12:30:00+00:00,-2.07183,0
2019-04-16T12:35:00+00:00,-2.21366,0
2019-04-16T12:40:00+00:00,-1.8498299999999999,0
2019-04-16T12:45:00+00:00,-0.653601,0
2019-04-16T12:50:00+00:00,2.61437,0
2019-04-16T12:55:00+00:00,4.0572,0
2019-04-16T13:00:00+00:00,7.22663,0
2019-04-16T13:05:00+00:00,17.3453,0
2019-04-16T13:10:00+00:00,32.329,0
2019-04-16T13:15:00+00:00,49.1626,0
2019-04-16T13:20:00+00:00,64.7622,0
2019-04-16T13:25:00+00:00,83.6739,0
2019-04-16T13:30:00+00:00,92.5838,0
2019-04-16T13:35:00+00:00,90.1717,0
2019-04-16T13:40:00+00:00,112.14399999999999,0
2019-04-16T13:45:00+00:00,164.34799999999998,0
2019-04-16T13:50:00+00:00,234.75599999999997,0
2019-04-16T13:55:00+00:00,246.387,0
2019-04-16T14:00:00+00:00,272.242,0
2019-04-16T14:05:00+00:00,271.619,0
2019-04-16T14:10:00+00:00,286.362,0
2019-04-16T14:15:00+00:00,237.521,0
2019-04-16T14:20:00+00:00,200.06799999999998,0
2019-04-16T14:25:00+00:00,93.2373,0
2019-04-16T14:30:00+00:00,90.6478,0
2019-04-16T14:35:00+00:00,94.2381,0
2019-04-16T14:40:00+00:00,104.932,0
2019-04-16T14:45:00+00:00,120.898,0
2019-04-16T14:50:00+00:00,130.222,0
2019-04-16T14:55:00+00:00,141.884,0
2019-04-16T15:00:00+00:00,216.80200000000002,0
2019-04-16T15:05:00+00:00,288.144,0
2019-04-16T15:10:00+00:00,524.619,0
2019-04-16T15:15:00+00:00,547.526,0
2019-04-16T15:20:00+00:00,140.826,0
2019-04-16T15:25:00+00:00,354.62800000000004,0
2019-04-16T15:30:00+00:00,466.901,0
2019-04-16T15:35:00+00:00,594.6519999999999,0
2019-04-16T15:40:00+00:00,441.285,0
2019-04-16T15:45:00+00:00,539.64,0
2019-04-16T15:50:00+00:00,639.173,0
2019-04-16T15:55:00+00:00,642.967,0
2019-04-16T16:00:00+00:00,658.3919999999999,0
2019-04-16T16:05:00+00:00,679.262,0
2019-04-16T16:10:00+00:00,699.2760000000001,0
2019-04-16T16:15:00+00:00,726.8710000000001,0
2019-04-16T16:20:00+00:00,751.487,0
2019-04-16T16:25:00+00:00,771.829,0
2019-04-16T16:30:00+00:00,728.345,0
2019-04-16T16:35:00+00:00,750.4680000000001,0
2019-04-16T16:40:00+00:00,807.5239999999999,0
2019-04-16T16:45:00+00:00,706.362,0
2019-04-16T16:50:00+00:00,850.466,0
2019-04-16T16:55:00+00:00,940.403,0
2019-04-16T17:00:00+00:00,802.26,0
2019-04-16T17:05:00+00:00,891.529,0
2019-04-16T17:10:00+00:00,289.269,0
2019-04-16T17:15:00+00:00,798.425,0
2019-04-16T17:20:00+00:00,269.858,0
2019-04-16T17:25:00+00:00,329.37,0
2019-04-16T17:30:00+00:00,898.745,0
2019-04-16T17:35:00+00:00,766.643,0
2019-04-16T17:40:00+00:00,1005.39,0
2019-04-16T17:45:00+00:00,887.747,0
2019-04-16T17:50:00+00:00,733.038,0
2019-04-16T17:55:00+00:00,961.1410000000001,0
2019-04-16T18:00:00+00:00,985.0110000000001,0
2019-04-16T18:05:00+00:00,1027.04,0
2019-04-16T18:10:00+00:00,1020.3,0
2019-04-16T18:15:00+00:00,995.9,0
2019-04-16T18:20:00+00:00,1006.89,0
2019-04-16T18:25:00+00:00,1026.46,0
2019-04-16T18:30:00+00:00,1027.81,0
2019-04-16T18:35:00+00:00,1048.59,0
2019-04-16T18:40:00+00:00,1115.36,0
2019-04-16T18:45:00+00:00,1084.52,0
2019-04-16T18:50:00+00:00,1106.36,0
2019-04-16T18:55:00+00:00,1155.42,0
2019-04-16T19:00:00+00:00,794.558,0
2019-04-16T19:05:00+00:00,1197.29,0
2019-04-16T19:10:00+00:00,350.171,0
2019-04-16T19:15:00+00:00,580.599,0
2019-04-16T19:20:00+00:00,1228.67,0
2019-04-16T19:25:00+00:00,1266.43,0
2019-04-16T19:30:00+00:00,1223.9,0
2019-04-16T19:35:00+00:00,1106.54,0
2019-04-16T19:40:00+00:00,1122.57,0
2019-04-16T19:45:00+00:00,1152.58,0
2019-04-16T19:50:00+00:00,719.179,0
2019-04-16T19:55:00+00:00,447.056,0
2019-04-16T20:00:00+00:00,402.275,0
2019-04-16T20:05:00+00:00,595.24,0
2019-04-16T20:10:00+00:00,1173.89,0
2019-04-16T20:15:00+00:00,580.9540000000001,0
2019-04-16T20:20:00+00:00,375.871,0
2019-04-16T20:25:00+00:00,1242.88,0
2019-04-16T20:30:00+00:00,503.10900000000004,0
2019-04-16T20:35:00+00:00,1153.81,0
2019-04-16T20:40:00+00:00,1154.69,0
2019-04-16T20:45:00+00:00,489.097,0
2019-04-16T20:50:00+00:00,401.384,0
2019-04-16T20:55:00+00:00,545.076,0
2019-04-16T21:00:00+00:00,267.79,0
2019-04-16T21:05:00+00:00,196.813,0
2019-04-16T21:10:00+00:00,265.881,0
2019-04-16T21:15:00+00:00,231.655,0
2019-04-16T21:20:00+00:00,296.884,0
2019-04-16T21:25:00+00:00,308.718,0
2019-04-16T21:30:00+00:00,251.62900000000002,0
2019-04-16T21:35:00+00:00,254.757,0
2019-04-16T21:40:00+00:00,311.839,0
2019-04-16T21:45:00+00:00,304.407,0
2019-04-16T21:50:00+00:00,367.164,0
2019-04-16T21:55:00+00:00,311.964,0
2019-04-16T22:00:00+00:00,324.35400000000004,0
2019-04-16T22:05:00+00:00,387.246,0
2019-04-16T22:10:00+00:00,279.862,0
2019-04-16T22:15:00+00:00,164.74900000000002,0
2019-04-16T22:20:00+00:00,117.959,0
2019-04-16T22:25:00+00:00,139.999,0
2019-04-16T22:30:00+00:00,133.882,0
2019-04-16T22:35:00+00:00,98.0075,0
2019-04-16T22:40:00+00:00,82.1886,0
2019-04-16T22:45:00+00:00,93.1478,0
2019-04-16T22:50:00+00:00,102.325,0
2019-04-16T22:55:00+00:00,151.645,0
2019-04-16T23:00:00+00:00,137.683,0
2019-04-16T23:05:00+00:00,243.864,0
2019-04-16T23:10:00+00:00,208.706,0
2019-04-16T23:15:00+00:00,121.73,0
2019-04-16T23:20:00+00:00,107.057,0
2019-04-16T23:25:00+00:00,43.7813,0
2019-04-16T23:30:00+00:00,48.154,0
2019-04-16T23:35:00+00:00,63.4735,0
2019-04-16T23:40:00+00:00,54.4389,0
2019-04-16T23:45:00+00:00,69.6048,0
2019-04-16T23:50:00+00:00,68.4514,0
2019-04-16T23:55:00+00:00,65.0645,0
2019-04-17T00:00:00+00:00,71.5086,0
2019-04-17T00:05:00+00:00,139.45600000000002,0
2019-04-17T00:10:00+00:00,173.799,0
2019-04-17T00:15:00+00:00,170.15400000000002,0
2019-04-17T00:20:00+00:00,115.132,0
2019-04-17T00:25:00+00:00,75.0135,0
2019-04-17T00:30:00+00:00,63.6482,0
2019-04-17T00:35:00+00:00,95.7993,0
2019-04-17T00:40:00+00:00,43.5426,0
2019-04-17T00:45:00+00:00,37.5979,0
2019-04-17T00:50:00+00:00,71.2115,0
2019-04-17T00:55:00+00:00,53.3279,0
2019-04-17T01:00:00+00:00,32.997,0
2019-04-17T01:05:00+00:00,16.6621,0
2019-04-17T01:10:00+00:00,7.67766,0
2019-04-17T01:15:00+00:00,4.32305,0
2019-04-17T01:20:00+00:00,3.5645599999999997,0
2019-04-17T01:25:00+00:00,2.1276599999999997,0
2019-04-17T01:30:00+00:00,3.36729,0
2019-04-17T01:35:00+00:00,1.8378400000000001,0
2019-04-17T01:40:00+00:00,0.7339,0
2019-04-17T01:45:00+00:00,-0.104843,0
2019-04-17T01:50:00+00:00,-0.740074,0
2019-04-17T01:55:00+00:00,-1.4801600000000001,0
2019-04-17T02:00:00+00:00,-1.8501900000000002,0
2019-04-17T02:05:00+00:00,-1.65283,0
2019-04-17T02:10:00+00:00,-2.59021,0
2019-04-17T02:15:00+00:00,-2.96022,0
2019-04-17T02:20:00+00:00,-2.45443,0
2019-04-17T02:25:00+00:00,-1.85005,0
2019-04-17T02:30:00+00:00,-1.8500299999999998,0
2019-04-17T02:35:00+00:00,-1.85,0
2019-04-17T02:40:00+00:00,-2.20764,0
2019-04-17T02:45:00+00:00,-1.8499700000000001,0
2019-04-17T02:50:00+00:00,-1.6402900000000002,0
2019-04-17T02:55:00+00:00,-1.85612,0
2019-04-17T03:00:00+00:00,-1.8499700000000001,0
2019-04-17T03:05:00+00:00,-1.81913,0
2019-04-17T03:10:00+00:00,-1.67113,0
2019-04-17T03:15:00+00:00,-1.84995,0
2019-04-17T03:20:00+00:00,-1.8067900000000001,0
2019-04-17T03:25:00+00:00,-1.8499599999999998,0
2019-04-17T03:30:00+00:00,-1.9856200000000002,0
2019-04-17T03:35:00+00:00,-2.95995,0
2019-04-17T03:40:00+00:00,-2.34331,0
2019-04-17T03:45:00+00:00,-1.51698,0
2019-04-17T03:50:00+00:00,-2.33712,0
2019-04-17T03:55:00+00:00,-4.82225,0
2019-04-17T04:00:00+00:00,-3.9835,0
2019-04-17T04:05:00+00:00,-2.30004,0
2019-04-17T04:10:00+00:00,-2.2198,0
2019-04-17T04:15:00+00:00,-2.90423,0
2019-04-17T04:20:00+00:00,-3.32355,0
2019-04-17T04:25:00+00:00,-2.8610700000000002,0
2019-04-17T04:30:00+00:00,-2.5897900000000003,0
2019-04-17T04:35:00+00:00,-2.21365,0
2019-04-17T04:40:00+00:00,-2.78091,0
2019-04-17T04:45:00+00:00,-3.3296900000000003,0
2019-04-17T04:50:00+00:00,-3.5455,0
2019-04-17T04:55:00+00:00,-2.9597599999999997,0
2019-04-17T05:00:00+00:00,-3.3112,0
2019-04-17T05:05:00+00:00,-2.5898,0
2019-04-17T05:10:00+00:00,-2.58981,0
2019-04-17T05:15:00+00:00,-2.5897799999999997,0
2019-04-17T05:20:00+00:00,-2.5897799999999997,0
2019-04-17T05:25:00+00:00,-1.85601,0
2019-04-17T05:30:00+00:00,-1.8498299999999999,0
2019-04-17T05:35:00+00:00,-1.84981,0
2019-04-17T05:40:00+00:00,-1.84981,0
2019-04-17T05:45:00+00:00,-1.84978,0
2019-04-17T05:50:00+00:00,-1.84978,0
2019-04-17T05:55:00+00:00,-1.8497700000000001,0
2019-04-17T06:00:00+00:00,-1.76964,0
2019-04-17T06:05:00+00:00,-1.51683,0
2019-04-17T06:10:00+00:00,-1.15921,0
2019-04-17T06:15:00+00:00,-1.49834,0
2019-04-17T06:20:00+00:00,-1.51068,0
2019-04-17T06:25:00+00:00,-1.1961899999999999,0
2019-04-17T06:30:00+00:00,-1.84981,0
2019-04-17T06:35:00+00:00,-1.8498700000000001,0
2019-04-17T06:40:00+00:00,-2.18287,0
2019-04-17T06:45:00+00:00,-1.91156,0
2019-04-17T06:50:00+00:00,-1.84988,0
2019-04-17T06:55:00+00:00,-1.8498599999999998,0
"""
obs_df = pd.read_csv(StringIO(ghi_csv))
obs_df = obs_df.set_index('timestamp')
obs_df.index = pd.to_datetime(obs_df.index)
obs_df.index.name = 'timestamp'
return obs_df
@pytest.fixture
def new_role(api):
def fn(**kwargs):
role_json = ROLE.copy()
role_json.update(kwargs)
role = api.post('/roles/', BASE_URL, json=role_json)
role_id = role.data.decode('utf-8')
return role_id
return fn
@pytest.fixture
def new_observation(api):
def fn():
obs = api.post('/observations/', BASE_URL, json=VALID_OBS_JSON)
obs_id = obs.data.decode('utf-8')
return obs_id
return fn
@pytest.fixture
def new_forecast(api):
def fn():
fx = api.post('/forecasts/single/', BASE_URL,
json=VALID_FORECAST_JSON)
fx_id = fx.data.decode('utf-8')
return fx_id
return fn
@pytest.fixture
def new_perm(api):
def fn(**kwargs):
perm_json = PERMISSION.copy()
perm_json.update(kwargs)
perm = api.post('/permissions/', BASE_URL, json=perm_json)
perm_id = perm.data.decode('utf-8')
return perm_id
return fn
@pytest.fixture
def current_roles(api):
roles_req = api.get('/roles/', BASE_URL)
return [role['role_id'] for role in roles_req.json]
@pytest.fixture
def current_role(api, current_roles):
return current_roles[0]
@pytest.fixture
def remove_perms_from_current_role(api, current_role):
def fn(action, object_type):
perm_req = api.get('/permissions/', BASE_URL)
perms = perm_req.json
to_remove = [perm['permission_id'] for perm in perms
if perm['object_type'] == object_type
and perm['action'] == action]
for perm_id in to_remove:
api.delete(f'/roles/{current_role}/permissions/{perm_id}',
BASE_URL)
return fn
@pytest.fixture
def remove_all_perms(api, current_roles):
def fn(action, object_type):
perm_req = api.get('/permissions/', BASE_URL)
perms = perm_req.json
to_remove = [perm['permission_id'] for perm in perms
if perm['object_type'] == object_type
and perm['action'] == action]
for role_id in current_roles:
for perm_id in to_remove:
api.delete(f'/roles/{role_id}/permissions/{perm_id}',
BASE_URL)
return fn
@pytest.fixture
def random_post_payload():
def fn(npts, mimetype, include_flags=True):
idx = pd.date_range(
start=pd.to_datetime('2017-01-01T00:00Z'),
periods=npts,
freq='1min'
)
data = {'value': np.random.uniform(0, 999.9, size=idx.size)}
if include_flags:
data['quality_flags'] = 0
df = pd.DataFrame(data=data, index=idx)
if mimetype == 'application/json':
value_string = json.dumps({
'values': df.to_dict(orient='records')
})
else:
value_string = df.to_csv(index=False)
return value_string
return fn
@pytest.fixture()
def startend():
return '?start=20190101T0000Z&end=20200101T0000Z'
@pytest.fixture()
def root_cursor(mocker):
app = create_app('TestingConfig')
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = '<PASSWORD>'
with app.app_context():
try:
storage_interface.mysql_connection()
except pymysql.err.OperationalError:
pytest.skip('No connection to test database')
else:
with _make_nocommit_cursor(mocker) as cursor:
yield cursor
@pytest.fixture()
def addmayvalues(root_cursor):
root_cursor.execute(
"INSERT INTO forecasts_values (id, timestamp, value) "
"SELECT id, '2019-05-01 00:00', 1.0 FROM forecasts"
)
root_cursor.execute(
"INSERT INTO observations_values (id, timestamp, value, quality_flag)"
" SELECT id, '2019-05-01 00:00', 1.0, 1 FROM observations"
)
root_cursor.execute(
"INSERT INTO observations_values (id, timestamp, value, quality_flag)"
" SELECT id, '2019-05-02 00:00', 1.0, 5 FROM observations"
)
root_cursor.execute(
"INSERT INTO cdf_forecasts_values (id, timestamp, value) "
"SELECT | |
""" Functions to create S104 files and populate with data from other sources
"""
import logging
import sys
import datetime
import numpy
from ..s1xx import s1xx_sequence
from .api import S104File, FILLVALUE_HEIGHT, FILLVALUE_TREND, S104Exception
def _get_S104File(output_file):
""" Small helper function to convert the output_file parameter into a S104File"""
if isinstance(output_file, S104File):
data_file = output_file
else:
try:
data_file = S104File(output_file, "w")
except TypeError as typeerr:
msg = "Failed to create S104File using {}".format(str(output_file))
logging.error(msg)
raise type(typeerr)(msg).with_traceback(sys.exc_info()[2])
return data_file
def create_s104(output_file) -> S104File:
""" Creates or updates an S104File object.
Default values are set for any data that have no user-provided options or that the S104 spec requires to be filled in.
Parameters
----------
output_file
S104File object
Returns
-------
data_file
The S104File object created or updated by this function.
"""
data_file = _get_S104File(output_file)
root = data_file.root
root.water_level_create()
root.feature_information_create()
group_f = root.feature_information
group_f.feature_code_create()
group_f.water_level_feature_dataset_create()
water_level_feature_dataset = root.feature_information.water_level_feature_dataset
water_level_height_info = water_level_feature_dataset.append_new_item()
water_level_height_info.code = "waterLevelHeight"
water_level_height_info.name = "Water level height"
water_level_height_info.unit_of_measure = "meters"
water_level_height_info.datatype = "H5T_FLOAT"
water_level_height_info.fill_value = FILLVALUE_HEIGHT
water_level_height_info.lower = "-99.99"
water_level_height_info.upper = "99.99"
water_level_height_info.closure = "closedInterval"
water_level_trend_info = water_level_feature_dataset.append_new_item()
water_level_trend_info.code = "waterLevelTrend"
water_level_trend_info.name = "Water level trend"
water_level_trend_info.unit_of_measure = ""
water_level_trend_info.datatype = "H5T_ENUM"
water_level_trend_info.fill_value = FILLVALUE_TREND
water_level_trend_info.lower = "1"
water_level_trend_info.upper = "3"
water_level_trend_info.closure = "closedInterval"
water_level_time_info = water_level_feature_dataset.append_new_item()
water_level_time_info.code = "waterLevelTime"
water_level_time_info.name = "Water level time"
water_level_time_info.unit_of_measure = "DateTime"
water_level_time_info.datatype = "H5T_C_S1"
water_level_time_info.fill_value = ""
water_level_time_info.lower = "19000101T000000Z"
water_level_time_info.upper = "21500101T000000Z"
water_level_time_info.closure = "closedInterval"
utc_now = datetime.datetime.utcnow()
root.issue_date = utc_now.strftime('%Y%m%d')
root.issue_time = utc_now.strftime('%H%M%SZ')
return data_file
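# Minimal sketch of the first step; the filename is illustrative. Per
# _get_S104File above, either an existing S104File or a path acceptable to
# S104File(..., "w") may be passed:
#
#   data_file = create_s104("tampa_bay_wl.h5")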
def add_metadata(metadata: dict, data_file) -> S104File:
""" Updates an S104File object based on input metadata.
Parameters
----------
data_file
S104File object
metadata
a dictionary of metadata describing the data passed in,
metadata should have the following key/value pairs:
- "productSpecification": The product specification used to create
this dataset.
- "horizontalCRS": Horizontal Datum EPSG code.
- "metadata": File name for the associated discovery metadata (xml)
- "geographicIdentifier": Location of the data, ex: "Tampa Bay".
An empty string ("") is the default.
- "waterLevelHeightUncertainty": In (meters) arises from the
hydrodynamic model, and the spatial interpolation method.
The default, denoting a missing value, is -1.0.
- "verticalUncertainty": Accuracy of vertical datum
The default, denoting a missing value, is -1.0.
- "horizontalPositionUncertainty": Accuracy of geolocation
techniques, model grid accuracy. The default, denoting a missing
value, is -1.0.
- "timeUncertainty": Sensor accuracy, data time tagging accuracy
The default, denoting a missing value, is -1.0.
- "waterLevelTrendThreshold": Critical value used to determine
steady water level trend. Units are meters/hour (m/hr).
- "verticalCS": Vertical datum EPSG Code.
- "verticalCoordinateBase":
- 'Sea surface': 1
- 'Vertical datum': 2
- 'Sea Bottom': 3
- "verticalDatumReference": For verticalCoordinateBase(2) only
- 'S-100 vertical datum': 1
- 'EPSG': 2
- "verticalDatum":
- 'meanLowWaterSprings': 1
- 'meanLowerLowWaterSprings': 2
- 'meanSeaLevel': 3
- 'lowestLowWater': 4
- 'meanLowWater': 5
- 'lowestLowWaterSprings': 6
- 'approximateMeanLowWaterSprings': 7
- 'indianSpringLowWater': 8
- 'lowWaterSprings': 9
- 'approximateLowestAstronomicalTide': 10
- 'nearlyLowestLowWater': 11
- 'meanLowerLowWater': 12
- 'lowWater': 13
- 'approximateMeanLowWater': 14
- 'approximateMeanLowerLowWater': 15
- 'meanHighWater': 16
- 'meanHighWaterSprings': 17
- 'highWater': 18
- 'approximateMeanSeaLevel': 19
- 'highWaterSprings': 20
- 'meanHigherHighWater': 21
- 'equinoctialSpringLowWater': 22
- 'lowestAstronomicalTide': 23
- 'localDatum': 24
- 'internationalGreatLakesDatum1985': 25
- 'meanWaterLevel': 26
- 'lowerLowWaterLargeTide': 27
- 'higherHighWaterLargeTide': 28
- 'nearlyHighestHighWater': 29
- 'highestAstronomicalTide': 30
- "verticalDatumReference":
- 'S-100 Vertical datum': 1
- 'EPSG code': 2
- "commonPointRule":
- 'average': 1
- 'low': 2
- 'high': 3
- 'all': 4
- "interpolationType": Interpolation method recommended for
evaluation of the S100_GridCoverage.
- 'nearestneighbor': 1
- 'linear': 2
- 'quadratic': 3
- 'cubic': 4
- 'bilinear': 5
- 'biquadratic': 6
- 'bicubic': 7
- 'lostarea': 8
- 'barycentric': 9
- 'discrete': 10
- "typeOfWaterLevelData:
- Observation: 1
- Astronomical prediction: 2
- Analysis or hybrid method: 3
- Hydrodynamic model hindcast: 4
- Hydrodynamic model forecast: 5
- Observed minus predicted: 6
- Observed minus analysis: 7
- Observed minus hindcast: 8
- Observed minus forecast: 9
- Forecast minus predicted: 10
- "methodWaterLevelProduct": Brief description of tide gauge type,
forecast method or model, etc.
- "datetimeOfFirstRecord": Valid time of earliest value,
'YYYYMMDDTHHMMSSZ'
Returns
-------
data_file
An S104File object updated by this function.
"""
root = data_file.root
water_level_feature = root.water_level
water_level_feature.water_level_create()
water_level_feature_instance_01 = water_level_feature.water_level.append_new_item()
water_level_feature_instance_01.water_level_group_create()
water_level_feature_instance_01.uncertainty_dataset_create()
water_level_height_uncertainty = water_level_feature_instance_01.uncertainty_dataset.append_new_item()
water_level_height_uncertainty.name = "waterLevelHeight"
water_level_height_uncertainty.value = metadata["waterLevelHeightUncertainty"]
water_level_feature.min_dataset_height = 0
water_level_feature.max_dataset_height = 0
water_level_feature_instance_01.time_record_interval = 0
root.product_specification = S104File.PRODUCT_SPECIFICATION
root.metadata = metadata["metadata"]
root.horizontal_crs = metadata["horizontalCRS"]
root.geographic_identifier = metadata["geographicIdentifier"]
root.water_level_trend_threshold = metadata["waterLevelTrendThreshold"]
root.vertical_coordinate_system = metadata["verticalCS"]
root.vertical_coordinate_base = metadata["verticalCoordinateBase"]
root.vertical_datum_reference = metadata["verticalDatumReference"]
root.vertical_datum_epsg = metadata["verticalDatum"]
water_level_feature.common_point_rule = metadata["commonPointRule"]
water_level_feature.interpolation_type = metadata["interpolationType"]
water_level_feature.time_uncertainty = metadata["timeUncertainty"]
water_level_feature.vertical_uncertainty = metadata["verticalUncertainty"]
water_level_feature.horizontal_position_uncertainty = metadata["horizontalPositionUncertainty"]
water_level_feature.method_water_level_product = metadata["methodWaterLevelProduct"]
water_level_feature_instance_01.type_of_water_level_data = metadata["typeOfWaterLevelData"]
water_level_feature_instance_01.date_time_of_first_record = metadata["datetimeOfFirstRecord"]
return data_file
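# Illustrative metadata dict for add_metadata(); every value below is an
# example chosen for demonstration, not a value mandated by the spec or by
# this module:
#
#   metadata = {
#       "horizontalCRS": 4326,
#       "metadata": "MD_tampa_bay.xml",
#       "geographicIdentifier": "Tampa Bay",
#       "waterLevelHeightUncertainty": -1.0,
#       "verticalUncertainty": -1.0,
#       "horizontalPositionUncertainty": -1.0,
#       "timeUncertainty": -1.0,
#       "waterLevelTrendThreshold": 0.2,
#       "verticalCS": 6499,
#       "verticalCoordinateBase": 2,
#       "verticalDatumReference": 1,
#       "verticalDatum": 12,
#       "commonPointRule": 4,
#       "interpolationType": 10,
#       "typeOfWaterLevelData": 5,
#       "methodWaterLevelProduct": "Hydrodynamic model forecast",
#       "datetimeOfFirstRecord": "20210101T000000Z",
#   }
#   data_file = add_metadata(metadata, data_file)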
def add_data_from_arrays(height: s1xx_sequence, trend, data_file, grid_properties: dict, datetime_value, data_coding_format) -> S104File:
""" Updates an S104File object based on numpy array/h5py datasets.
Calls :any:`create_s104` then fills in the HDF5 datasets with the
supplied water level height and trend numpy.arrays.
Raises an S104Exception if the shapes of the water level height and
trend (if not None) grids are not equal.
Parameters
----------
height
1d or 2d array containing water level heights
trend
1d or 2d array containing water level trends
data_file
S104File object
datetime_value
datetime object
grid_properties
a dictionary of metadata describing the grids passed in,
metadata can have the following key/value pairs:
- "maxx": West bound longitude
- "minx": East bound longitude
- "miny": South bound latitude
- "maxy": North bound latitude
- "cellsize_x": Only for DCF2, grid spacing longitude
- "cellsize_y": Only for DCF2, grid spacing latitude
- "nx": Only for DCF2, number of points longitudinal
- "ny": Only for DCF2, number of points latitudinal
- "latitude": Only for DCF3, latitude of nodes
- "longitude": Only for DCF3, longitudes of nodes
- "nodes": Only for DCF3, number of nodes
data_coding_format
- 'Time Series at fixed stations': 1
- 'Regularly-Gridded arrays': 2
- 'Ungeorectified Grid': 3
- 'TIN': 7
- 'Time Series at fixed stations (stationwise)': 8
Returns
-------
data_file
An S104File object updated by this function.
"""
root = data_file.root
water_level_feature = root.water_level
water_level_feature_instance_01 = root.water_level.water_level[0]
if data_coding_format == 2:
water_level_feature.data_coding_format = data_coding_format
water_level_feature_instance_01.start_sequence = "0,0"
water_level_feature.sequencing_rule_scan_direction = "longitude, latitude"
water_level_feature.sequencing_rule_type = 1
water_level_feature_instance_01.grid_origin_longitude = grid_properties['maxx']
water_level_feature_instance_01.grid_origin_latitude = grid_properties['miny']
water_level_feature_instance_01.grid_spacing_longitudinal = grid_properties['cellsize_x']
water_level_feature_instance_01.grid_spacing_latitudinal = grid_properties['cellsize_y']
water_level_feature_instance_01.num_points_latitudinal = grid_properties['ny']
water_level_feature_instance_01.num_points_longitudinal = grid_properties['nx']
elif data_coding_format == 3:
water_level_feature.data_coding_format = data_coding_format
water_level_feature_instance_01.number_of_nodes = grid_properties['nodes']
water_level_feature_instance_01.positioning_group_create()
positioning = water_level_feature_instance_01.positioning_group
positioning.geometry_values_create()
geometry_values = positioning.geometry_values
geometry_values.longitude = grid_properties['longitude']
geometry_values.latitude = grid_properties['latitude']
water_level_feature_instance_01.east_bound_longitude = grid_properties['minx']
water_level_feature_instance_01.west_bound_longitude = grid_properties['maxx']
water_level_feature_instance_01.south_bound_latitude = grid_properties['miny']
water_level_feature_instance_01.north_bound_latitude = grid_properties['maxy']
root.water_level.dimension = height.ndim
water_level_feature.axis_names = numpy.array(["longitude", "latitude"])
min_height = numpy.round(numpy.nanmin(height), decimals=2)
max_height = numpy.round(numpy.nanmax(height), decimals=2)
if min_height < water_level_feature.min_dataset_height:
water_level_feature.min_dataset_height = min_height
if max_height > water_level_feature.max_dataset_height:
water_level_feature.max_dataset_height = max_height
if numpy.ma.is_masked(height):
height = height.filled(FILLVALUE_HEIGHT)
height = numpy.round(height, decimals=2)
trend = trend.astype(int)
if height.shape != trend.shape:
raise S104Exception("Water level height & trend grids have different shapes")
water_level_group_object = water_level_feature_instance_01.water_level_group.append_new_item()
water_level_group_object.time_point = datetime_value
water_level_group_object.values_create()
grid = water_level_group_object.values
grid.water_level_height = height
grid.water_level_trend = trend
return data_file
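# A minimal usage sketch for the function above (the grid values and the
# create_s104 call are illustrative assumptions, not part of this module):
#
# import datetime
# import numpy
#
# height = numpy.zeros((10, 10), dtype=numpy.float32)  # water level heights
# trend = numpy.zeros((10, 10), dtype=numpy.int8)      # water level trend flags
# grid_properties = {
#     "maxx": -70.0, "minx": -71.0, "miny": 42.0, "maxy": 43.0,
#     "cellsize_x": 0.1, "cellsize_y": 0.1, "nx": 10, "ny": 10,
# }
# data_file = create_s104("example.h5")  # assumed helper referenced in the docstring
# data_file = add_data_from_arrays(height, trend, data_file, grid_properties,
#                                  datetime.datetime(2021, 1, 1, 0, 0), 2)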
def update_metadata(data_file, grid_properties: dict, update_meta: dict) -> S104File:
""" Updates an S104File object based on dynamic metadata.
Parameters
----------
data_file
S104File object
grid_properties
a dictionary of metadata describing the dynamic data passed in,
metadata can have the following key/value pairs:
- "maxx": West bound longitude
- "minx": East bound longitude
- "miny": South bound latitude
- "maxy": North bound latitude
- "cellsize_x": Only for DCF2, grid spacing longitude
- "cellsize_y": Only for DCF2, grid spacing latitude
- "nx": Only for DCF2, number of points longitudinal
- "ny": Only for DCF2, number of points latitudinal
- "latitude": Only for DCF3, latitude of nodes
- "longitude": Only for DCF3, longitudes of nodes
- "nodes": Only for DCF3, number of nodes
update_meta
a dictionary of dynamic metadata, metadata can have the following
key/value pairs:
- "dateTimeOfLastRecord": Valid ISO 8601 time of latest value
- "numberOfGroups": Number of forecasts
- "numberOfTimes": Number of valid times
- "timeRecordInterval": Time between forecasts in seconds
- "num_instances": Number of water level feature instances
find = npisfinite(times) & npisfinite(mags) & npisfinite(errs)
ftimes, fmags, ferrs = times[find], mags[find], errs[find]
# get the center value and stdev
if meanormedian == 'median': # stddev = 1.483 x MAD
center_mag = npmedian(fmags)
stddev_mag = (npmedian(npabs(fmags - center_mag))) * 1.483
elif meanormedian == 'mean':
center_mag = npmean(fmags)
stddev_mag = npstddev(fmags)
else:
LOGWARNING("unrecognized meanormedian value given to "
"sigclip_magseries: %s, defaulting to 'median'" %
meanormedian)
meanormedian = 'median'
center_mag = npmedian(fmags)
stddev_mag = (npmedian(npabs(fmags - center_mag))) * 1.483
# sigclip next for a single sigclip value
if sigclip and isinstance(sigclip, (float, int)):
if not iterative and niterations is None:
sigind = (npabs(fmags - center_mag)) < (sigclip * stddev_mag)
stimes = ftimes[sigind]
smags = fmags[sigind]
serrs = ferrs[sigind]
else:
#
# iterative version adapted from scipy.stats.sigmaclip
#
# First, if niterations is not set, iterate until convergence
if niterations is None:
delta = 1
this_times = ftimes
this_mags = fmags
this_errs = ferrs
while delta:
if meanormedian == 'mean':
this_center = npmean(this_mags)
this_stdev = npstddev(this_mags)
elif meanormedian == 'median':
this_center = npmedian(this_mags)
this_stdev = (
npmedian(npabs(this_mags - this_center))
) * 1.483
this_size = this_mags.size
# apply the sigclip
tsi = (
(npabs(this_mags - this_center)) <
(sigclip * this_stdev)
)
# update the arrays
this_times = this_times[tsi]
this_mags = this_mags[tsi]
this_errs = this_errs[tsi]
# update delta and go to the top of the loop
delta = this_size - this_mags.size
else: # If iterating only a certain number of times
this_times = ftimes
this_mags = fmags
this_errs = ferrs
iter_num = 0
delta = 1
while iter_num < niterations and delta:
if meanormedian == 'mean':
this_center = npmean(this_mags)
this_stdev = npstddev(this_mags)
elif meanormedian == 'median':
this_center = npmedian(this_mags)
this_stdev = (npmedian(npabs(this_mags -
this_center))) * 1.483
this_size = this_mags.size
# apply the sigclip
tsi = (
(npabs(this_mags - this_center)) <
(sigclip * this_stdev)
)
# update the arrays
this_times = this_times[tsi]
this_mags = this_mags[tsi]
this_errs = this_errs[tsi]
# update the number of iterations and delta and
# go to the top of the loop
delta = this_size - this_mags.size
iter_num += 1
# final sigclipped versions
stimes, smags, serrs = this_times, this_mags, this_errs
# this handles sigclipping for asymmetric +ve and -ve clip values
elif sigclip and isinstance(sigclip, (list,tuple)) and len(sigclip) == 2:
# sigclip is passed as [dimmingclip, brighteningclip]
dimmingclip = sigclip[0]
brighteningclip = sigclip[1]
if not iterative and niterations is None:
if magsarefluxes:
nottoodimind = (
(fmags - center_mag) > (-dimmingclip*stddev_mag)
)
nottoobrightind = (
(fmags - center_mag) < (brighteningclip*stddev_mag)
)
else:
nottoodimind = (
(fmags - center_mag) < (dimmingclip*stddev_mag)
)
nottoobrightind = (
(fmags - center_mag) > (-brighteningclip*stddev_mag)
)
sigind = nottoodimind & nottoobrightind
stimes = ftimes[sigind]
smags = fmags[sigind]
serrs = ferrs[sigind]
else:
#
# iterative version adapted from scipy.stats.sigmaclip
#
if niterations is None:
delta = 1
this_times = ftimes
this_mags = fmags
this_errs = ferrs
while delta:
if meanormedian == 'mean':
this_center = npmean(this_mags)
this_stdev = npstddev(this_mags)
elif meanormedian == 'median':
this_center = npmedian(this_mags)
this_stdev = (npmedian(npabs(this_mags -
this_center))) * 1.483
this_size = this_mags.size
if magsarefluxes:
nottoodimind = (
(this_mags - this_center) >
(-dimmingclip*this_stdev)
)
nottoobrightind = (
(this_mags - this_center) <
(brighteningclip*this_stdev)
)
else:
nottoodimind = (
(this_mags - this_center) <
(dimmingclip*this_stdev)
)
nottoobrightind = (
(this_mags - this_center) >
(-brighteningclip*this_stdev)
)
# apply the sigclip
tsi = nottoodimind & nottoobrightind
# update the arrays
this_times = this_times[tsi]
this_mags = this_mags[tsi]
this_errs = this_errs[tsi]
# update delta and go to top of the loop
delta = this_size - this_mags.size
else: # If iterating only a certain number of times
this_times = ftimes
this_mags = fmags
this_errs = ferrs
iter_num = 0
delta = 1
while iter_num < niterations and delta:
if meanormedian == 'mean':
this_center = npmean(this_mags)
this_stdev = npstddev(this_mags)
elif meanormedian == 'median':
this_center = npmedian(this_mags)
this_stdev = (npmedian(npabs(this_mags -
this_center))) * 1.483
this_size = this_mags.size
if magsarefluxes:
nottoodimind = (
(this_mags - this_center) >
(-dimmingclip*this_stdev)
)
nottoobrightind = (
(this_mags - this_center) <
(brighteningclip*this_stdev)
)
else:
nottoodimind = (
(this_mags - this_center) < (dimmingclip*this_stdev)
)
nottoobrightind = (
(this_mags - this_center) >
(-brighteningclip*this_stdev)
)
# apply the sigclip
tsi = nottoodimind & nottoobrightind
# update the arrays
this_times = this_times[tsi]
this_mags = this_mags[tsi]
this_errs = this_errs[tsi]
# update the number of iterations and delta
# and go to top of the loop
delta = this_size - this_mags.size
iter_num += 1
# final sigclipped versions
stimes, smags, serrs = this_times, this_mags, this_errs
else:
stimes = ftimes
smags = fmags
serrs = ferrs
if returnerrs:
return stimes, smags, serrs
else:
return stimes, smags, None
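# A short usage sketch for the sigma-clipper above (the call signature is
# inferred from the body: sigclip may be a single number or a
# [dimmingclip, brighteningclip] pair, and niterations caps the iterations):
#
# import numpy as np
# times = np.linspace(0.0, 10.0, 1000)
# mags = np.random.normal(12.0, 0.01, 1000)
# mags[::100] += 0.5  # inject faint outliers
# stimes, smags, serrs = sigclip_magseries(times, mags, None, sigclip=3.0)
# stimes, smags, serrs = sigclip_magseries(times, mags, None,
#                                          sigclip=[10., 3.],
#                                          iterative=True, niterations=5)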
def sigclip_magseries_with_extparams(times, mags, errs, extparams,
sigclip=None,
iterative=False,
magsarefluxes=False):
'''Sigma-clips a magnitude or flux time-series and associated measurement
arrays.
Selects the finite times, magnitudes (or fluxes), and errors from the passed
values, and apply symmetric or asymmetric sigma clipping to them. Uses the
same array indices as these values to filter out the values of all arrays in
the `extparams` list. This can be useful for simultaneously sigma-clipping a
magnitude/flux time-series along with their associated values of external
parameters, such as telescope hour angle, zenith distance, temperature, moon
phase, etc.
Parameters
----------
times,mags,errs : np.array
The magnitude or flux time-series arrays to sigma-clip. This doesn't
assume all values are finite or if they're positive/negative. All of
these arrays will have their non-finite elements removed, and then will
be sigma-clipped based on the arguments to this function.
`errs` is optional. Set it to None if you don't have values for these. A
'faked' `errs` array will be generated if necessary, which can be
ignored in the output as well.
extparams : list of np.array
This is a list of all external parameter arrays to simultaneously filter
along with the magnitude/flux time-series. All of these arrays should
have the same length as the `times`, `mags`, and `errs` arrays.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
iterative : bool
If this is set to True, will perform iterative sigma-clipping. If
`niterations` is not set and this is True, sigma-clipping is iterated
until no more points are removed.
magsarefluxes : bool
True if your "mags" are in fact fluxes, i.e. if "fainter" corresponds to
`mags` getting smaller.
Returns
-------
(stimes, smags, serrs, extparams) : tuple
The sigma-clipped and nan-stripped time-series in `stimes`, `smags`,
`serrs`, and the correspondingly filtered `extparams` arrays.
'''
returnerrs = True
# fake the errors if they don't exist
# this is inconsequential to sigma-clipping
# we don't return these dummy values if the input errs are None
if errs is None:
# assume 0.1% errors if not given
# this should work for mags and fluxes
errs = 0.001*mags
returnerrs = False
# filter the input times, mags, errs; do sigclipping and normalization
find = npisfinite(times) & npisfinite(mags) & npisfinite(errs)
ftimes, fmags, ferrs = times[find], mags[find], errs[find]
# apply the same indices to the external parameters
for epi, eparr in enumerate(extparams):
extparams[epi] = eparr[find]
# get the median and stdev = 1.483 x MAD
median_mag = npmedian(fmags)
""" Consolidating concepts by implementing heap creation
Originally by: Wyzzard123
Feel free to make changes."""
# Heap Array Representation Rules:
# for i starting at 1:
# for a node at i, parent node is at = i // 2
# for a node at i, left child node is at i * 2
# for a node at i, right child node is at (i * 2) + 1
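# Worked example of the rules above, for the 1-based positions of the array
# [50, 30, 40, 10, 20]:
# node at i = 2 (value 30): parent at 2 // 2 = 1 (value 50),
# left child at 2 * 2 = 4 (value 10), right child at 2 * 2 + 1 = 5 (value 20).
# Python lists are 0-based, so the code below subtracts 1 from these positions.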
def heap_insert(heap, value, type="max"):
"""Insert value at the top of a max or min heap"""
heap.append(value)
n = len(heap)
current_index = n
current_i = n - 1
parent_index = current_index // 2
# print("not none")
# print(f"parent_index - 1 is {parent_index-1}")
# print(f"current index is {current_i}")
# print(f"heap[parent_index -1] = {heap[parent_index - 1]}; heap[current_i] = {heap[current_i]}")
if type == "max":
while heap[parent_index - 1] < heap[current_i]:
print(f"current_index is {current_index}")
print(f"current i is {current_i}")
temp_value = heap[parent_index - 1]
heap[parent_index - 1] = heap[current_i]
heap[current_i] = temp_value
current_index //= 2
parent_index = current_index // 2
current_i = current_index - 1
left_child = current_index * 2
right_child = (current_index * 2 + 1)
if current_i == 0:
break
else:
pass
# print("none")
elif type == "min":
while heap[parent_index - 1] > heap[current_i]:
print(f"current_index is {current_index}")
print(f"current i is {current_i}")
temp_value = heap[parent_index - 1]
heap[parent_index - 1] = heap[current_i]
heap[current_i] = temp_value
current_index //= 2
parent_index = current_index // 2
current_i = current_index - 1
left_child = current_index * 2
right_child = (current_index * 2 + 1)
if current_i == 0:
break
else:
pass
# print("none")
return heap
def heap_create(array, type="max"):
"""Create max or min heap from a given array in nlogn time"""
# Start from the left and add
n = len(array)
heap = [None] * n
# print(heap)
height = 0
# Max Heap
if type == "max":
# starting from 1 to handle this mathematically
for index in range (1, n + 1):
parent_index = index // 2
left_child = index * 2
right_child = (index * 2) + 1
# insert index into heap
heap[index - 1] = array[index - 1]
# -1 to compensate for 0-counting
if heap[parent_index - 1] is not None:
current_index = index
i = index - 1
current_i = i
# print("not none")
# print(f"parent_index - 1 is {parent_index-1}")
# print(f"current index is {current_i}")
# print(f"heap[parent_index -1] = {heap[parent_index - 1]}; heap[current_i] = {heap[current_i]}")
while heap[parent_index - 1] < heap[current_i]:
# print(f"current_index is {current_index}")
# print(f"current i is {current_i}")
temp_value = heap[parent_index - 1]
heap[parent_index - 1] = heap[current_i]
heap[current_i] = temp_value
current_index //= 2
parent_index = current_index // 2
current_i = current_index - 1
left_child = current_index * 2
right_child = (current_index * 2 + 1)
if current_i == 0:
break
else:
# print("none")
pass
# Min Heap
if type == "min":
# starting from 1 to handle this mathematically
for index in range (1, n + 1):
parent_index = index // 2
left_child = index * 2
right_child = (index * 2) + 1
# insert index into heap
heap[index - 1] = array[index - 1]
# -1 to compensate for 0-counting
if heap[parent_index - 1] is not None:
current_index = index
i = index - 1
current_i = i
# print("not none")
# print(f"parent_index - 1 is {parent_index-1}")
# print(f"current index is {current_i}")
# print(f"heap[parent_index -1] = {heap[parent_index - 1]}; heap[current_i] = {heap[current_i]}")
while heap[parent_index - 1] > heap[current_i]:
# print(f"current_index is {current_index}")
# print(f"current i is {current_i}")
temp_value = heap[parent_index - 1]
heap[parent_index - 1] = heap[current_i]
heap[current_i] = temp_value
current_index //= 2
parent_index = current_index // 2
current_i = current_index - 1
left_child = current_index * 2
right_child = (current_index * 2 + 1)
if current_i == 0:
break
else:
pass
# print("none")
return heap
# array = [30, 123, 12, 321, 10, 442, 13, 320, 111, 310, 11, 1]
# # array = [1, 2, 3, 4, 5]
# heap = heap_create(array, "max")
# print("array is", array)
# print(heap)
# heap_insert(heap,554)
# print(heap)
def heapify(array, type="max"):
"""Create a max or min heap from a given array in logn time."""
#TODO
pass
def heap_delete(heap, type="max"):
"""Delete from top of a max or min heap in logn time. Returns the deleted value (which can be used in heap sort)."""
deleted_element = heap[0] # Stores deleted element to be returned
n = len(heap)
current_index = 1
current_i = current_index - 1
left_child = current_index * 2
right_child = (current_index * 2) + 1
left_child_i = left_child - 1
right_child_i = right_child - 1
has_left_child = True
has_right_child = True
last_element_index = n - 1
if right_child_i > last_element_index:
has_right_child = False
if left_child_i > last_element_index:
has_left_child = False
# Replace deleted element with last element
heap[current_i] = heap[last_element_index]
# Remove last element of heap.
heap.pop()
n = len(heap)
last_element_index = n - 1
if right_child_i > last_element_index:
has_right_child = False
if left_child_i > last_element_index:
has_left_child = False
if type == "max":
# Check and swap elements below
# Condition is if it has a left child because this is a complete binary tree
while has_left_child is True:
if has_right_child is True:
# Check which child node is greater
if heap[right_child_i] > heap[left_child_i]:
# Check if greater child node is greater than current node
if heap[right_child_i] > heap[current_i]:
temp_value = heap[right_child_i]
heap[right_child_i] = heap[current_i]
heap[current_i] = temp_value
# + 1 because the current_index is now the right node
current_index = (current_index * 2) + 1
current_i = current_index - 1
left_child = current_index * 2
left_child_i = left_child - 1
right_child = (current_index * 2) + 1
right_child_i = right_child - 1
# Break if the sorting is done
else:
break
# This currently will also swap the left child by default if the two child nodes are equal
elif heap[left_child_i] >= heap[right_child_i]:
if heap[left_child_i] > heap[current_i]:
temp_value = heap[left_child_i]
heap[left_child_i] = heap[current_i]
heap[current_i] = temp_value
current_index *= 2
current_i = current_index - 1
left_child = current_index * 2
left_child_i = left_child - 1
right_child = (current_index * 2 + 1)
right_child_i = right_child - 1
# Break if the sorting is done
else:
break
# In this condition, has_left_child is true, but has_right_child is false
else:
if heap[left_child_i] > heap[current_i]:
temp_value = heap[left_child_i]
heap[left_child_i] = heap[current_i]
heap[current_i] = temp_value
current_index *= 2
current_i = current_index - 1
left_child = current_index * 2
left_child_i = left_child - 1
right_child = (current_index * 2 + 1)
right_child_i = right_child - 1
# Reset has_right_child and has_left_child for loop
if right_child_i > last_element_index:
has_right_child = False
if left_child_i > last_element_index:
has_left_child = False
break
elif type == "min":
# Check and swap elements below
# Condition is if it has a left child because this is a complete binary tree
while has_left_child is True:
if has_right_child is True:
# Check which child node is smaller
if heap[right_child_i] < heap[left_child_i]:
# Check if smaller child node is smaller than current node
if heap[right_child_i] < heap[current_i]:
temp_value = heap[right_child_i]
heap[right_child_i] = heap[current_i]
heap[current_i] = temp_value
# + 1 because the current_index is now the right node
current_index = (current_index * 2) + 1
current_i = current_index - 1
left_child = current_index * 2
left_child_i = left_child - 1
right_child = (current_index * 2) + 1
right_child_i = right_child - 1
# Break if the sorting is done
else:
break
# This currently will also swap the left child by default if the two child nodes are equal
elif heap[left_child_i] <= heap[right_child_i]:
if heap[left_child_i] < heap[current_i]:
temp_value = heap[left_child_i]
heap[left_child_i] = heap[current_i]
heap[current_i] = temp_value
current_index *= 2
current_i = current_index - 1
left_child = current_index * 2
left_child_i = left_child - 1
right_child = (current_index * 2 + 1)
right_child_i = right_child - 1
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: <NAME>
# @Date: 2018-07-14 14:31:36
# @Last modified by: <NAME>
# @Last Modified time: 2018-07-14 19:03:32
from __future__ import print_function, division, absolute_import
import sys
import os
import argparse
import glob
import shutil
import time
import re
import importlib.util
def _remove_link(link):
''' Remove a symlink if it exists
Parameters:
link (str):
The symlink filepath
'''
if os.path.islink(link):
os.remove(link)
def make_symlink(src, link):
'''create a symlink
Parameters:
src (str):
The fullpath source of the symlink
link (str):
The symlink file path
'''
_remove_link(link)
os.symlink(src, link)
def create_index_table(environ, envdir):
''' create an html table
Parameters:
environ (dict):
A tree environment dictionary
envdir (str):
The filepath for the env directory
Returns:
An html table definition string
'''
table_header = """<table id="list" cellpadding="0.1em" cellspacing="0">
<colgroup><col width="55%"/><col width="20%"/><col width="25%"/></colgroup>
<thead>
<tr><th><a href="?C=N&O=A">File Name</a> <a href="?C=N&O=D"> ↓ </a></th><th><a href="?C=S&O=A">File Size</a> <a href="?C=S&O=D"> ↓ </a></th><th><a href="?C=M&O=A">Date</a> <a href="?C=M&O=D"> ↓ </a></th></tr>
</thead><tbody>
<tr><td><a href="../">Parent directory/</a></td><td>-</td><td>-</td></tr>"""
table_footer = """</tbody></table>"""
# create table
table = table_header
# loop over the environment
for section, values in environ.items():
if section == 'default':
continue
for tree_name, tree_path in values.items():
skipmsg = 'Skipping {0} for {1}'.format(tree_name, section)
if '_ROOT' in tree_name:
continue
# create the src and target links
src = tree_path
link = os.path.join(envdir, tree_name.upper())
# get the local time of the symlink
try:
stattime = time.strftime('%d-%b-%Y %H:%M', time.localtime(os.stat(src).st_mtime))
except OSError:
print("{0} does not appear to exist, skipping...".format(src))
_remove_link(link)
continue
# skip the sas_base_dir
if section == 'general' and 'SAS_BASE_DIR' in tree_name:
print(skipmsg)
continue
# only create symlinks
if section == 'general' and tree_name in ['CAS_LOAD', 'STAGING_DATA']:
# only create links here if the target exist
if os.path.exists(src):
make_symlink(src, link)
else:
print(skipmsg)
else:
print('Processing {0} for {1}'.format(tree_name, section))
make_symlink(src, link)
# create the table entry
if os.path.exists(link):
table += ' <tr><td><a href="{0}/">{0}/</a></td><td>-</td><td>{1}</td></tr>\n'.format(tree_name.upper(), stattime)
table += table_footer
return table
def create_index_page(environ, defaults, envdir):
''' create the env index html page
Builds the index.html page containing a table of symlinks
to datamodel directories
Parameters:
environ (dict):
A tree environment dictionary
defaults (dict):
The defaults dictionary from environ['default']
envdir (str):
The filepath for the env directory
Returns:
A string definition of an html page
'''
# header of index file
header = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><meta name="viewport" content="width=device-width"/><meta http-equiv="content-type" content="text/html; charset=utf-8"/><style type="text/css">body,html {{background:#fff;font-family:"Bitstream Vera Sans","Lucida Grande","Lucida Sans Unicode",Lucidux,Verdana,Lucida,sans-serif;}}tr:nth-child(even) {{background:#f4f4f4;}}th,td {{padding:0.1em 0.5em;}}th {{text-align:left;font-weight:bold;background:#eee;border-bottom:1px solid #aaa;}}#list {{border:1px solid #aaa;width:100%%;}}a {{color:#a33;}}a:hover {{color:#e33;}}</style>
<link rel="stylesheet" href="{url}/css/sas.css" type="text/css"/>
<title>Index of /sas/{name}/env/</title>
</head><body><h1>Index of /sas/{name}/env/</h1>
"""
# footer of index file
footer = """<h3><a href='{url}/sas/'>{location}</a></h3>
<p>This directory contains links to the contents of
environment variables defined by the tree product, version {name}.
To examine the <em>types</em> of files contained in each environment variable
directory, visit <a href="/datamodel/files/">the datamodel.</a></p>
</body></html>
"""
# create index html file
index = header.format(**defaults)
index += create_index_table(environ, envdir)
index += footer.format(**defaults)
return index
def create_env(environ, mirror=None, verbose=None):
''' create the env symlink directory structure
Creates the env folder filled with symlinks to datamodel directories
for a given tree config file.
Parameters:
environ (dict):
A tree environment dictionary
mirror (bool):
If True, use the SAM url location
verbose (bool):
If True, print more information
'''
defaults = environ['default'].copy()
defaults['url'] = "https://data.mirror.sdss.org" if mirror else "https://data.sdss.org"
defaults['location'] = "SDSS-IV Science Archive Mirror (SAM)" if mirror else "SDSS-IV Science Archive Server (SAS)"
if not os.path.exists(environ['general']['SAS_ROOT']):
if verbose:
print("{0} doesn't exist, skipping env link creation.".format(environ['general']['SAS_ROOT']))
return
if verbose:
print("Found {0}.".format(environ['general']['SAS_ROOT']))
# sets and creates envdir
envdir = os.path.join(environ['general']['SAS_ROOT'], 'env')
if not os.path.exists(envdir):
os.makedirs(envdir)
if not os.access(envdir, os.W_OK):
return
# create index html
index = create_index_page(environ, defaults, envdir)
# write the index file
indexfile = os.path.join(envdir, 'index.html')
with open(indexfile, 'w') as f:
f.write(index)
def check_sas_base_dir(root=None):
''' Check for the SAS_BASE_DIR environment variable
Will set the SAS_BASE_DIR in your local environment
or prompt you to define one if is undefined
Parameters:
root (str):
Optional override of the SAS_BASE_DIR envvar
'''
sasbasedir = root or os.getenv("SAS_BASE_DIR")
if not sasbasedir:
sasbasedir = input('Enter a path for SAS_BASE_DIR: ')
os.environ['SAS_BASE_DIR'] = sasbasedir
def write_header(term='bash', tree_dir=None, name=None):
''' Write proper file header in a given shell format
Parameters:
term (str):
The type of shell header to write, can be "bash", "tsch", or "modules"
tree_dir (str):
The path to this repository
name (str):
The name of the configuration
Returns:
A string header to insert
'''
assert term in ['bash', 'tsch', 'modules'], 'term must be either bash, tsch, or modules'
product_dir = tree_dir.rstrip('/')
base = 'export' if term == 'bash' else 'setenv'
sep = '=' if term == 'bash' else ' '
if term != 'modules':
hdr = """# Set up tree/{0} for {1}
{2} TREE_DIR{4}{3}
{2} TREE_VER{4}{0}
{2} PATH{4}$TREE_DIR/bin:$PATH
{2} PYTHONPATH{4}$TREE_DIR/python:$PYTHONPATH
""".format(name, term, base, product_dir, sep)
else:
hdr = """#%Module1.0
proc ModulesHelp {{ }} {{
global product version
puts stderr "This module adds $product/$version to various paths"
}}
set name tree
set product tree
set version {1}
conflict $product
module load sdsstools/0.1.7
prereq sdsstools/0.1.7
module load sdss_access
prereq sdss_access
module-whatis "Sets up $product/$version in your environment"
set PRODUCT_DIR {0}
setenv [string toupper $product]_DIR $PRODUCT_DIR
setenv [string toupper $product]_VER $version
prepend-path PATH $PRODUCT_DIR/bin
prepend-path PYTHONPATH $PRODUCT_DIR/python
""".format(product_dir, name)
return hdr.strip()
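# For illustration, write_header(term='bash', tree_dir='/opt/tree', name='dr17')
# (hypothetical arguments) returns a header along these lines:
#
# # Set up tree/dr17 for bash
# export TREE_DIR=/opt/tree
# export TREE_VER=dr17
# export PATH=$TREE_DIR/bin:$PATH
# export PYTHONPATH=$TREE_DIR/python:$PYTHONPATH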
def write_version(name):
''' Make the default modules version string '''
modules_version = "#%Module1.0\nset ModulesVersion {0}".format(name)
return modules_version
def write_file(environ, term='bash', out_dir=None, tree_dir=None, default=None):
''' Write a tree environment file
Loops over the tree environ and writes them out to a bash, tsch, or
modules file
Parameters:
environ (dict):
The tree dictionary environment
term (str):
The type of shell header to write, can be "bash", "tsch", or "modules"
tree_dir (str):
The path to this repository
out_dir (str):
The output path to write the files (default is etc/)
default (str):
The default config to write into the .version file
'''
# get the proper name, header and file extension
name = environ['default']['name']
header = write_header(term=term, name=name, tree_dir=tree_dir)
exts = {'bash': '.sh', 'tsch': '.csh', 'modules': '.module'}
ext = exts[term]
# shell command
if term == 'bash':
cmd = 'export {0}={1}\n'
else:
cmd = 'setenv {0} {1}\n'
# write the environment config files
filename = os.path.join(out_dir, name + ext)
with open(filename, 'w') as f:
f.write(header + '\n')
for key, values in environ.items():
if key != 'default':
# write separator
f.write('#\n# {0}\n#\n'.format(key))
# write tree names and paths
for tree_name, tree_path in values.items():
if tree_path.startswith(os.getenv("SAS_BASE_DIR")):
f.write(cmd.format(tree_name.upper(), tree_path))
# write default .version file for modules (or default for lua modules)
default = default if default else name
modules_version = write_version(default)
if term == 'modules' and environ['default']['current'] == 'True':
# write .version file for tcl
version_name = os.path.join(out_dir, '.version')
with open(version_name, 'w') as f:
f.write(modules_version)
def get_python_path():
''' Finds and switches to the tree python package directory '''
# get the TREE directory
tree_dir = os.getenv('TREE_DIR', None)
if not tree_dir:
path = os.path.dirname(os.path.abspath(__file__))
tree_dir = os.path.realpath(os.path.join(path, '..'))
pypath = os.path.join(tree_dir, 'python')
if pypath not in sys.path:
sys.path.append(pypath)
os.chdir(pypath)
def get_tree(config=None):
''' Get the tree for a given config
Parameters:
config (str):
The name of the tree config to load
Returns:
a Python Tree instance
'''
# ensure the tree package is importable
mod = importlib.util.find_spec('tree')
if not (mod and mod.origin):
get_python_path()
# extract the config format from either XXXX.cfg or full filepath
has_cfg = re.search(r'(\w+)\.cfg', config)
if has_cfg:
config = has_cfg.group()
from tree.tree import Tree
tree = Tree(config=config)
return tree
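# For example, get_tree('dr17') loads the 'dr17' config directly, while
# get_tree('/path/to/dr17.cfg') is reduced to 'dr17.cfg' by the regex above
# before being handed to Tree (the config names here are hypothetical).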
def copy_modules(filespath=None, modules_path=None, verbose=None, default=None):
''' Copy over the tree module files into your path '''
# find or define a modules path
if not modules_path:
modulepath = os.getenv("MODULEPATH")
if not modulepath:
modules_path = input('Enter the root path for your module files:')
else:
split_mods = modulepath.split(':')
if len(split_mods) > 1:
# select which module paths to use
items = ['Multiple module paths found. Choose which module paths to use '
'(e.g. "1,2") or hit enter for "all". Or "q" to quit: ']
items += ['{0}. {1}'.format(i + 1, t) for i, t in enumerate(split_mods)] + ['\n']
msg = '\n'.join(items)
selected = input(msg) or 'all'
# check to quit
if selected == 'q':
if verbose:
print('Quitting module copy.')
return
# select choices
choices = range(len(split_mods)) if selected == 'all' else [
int(i) - 1 for i in selected.split(',')]
# loop over selected module paths to install
8, 7, 6, 5, 4, 3, 2, 1]
if cyclic:
index = slice(start-stop-1, None, step)
roll[i] = -1 - stop
else:
index = slice(start, stop, step)
else:
start, stop, step = index.indices(size)
if (start == stop or
(start < stop and step < 0) or
(start > stop and step > 0)):
raise IndexError(
"Invalid indices dimension with size {}: {}".format(
size, index))
if step < 0 and stop < 0:
stop = None
index = slice(start, stop, step)
elif isinstance(index, (int, _numpy_integer)):
# --------------------------------------------------------
# Index is an integer
# --------------------------------------------------------
if index < 0:
index += size
index = slice(index, index+1, 1)
is_slice = True
else:
convert2positve = True
if (getattr(getattr(index, 'dtype', None), 'kind', None) == 'b' or
isinstance(index[0], bool)):
# ----------------------------------------------------
# Index is a sequence of booleans
# ----------------------------------------------------
# Convert booleans to non-negative integers. We're
# assuming that anything with a dtype attribute also
# has a size attribute.
if _numpy_size(index) != size:
raise IndexError(
"Incorrect number ({}) of boolean indices for dimension with size {}: {}".format(
_numpy_size(index), size, index))
index = _numpy_where(index)[0]
convert2positve = False
if not _numpy_ndim(index):
if index < 0:
index += size
index = slice(index, index+1, 1)
is_slice = True
else:
len_index = len(index)
if len_index == 1:
index = index[0]
if index < 0:
index += size
index = slice(index, index+1, 1)
is_slice = True
elif len_index:
if convert2positve:
# Convert to non-negative integer numpy array
index = _numpy_array(index)
index = _numpy_where(index < 0, index+size, index)
steps = index[1:] - index[:-1]
step = steps[0]
if step and not (steps - step).any():
# Replace the numpy array index with a slice
if step > 0:
start, stop = index[0], index[-1]+1
elif step < 0:
start, stop = index[0], index[-1]-1
if stop < 0:
stop = None
index = slice(start, stop, step)
is_slice = True
else:
if ((step > 0 and (steps <= 0).any()) or
(step < 0 and (steps >= 0).any()) or
not step):
raise ValueError(
"Bad index (not strictly monotonic): {}".format(index))
if reverse and step < 0:
# The array is strictly monotonically
# decreasing, so reverse it so that it's
# strictly monotonically increasing. Make
# a note that this dimension will need
# flipping later
index = index[::-1]
flip.append(i)
step = -step
if envelope:
# Create an envelope slice for a parsed
# index of a numpy array of integers
compressed_indices.append(index)
step = _numpy_sign(step)
if step > 0:
stop = index[-1] + 1
else:
stop = index[-1] - 1
if stop < 0:
stop = None
index = slice(index[0], stop, step)
is_slice = True
else:
raise IndexError(
"Invalid indices {} for array with shape {}".format(
parsed_indices, shape))
#--- End: if
#--- End: if
if is_slice:
if reverse and index.step < 0:
# If the slice step is negative, then transform
# the original slice to a new slice with a
# positive step such that the result of the new
# slice is the reverse of the result of the
# original slice.
#
# For example, if the original slice is
# slice(6,0,-2) then the new slice will be
# slice(2,7,2):
#
# >>> a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# >>> a[slice(6, 0, -2)]
# [6, 4, 2]
# >>> a[slice(2, 7, 2)]
# [2, 4, 6]
# >>> a[slice(6, 0, -2)] == list(reversed(a[slice(2, 7, 2)]))
# True
start, stop, step = index.indices(size)
step *= -1
div, mod = divmod(start-stop-1, step)
div_step = div*step
start -= div_step
stop = start + div_step + 1
index = slice(start, stop, step)
flip.append(i)
#--- End: if
# If step is greater than one then make sure that
# index.stop isn't bigger than it needs to be
if cyclic and index.step > 1:
start, stop, step = index.indices(size)
div, mod = divmod(stop-start-1, step)
stop = start + div*step + 1
index = slice(start, stop, step)
#--- End: if
#
if envelope:
# Create an envelope slice for a parsed
# index of a numpy array of integers
compressed_indices.append(index)
index = slice(start, stop, (1 if reverse else _numpy_sign(step)))
#--- End: if
parsed_indices[i] = index
#--- End: for
if not (cyclic or reverse or envelope or mask):
return parsed_indices
out = [parsed_indices]
if cyclic:
out.append(roll)
if reverse:
out.append(flip)
if envelope:
out.append(compressed_indices)
if mask:
out.append(mask_indices)
return out
def get_subspace(array, indices):
'''TODO
Subset the input numpy array with the given indices. Indexing is
similar to that of a numpy array. The differences to numpy array
indexing are:
1. An integer index i takes the i-th element but does not reduce
the rank of the output array by one.
2. When more than one dimension's slice is a 1-d boolean array or
1-d sequence of integers then these indices work independently
along each dimension (similar to the way vector subscripts work
in Fortran).
indices must contain an index for each dimension of the input array.
:Parameters:
array: `numpy.ndarray`
indices: `list`
'''
gg = [i for i, x in enumerate(indices) if not isinstance(x, slice)]
len_gg = len(gg)
if len_gg < 2:
# ------------------------------------------------------------
# At most one axis has a list-of-integers index so we can do a
# normal numpy subspace
# ------------------------------------------------------------
return array[tuple(indices)]
else:
# ------------------------------------------------------------
# At least two axes have list-of-integers indices so we can't
# do a normal numpy subspace
# ------------------------------------------------------------
if _numpy_ma_isMA(array):
take = _numpy_ma_take
else:
take = _numpy_take
indices = indices[:]
for axis in gg:
array = take(array, indices[axis], axis=axis)
indices[axis] = slice(None)
if len_gg < len(indices):
array = array[tuple(indices)]
return array
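# A short worked example of the difference described in the docstring
# (assuming numpy is importable as np):
#
# import numpy as np
# a = np.arange(12).reshape(3, 4)
# get_subspace(a, [[0, 2], [1, 3]])
# # -> 2-d array [[1, 3], [9, 11]]: the two integer lists act independently
# a[tuple([[0, 2], [1, 3]])]
# # -> 1-d array [1, 11]: plain numpy pairs the indices element-wise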
_equals = cfdm.Data()._equals
def equals(x, y, rtol=None, atol=None, ignore_data_type=False,
**kwargs):
'''True if and only if two objects are equal within numerical
tolerance, using the package default tolerances when `rtol` and
`atol` are not given.
'''
if rtol is None:
rtol = RTOL()
if atol is None:
atol = ATOL()
return _equals(x, y, rtol=rtol, atol=atol,
ignore_data_type=ignore_data_type,
**kwargs)
def equivalent(x, y, rtol=None, atol=None, traceback=False):
'''True if and only if two objects are logically equivalent.
If the first argument, *x*, has an `!equivalent` method then it is
used, and in this case ``equivalent(x, y)`` is the same as
``x.equivalent(y)``.
:Parameters:
x, y :
The objects to compare for equivalence.
atol : float, optional
The absolute tolerance for all numerical comparisons, By
default the value returned by the `ATOL` function is used.
rtol : float, optional
The relative tolerance for all numerical comparisons, By
default the value returned by the `RTOL` function is used.
traceback : bool, optional
If True then print a traceback highlighting where the two
objects differ.
:Returns:
`bool`
Whether or not the two objects are equivalent.
**Examples:**
>>> f
<CF Field: rainfall_rate(latitude(10), longitude(20)) kg m2 s-1>
>>> cf.equivalent(f, f)
True
>>> cf.equivalent(1.0, 1.0)
True
>>> cf.equivalent(1.0, 33)
False
>>> cf.equivalent('a', 'a')
True
>>> cf.equivalent('a', 'b')
False
>>> cf.equivalent(cf.Data(1000, units='m'), cf.Data(1, units='km'))
True
For a field, ``f``:
>>> cf.equivalent(f, f.transpose())
True
'''
if rtol is None:
rtol = RTOL()
if atol is None:
atol = ATOL()
eq = getattr(x, 'equivalent', None)
if callable(eq):
# x has a callable equivalent method
return eq(y, rtol=rtol, atol=atol, traceback=traceback)
eq = getattr(y, 'equivalent', None)
if callable(eq):
# y has a callable equivalent method
return eq(x, rtol=rtol, atol=atol, traceback=traceback)
return equals(x, y, rtol=rtol, atol=atol, ignore_fill_value=True,
traceback=traceback)
def load_stash2standard_name(table=None, delimiter='!', merge=True):
'''Load a STASH to standard name conversion table.
This used when reading PP and UM fields files.
:Parameters:
table: `str`, optional
Use the conversion table at this file location. By default
the table will be looked for at
``os.path.join(os.path.dirname(cf.__file__),'etc/STASH_to_CF.txt')``
delimiter: `str`, optional
The delimiter of the table columns. By default, ``!`` is
taken as the delimiter.
merge: `bool`, optional
If *table* is None then *merge* is taken as
:param variables: a function that takes ``app`` and ``model
object`` arguments. The ``app`` argument is optional. It
can construct the variables used in the path (including any
URL parameters). If ``variables`` is omitted, variables are
retrieved from the model by using the arguments of the
decorated function.
:param converters: a dictionary containing converters for variables.
The key is the variable name, the value is a
:class:`morepath.Converter` instance.
:param required: list or set of names of those URL parameters which
should be required, i.e. if missing a 400 Bad Request response is
given. Any default value is ignored. Has no effect on path
variables. Optional.
:param get_converters: a function that returns a converter dictionary.
This function is called once during configuration time. It can
be used to programmatically supply converters. It is merged
with the ``converters`` dictionary, if supplied. Optional.
:param absorb: If set to ``True``, matches any subpath that
matches this path as well. This is passed into the decorated
function as the ``absorb`` argument.
"""
self.model = model
self.path = path
self.variables = variables
self.converters = converters
self.required = required
self.get_converters = get_converters
self.absorb = absorb
def actions(self, obj):
# this composite action exists to let you use path with a
# class and still have the path action discriminator work
# correctly, which reports a conflict if you use the path
# action with the same model multiple times.
model = self.model
if isinstance(obj, type):
if model is not None:
raise dectate.DirectiveError(
"@path decorates class so cannot "
"have explicit model: %s" % model
)
model = obj
if model is None:
raise dectate.DirectiveError(
"@path does not decorate class and has no explicit model"
)
yield PathAction(
self.path,
model,
self.variables,
self.converters,
self.required,
self.get_converters,
self.absorb,
), obj
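# A hedged usage sketch of the path directive handled above (App, Document and
# the URL pattern are illustrative, not part of this module):
#
# import morepath
#
# class App(morepath.App):
#     pass
#
# @App.path(path='documents/{id}')
# class Document(object):
#     def __init__(self, id):
#         self.id = id
#
# Decorating a class means no explicit ``model`` is given; supplying both, as
# actions() above enforces, raises a DirectiveError.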
class PermissionRuleAction(dectate.Action):
config = {}
filter_convert = {
"model": dectate.convert_dotted_name,
"permission": dectate.convert_dotted_name,
"identity": dectate.convert_dotted_name,
}
filter_compare = {
"model": isbaseclass,
"permission": issubclass,
"identity": issubclass,
}
app_class_arg = True
depends = [SettingAction]
def __init__(self, model, permission, identity=Identity):
"""Declare whether a model has a permission.
The decorated function receives ``app``, ``model``,
``permission`` (instance of any permission object) and
``identity`` (:class:`morepath.Identity`) parameters. The
``app`` argument is optional. The decorated function should
return ``True`` only if the given identity exists and has that
permission on the model.
:param model: the model class
:param permission: permission class
:param identity: identity class to check permission for. If ``None``,
the identity to check for is the special
:data:`morepath.NO_IDENTITY`.
"""
self.model = model
self.permission = permission
if identity is None:
identity = NoIdentity
self.identity = identity
def identifier(self, app_class):
return (self.model, self.permission, self.identity)
def perform(self, obj, app_class):
app_class._permits.register(
methodify(obj, selfname="app"),
identity=self.identity,
obj=self.model,
permission=self.permission,
)
template_directory_id = 0
class TemplateDirectoryAction(dectate.Action):
config = {"template_engine_registry": TemplateEngineRegistry}
depends = [SettingAction]
filter_name = {"after": "_after", "before": "_before"}
filter_convert = {
"after": dectate.convert_dotted_name,
"before": dectate.convert_dotted_name,
}
def __init__(self, after=None, before=None, name=None):
"""Register template directory.
The decorated function gets no argument and should return a
relative or absolute path to a directory containing templates
that can be loaded by this app. If a relative path, it is made
absolute from the directory this module is in.
Template directories can be ordered: templates in a directory
``before`` another one are found before templates in a
directory ``after`` it. But you can leave both ``before`` and
``after`` out: template directories defined in
sub-applications automatically have a higher priority than
those defined in base applications.
:param after: Template directory function that this template directory
function should be under. The other template directory has a higher
priority. You usually want to use ``over``. Optional.
:param before: Template directory function that this function
should have priority over. Optional.
:param name: The name under which to register this template
directory, so that it can be overridden by applications that
extend this one. If no name is supplied a default name is
generated.
"""
global template_directory_id
self._after = after
self._before = before
if name is None:
name = "template_directory_%s" % template_directory_id
template_directory_id += 1
self.name = name
def identifier(self, template_engine_registry):
return self.name
def perform(self, obj, template_engine_registry):
directory = obj()
if not os.path.isabs(directory):
directory = os.path.join(
os.path.dirname(self.code_info.path), directory
)
# hacky to have to get configurable and pass it in.
# note that this cannot be app_class as we want the app of
# the directive that *defined* it so we sort things properly.
template_engine_registry.register_template_directory_info(
obj,
directory,
self._before,
self._after,
self.directive.configurable,
)
class TemplateLoaderAction(dectate.Action):
config = {"template_engine_registry": TemplateEngineRegistry}
depends = [TemplateDirectoryAction]
def __init__(self, extension):
"""Create a template loader.
The decorated function gets a ``template_directories`` argument,
which is a list of absolute paths to directories that contain
templates. It also gets a ``settings`` argument, which is
application settings that can be used to configure the loader.
It should return an object that can load the template
given the list of template directories.
"""
self.extension = extension
def identifier(self, template_engine_registry):
return self.extension
def perform(self, obj, template_engine_registry):
template_engine_registry.initialize_template_loader(self.extension, obj)
class TemplateRenderAction(dectate.Action):
config = {"template_engine_registry": TemplateEngineRegistry}
depends = [SettingAction, TemplateLoaderAction]
def __init__(self, extension):
"""Register a template engine.
:param extension: the template file extension (``.pt``, etc)
we want this template engine to handle.
The decorated function gets ``loader``, ``name`` and
``original_render`` arguments. It should return a ``callable``
that is a view ``render`` function: take a ``content`` and
``request`` object and return a :class:`morepath.Response`
instance. This render callable should render the return value
of the view with the template supplied through its
``template`` argument.
"""
self.extension = extension
def identifier(self, template_engine_registry):
return self.extension
def perform(self, obj, template_engine_registry):
template_engine_registry.register_template_render(self.extension, obj)
def issubclass_or_none(a, b):
if a is None or b is None:
return a == b
return issubclass(a, b)
class ViewAction(dectate.Action):
config = {
"template_engine_registry": TemplateEngineRegistry,
}
depends = [SettingAction, PredicateAction, TemplateRenderAction]
filter_convert = {
"model": dectate.convert_dotted_name,
"render": dectate.convert_dotted_name,
"load": dectate.convert_dotted_name,
"permission": dectate.convert_dotted_name,
"internal": dectate.convert_bool,
}
def filter_get_value(self, name):
return self.predicates.get(name, dectate.NOT_FOUND)
filter_compare = {
"model": isbaseclass,
"permission": issubclass_or_none,
}
app_class_arg = True
def __init__(
self,
model,
render=None,
template=None,
load=None,
permission=None,
internal=False,
**predicates
):
"""Register a view for a model.
The decorated function gets ``self`` (model instance) and
``request`` (:class:`morepath.Request`) parameters. The
function should return either a (unicode) string that is
the response body, or a :class:`morepath.Response` object.
If a specific ``render`` function is given the output of the
function is passed to this first, and the function could
return whatever the ``render`` parameter expects as input.
This function should take the object to render and the
request. :func:`morepath.render_json` for instance expects as
its first argument a Python object such as a dict that can be
serialized to JSON.
See also :meth:`morepath.App.json` and
:meth:`morepath.App.html`.
:param model: the class of the model for which this view is registered.
The ``self`` passed into the view function is an instance
of the model (or of a subclass).
:param render: an optional function that can render the output of the
view function to a response, and possibly set headers such as
``Content-Type``, etc. This function takes ``self`` and
``request`` parameters as input.
:param template: a path to a template file. The path is relative
to the directory this module is in. The template is applied to
the content returned from the decorated view function.
Use the :meth:`morepath.App.template_loader` and
:meth:`morepath.App.template_render` directives to define
support for new template engines.
:param load: a load function that turns the request into an object.
If load is in use, this object will be the third argument to the
view function.
:param permission: a permission class. The model should have this
permission, otherwise access to this view is forbidden. If omitted,
the view function is public.
:param internal: Whether this view is internal only. If
``True``, the view is only useful programmatically using
:meth:`morepath.Request.view`, but will not be published on
the web. It will be as if the view is not there.
By default a view is ``False``, so not internal.
:param name: the name of the view as it appears in the
# Copyright (C) Schrodinger, LLC.
# All Rights Reserved
#
# For more information, see LICENSE in PyMOL's home directory.
#
# pymolhttpd.py
#
# web server interface for controlling PyMOL
# we make extensive use of Python's built-in web infrastructure
import BaseHTTPServer, cgi, urlparse
import StringIO, socket
# we also rely upon Python's json infrastructure
try:
import simplejson as json
except:
import json
# standard Python dependencies
import types, os, sys, traceback, threading
# NOTE: Let's attempt to follow Python PEP 8 for coding style for this
# source code file. URL: http://www.python.org/dev/peps/pep-0008
#
# * maximum target line length to be 79 characters.....................seventy9
# * methods and attribute names as lower_case_underscore
# * class names as UpperCaseCaps
# * private symbols start with a leading underscore
# * uniform indentation consisting of 4 spaces (no tabs!)
_json_mime_types = [ 'text/json', 'application/json' ]
class _PymolHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
# for now, we're using a single-threaded server
# our actual HTTP server class is private for the time being
# if we need to, then we'll change this
def do_GET(self):
self.process_request()
def do_POST(self):
self.process_request()
def log_message(self, format, *args):
if self.server.pymol_logging:
BaseHTTPServer.BaseHTTPRequestHandler.log_message(self,format,
*args)
def process_request(self):
"""
parse any URL or FORM arguments and process the request
"""
# verify that the request is coming from this machine
try:
host, port = self.client_address
if (host[0:6] != '127.0.'):
self.send_error(403,
"Only localhost requests are allowed (not: %s)"
% host)
else:
self.session = self.server.pymol_session # local session
self.callback = None
self.parse_args()
self.process_urlpath()
except socket.error:
traceback.print_exc()
print "broken pipe"
pass
def parse_args(self):
"""
parses URL arguments into a urlpath (before the ?)
and a cgiFieldStorage object (args after the ?).
for example:
http://localhost:8080/apply/pymol.cmd.color?color=blue&selection=benz
would yield self.fs.getvalue("color") as "blue"
and self.fs.getvalue("selection") as "benz"
self.urlpath would be "/apply/pymol.cmd.color"
"""
if (self.command == "POST"):
self.fs = cgi.FieldStorage(fp=self.rfile, headers=self.headers,
environ = {'REQUEST_METHOD':'POST'},
keep_blank_values = 1)
self.urlpath = self.path
elif (self.command == "GET"):
scheme,netloc,path,params,qs,fragment = urlparse.urlparse(self.path)
self.fs = cgi.FieldStorage(environ = {'REQUEST_METHOD':'GET',
'QUERY_STRING':qs},
keep_blank_values = 1)
self.urlpath = path
else:
self.fs = None
def process_urlpath(self):
"""
self.urlpath can be a request for a document, or a
special request, such as apply or getattr
"""
parts = self.urlpath.split('/')
# for example:
# if http://localhost:8080/apply/pymol.cmd.color?...
# then parts is ['', 'apply', 'pymol.cmd.color...']
# or if http://localhost:8080/apply?_json=...
# then parts is ['', 'apply?_json=...']
if len(parts) < 2: # then it cannot be a PyMOL request
self.send_doc() # simple file retrieval
else: # might be a PyMOL request
if len(parts) == 2: # no method name or trailing slash -> blank
parts.append('')
if (parts[1] == 'apply'): # calling a method
self.pymol_apply(parts[2])
elif (parts[1] == 'getattr'): # retrieving a property
self.pymol_getattr(parts[2])
elif (parts[1] == 'echo'): # for debugging purposes
self.send_resp_header(200,'text/plain')
self.echo_args(parts[2])
else: # simple file retrieval
self.send_doc()
def pymol_getattr(self, attr):
"""
apply the repr method to the requested attr, but only for
allowed attributes - those stored in the session dictionary
"""
key = '/getattr/' + attr;
if self.session.has_key(key):
try:
result = repr(self.session[key])
self.send_json_result(result)
except:
self.send_error(500,"Unable to get attribute.")
self.wfile.write(" %s\n" % attr)
traceback.print_exc(file=self.wfile)
else:
self.send_error(404,"Not a recognized attribute")
self.wfile.write(" %s is not a recognized attribute\n" % attr)
def wrap_return(self, result, status="OK", indent=None):
r = { 'status' : status, 'result' : result }
if self.server.wrap_natives==1:
return json.dumps(r, indent=indent)
else:
return json.dumps(result, indent=indent)
def send_json_result(self, result):
"""
send the mime header and result body. requests that came from
XMLHTTPRequest have specified they will accept (expect) json
formatted results. other requests will have come from
ordinary GET or POST requests via links or forms
"""
if self.callback != None:
self.send_resp_header(200,'text/javascript')
self.wfile.write("%s(%s)"%(self.callback,self.wrap_return(result)))
else:
accept_mime = self.headers.getheader('Accept')
if accept_mime in _json_mime_types:
self.send_resp_header(200,accept_mime)
self.wfile.write(self.wrap_return(result))
else:
self.send_resp_header(200,'text/html')
self.wfile.write("PyMOL's JSON response: <pre>")
self.wfile.write(self.wrap_return(result,indent=4))
self.wfile.write("</pre>")
def send_json_error(self, code, message):
if self.callback != None:
self.send_resp_header(code,'text/javascript')
self.wfile.write("%s(%s)"%(self.callback,self.wrap_return(message,"ERROR")))
else:
accept_mime = self.headers.getheader('Accept')
if accept_mime in _json_mime_types:
self.send_resp_header(code,accept_mime)
self.wfile.write(self.wrap_return(message,"ERROR"))
else:
self.send_resp_header(code,'text/html')
self.wfile.write("PyMOL's JSON response: <pre>")
self.wfile.write(self.wrap_return(message,"ERROR",indent=4))
self.wfile.write("</pre>")
def send_exception_json(self, code, message):
fp = StringIO.StringIO()
traceback.print_exc(file=fp)
tb = fp.getvalue()
message = message + tb.split('\n')
response = json.dumps(message)
if self.callback != None:
self.send_resp_header(code, 'text/javascript')
self.wfile.write("%s(%s)"%(self.callback,response))
else:
accept_mime = self.headers.getheader('Accept')
if accept_mime in _json_mime_types:
self.send_resp_header(code,accept_mime)
self.wfile.write(response)
else:
self.send_resp_header(code,'text/html')
self.wfile.write("PyMOL's JSON response: <pre>")
self.wfile.write(json.dumps(json.loads(response),indent=4))
self.wfile.write("</pre>")
def pymol_apply(self,method):
"""
apply the appropriate method held in the session dictionary.
supply the method arguements in the form of key/value
"""
args = None
kwds = None
query_kwds = {}
send_multi_result_list = False
for k in self.fs.keys():
if k[0:1] == '_': # leading-underscore argument (special handling)
if k == '_callback':
self.callback = self.fs.getfirst(k)
elif k == '_json': # main path for Javascript API
method = json.loads(self.fs.getfirst(k))
# [ "my_method", [ arg1, ... ] , { 'key1' : 'val1, ... } ]
# or
# [ [ "my_met1", [ arg1, ... ], { 'key1' : 'val1, ... } ],
# [ "my_met2", [ arg1, ... ], { 'key1' : 'val1, ... } ] ]
elif k == '_method': # tentative, not in spec -- may disappear
# a method name "my_method"
method = json.loads(self.fs.getfirst(k))
elif k == '_args': # tentative, not in spec -- may disappear
args = json.loads(self.fs.getfirst(k))
elif k == '_kwds': # tentative, not in spec -- may disappear
kwds = json.loads(self.fs.getfirst(k))
# other underscore arguments are ignored (not passed on)
elif k[0:1] != '_':
query_kwds[k] = self.fs.getfirst(k)
blocks = []
if isinstance(method,types.StringType):
# method is merely a string
if kwds == None:
kwds = query_kwds
if args == None:
args = ()
if len(method):
blocks = [ [ method, args, kwds ] ]
elif isinstance(method,types.ListType) and len(method):
# method is a list
if not isinstance(method[0],types.ListType):
blocks = [ method ] # contains just [name, args, kwds]
else:
blocks = method
# contains [ [name, arg, kwds], [name, args, kwds], ... ]
send_multi_result_list = False # only return final result
else:
self.send_json_error(500,[ "Unable to apply method:", str(method)])
return
result = []
if len(blocks):
for block in blocks:
if self.server.pymol_logging:
print 'applying: ' + str(block)
fn = self.session.get(block[0],None)
if fn != None:
len_block = len(block)
if len_block>1:
args = tuple(block[1])
else:
args = ()
if len_block>2:
kwds = block[2]
else:
kwds = {}
try:
result.append( fn(*args, **kwds) )
except:
self.send_exception_json(500,
[ "Exception in: %s" %
block[0],
"Args: " + str(args) ,
"Kwds: " + str(kwds)])
return
else:
self.send_json_error(500,[ "Method not found:",
str(block) ])
return
if block[0] == '_quit': # special quit behavior
self.send_resp_header()
self.wfile.write("<html>")
href = None
if kwds.has_key("href"):
href = str(kwds['href'])
elif len(args):
href = str(args[1])
if href == None:
self.wfile.write("<body>")
elif not len(href): # simply close the window
self.wfile.write("<body onload=\"window.close()\">")
else:
self.wfile.write(
"<body onload=\"document.location.replace('"+
kwds['href']+"')\">")
self.wfile.write("<p>PyMOL-HTTPd: Shutting down...</p>")
self.wfile.write("<p><i>Please close this window.</i></p>")
self.wfile.write("</body></html>")
self.wfile.flush()
self.server.pymol_cmd.quit()
return
if send_multi_result_list:
self.send_json_result(result)
elif len(result):
self.send_json_result(result[-1])
else:
self.send_json_result(None)
return
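# Illustrative sketch (not part of the original source): assuming the server
# runs on localhost:8080 and the session dictionary exposes a "zoom" entry,
# a client could drive pymol_apply with a request such as
#   http://localhost:8080/apply?_json=["zoom",["all"],{"buffer":2}]
# which json.loads() turns into the block ["zoom", ["all"], {"buffer": 2}],
# i.e. fn = session["zoom"], args = ("all",), kwds = {"buffer": 2}.
# The method name, port, and URL path here are hypothetical examples.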
def send_doc(self):
"""
send a document (file) in the current directory or any sub-directory
"""
path_list = self.path.split('/')[1:]
if '..' in path_list: # prevent access to parent directories
self.send_error(404,"Illegal path.")
self.wfile.write(": %s" % self.path)
elif self.server.pymol_root == None:
self.send_error(404,"No content root specified.")
else:
try:
full_path = os.path.join(*[self.server.pymol_root] +
list(path_list))
if os.path.isdir(full_path):
full_path = full_path + "/index.html"
fp = open(full_path,"rb")
self.send_resp_header(200,self.guess_mime(full_path))
self.wfile.write(fp.read())
fp.close()
except:
self.send_error(404,"Unable to locate document.")
self.wfile.write(": %s" % self.path)
self.wfile.write(str(sys.exc_info()))
# exc_info() is thread safe
# self.wfile.write(sys.exc_value) # exc_value not thread safe
def guess_mime(self,path):
"""
guess the mime type based on the file extension
"""
if path.endswith('.html'):
return 'text/html'
elif path.endswith('.js'):
return 'application/x-javascript'
elif path.endswith('.jpg'):
return 'image/jpeg'
elif path.endswith('.png'):
return 'image/png'
elif path.endswith('.gif'):
return 'image/gif'
elif path.endswith('.sdf'):
return 'chemical/x-mdl-sdfile'
elif path.endswith('.mol'):
return 'chemical/x-mdl-molfile'
elif path.endswith('.pwg'):
return 'application/x-pymol'
else:
return 'text/plain'
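# Alternative sketch (an assumption, not used by this class): Python's standard
# mimetypes module could cover the generic extensions, with the chemistry and
# PyMOL types registered once at import time. Kept as comments so the explicit
# table above remains the behavior that is actually exercised.
#   import mimetypes
#   mimetypes.add_type('chemical/x-mdl-sdfile', '.sdf')
#   mimetypes.add_type('chemical/x-mdl-molfile', '.mol')
#   mimetypes.add_type('application/x-pymol', '.pwg')
#   mime = mimetypes.guess_type(path)[0] or 'text/plain'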
def send_error(self,errcode,errmsg):
self.send_response(errcode)
self.send_header('Content-type', 'text/plain')
self.send_header('Pragma','no-cache')
self.send_header('Cache-Control','no-cache, must-revalidate')
self.send_header('Expires','Sat, 10 Jan 2008 01:00:00 GMT')
self.end_headers()
self.wfile.write("PyMOL-HTTPd-Error: "+errmsg+"\n")
def send_resp_header(self, code=200, mime='text/html'):
self.send_response(code)
self.send_header('Content-type', mime)
self.send_header('Pragma','no-cache')
self.send_header('Cache-Control','no-cache, must-revalidate')
self.send_header('Expires','Sat, 10 Jan 2008 01:00:00 GMT')
self.end_headers()
def echo_args(self):
"""
for debugging requests
"""
self.wfile.write("%s\n" % self.command)
if (self.fs):
for k in self.fs.keys():
self.wfile.write("%s = " % k)
# key can have multiple values, as with checkboxes,
# but also arbitrarily
if (isinstance(self.fs[k], types.ListType)):
self.wfile.write("%s\n" % self.fs.getlist(k))
else:
# key has just a single value
self.wfile.write("%s\n" % self.fs.getfirst(k))
<reponame>sawansib/Sniper<filename>pin_kit/extras/pinplay/PinPoints/scripts/sniper_pinpoints.py
#!/usr/bin/env python
# BEGIN_LEGAL
# BSD License
#
# Copyright (c)2013 Ghent University. All rights reserved.
# Copyright (c)2014 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer. Redistributions
# in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution. Neither the name of
# the Intel Corporation nor the names of its contributors may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL OR
# ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# END_LEGAL
#
#
# @ORIGINAL_AUTHORS: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# $Id: sniper_pinpoints.py,v 1.32 2014/07/05 19:44:27 tmstall Exp tmstall $$
#
# This is a script to replay one pinplay process
#
import sys
import os
import glob
import re
import locale
# Local modules
#
import cmd_options
import config
import msg
import pinpoints
import util
# Local utilities
def sniper_root(parser):
parser.add_option("--sniper_root", dest="sniper_root",
default=os.getenv('SNIPER_ROOT', ''),
help="Sniper root is the top level directory where Sniper is installed. Defaults "
"to the environment variable SNIPER_ROOT.")
def sniper_options(parser):
parser.add_option("--sniper_options", dest="sniper_options",
default=False,
help="Replace the SniperLite configuration file with user defined config/options.")
def ignore_sniper_error(parser):
parser.add_option("--ignore_sniper_error", dest="ignore_sniper_error",
default=False, action='store_true',
help="Ignore any erorrs in the Sniper output")
def no_sniperlite(parser):
parser.add_option("--no_sniperlite", dest="no_sniperlite",
default=False, action='store_true',
help="Use Sniper instead of SniperLite as the simulator.")
def region_sim(parser, group):
method = cmd_options.GetMethod(parser, group)
method('-T', '--region_sim', dest="region_sim", action="store_true",
help="Run Sniper on the region pinballs.")
def whole_sim(parser, group):
method = cmd_options.GetMethod(parser, group)
method('-W', '--whole_sim', dest="whole_sim", action="store_true",
help="Run Sniper on the whole program pinballs.")
def RunSniper(pp_dir, sim_replay_cmd, phase_length, options):
def round(x, roundby = 1000000):
return int(int((x / float(roundby))) * float(roundby))
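# Note: this nested helper shadows the builtin round(); it truncates x down to
# the nearest multiple of roundby, e.g. round(2345678) returns 2000000 with the
# default roundby of 1000000 (values are illustrative).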
# List of pinballs used to print Sniper output when all runs are complete.
#
pb_list = []
ret = 0
if not os.path.isdir(pp_dir):
# If running in MPI_MT_MODE, then it's possible for one process to not
# have a thread corresponding to the current focus thread.
# However, another process might have this thread. Thus, only return
# an error if not tracing a MPI_MT application.
#
if options.mode == config.MPI_MT_MODE:
msg.PrintMsg('WARNING: Directory containing pinballs to run with simulator does not exist:\n ' + \
pp_dir)
msg.PrintMsg('Since tracing mode is \'mpi_mt\', this may be OK.')
return 0
else:
msg.PrintMsg('ERROR: Directory containing pinballs to run with simulator does not exist:\n ' + \
pp_dir)
return -1
# List of output sniper directories.
#
output_dir_list =[]
for fn in os.listdir(pp_dir):
if fn.endswith('.address'):
pinball_path = os.path.join(pp_dir, os.path.splitext(fn)[0])
fn = os.path.splitext(fn)[0]
sniper_outputdir = os.path.join(config.sniper_result_dir,pinball_path)
output_dir_list += [sniper_outputdir]
sniper_outputfile = pinball_path + config.sniper_out_ext
if options.debug:
# If debugging, check to see if the Sniper result files already exist for the pinball. If so,
# then print it out but don't run Sniper again.
#
if os.path.isdir(sniper_outputdir):
msg.PrintMsgPlus('WARNING: Skipping Sniper execution because output file already exists.\n'
' %s' % sniper_outputdir)
pb_list.append(pinball_path)
continue
# Select the proper config/options to run the desired version of
# Sniper/SniperLite.
#
# import pdb; pdb.set_trace()
if options.sniper_options:
common_sniper_opts = options.sniper_options
else:
if options.no_sniperlite:
common_sniper_opts = ''
else:
# use_orig = True # Use older SniperLite options
use_orig = False # New SniperLite options
if use_orig:
common_sniper_opts = ' -c dunnington -c cacheonly -c nehalem_cmpsim.cfg ' \
'-g --general/enable_icache_modeling=false ' \
'-g --perf_model/dram/direct_access=true ' \
'-g --perf_model/dram/queue_model/type=contention ' \
'-g --perf_model/dtlb/size=0'
else:
# Older patched Sniper 5.3
#
# common_sniper_opts = ' -c dunnington -c nehalem_cmpsim.cfg -c ccpp1c --pinball-non-sift \
# -g -replay:addr_trans -g --general/enable_icache_modeling=false'
# Newer patched Sniper 5.3
#
# common_sniper_opts = ' -c dunnington -c nehalem_cmpsim.cfg -c cc-fast --pinball-non-sift \
# -g -replay:addr_trans -g --general/enable_icache_modeling=false'
# Production SniperLite 6.0 options
#
common_sniper_opts = ' -c nehalem-lite --pinball-non-sift '
partial_run_cmd = (common_sniper_opts + ' --no-cache-warming')
try:
# If re.search() fails, code falls though to the exception.
#
warmup_region = re.search('warmup(\d+)_prolog(\d+)_region(\d+)_epilog(\d+)_(\d+)_(\d-\d+)', \
pinball_path)
# Get info on the length of regions in the pinball.
#
warmup = int(warmup_region.group(1))
prolog = int(warmup_region.group(2))
file_region = int(warmup_region.group(3))
epilog = int(warmup_region.group(4))
region_num = int(warmup_region.group(5))
weight = warmup_region.group(6).replace('-', '.')
icount = util.GetMaxIcount('', pinball_path)
sim_region_len = icount - warmup
calc_region = sim_region_len - prolog - epilog
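# Illustrative (hypothetical) file name matching the pattern above:
# "foo_warmup1000000_prolog0_region30000000_epilog0_001_0-042" would give
# warmup=1000000, prolog=0, file_region=30000000, epilog=0, region_num=1 and
# weight "0.042"; sim_region_len and calc_region then follow from the
# pinball's instruction count as computed above.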
if warmup != 0:
# If there are warmups, then need to use options to first do cache warmups in Sniper,
# then simulate the region.
#
partial_run_cmd = (common_sniper_opts + ' -s stop-by-icount:%d:%d ' \
% (sim_region_len, round(warmup)) + ' --roi-script ')
if not options.list:
# Print details about the various sections in the pinball.
#
msg.PrintMsgPlus('Running Sniper on: ' + fn)
msg.PrintMsg(' Warmup count: ' + locale.format('%14d', warmup, True))
msg.PrintMsg(' Prolog count: ' + locale.format('%14d', prolog, True))
msg.PrintMsg(' Actual region count: ' + locale.format('%14d', calc_region, True) + \
' (from file name: ' + locale.format('%d', file_region, True) + ')')
msg.PrintMsg(' Epilog count: ' + locale.format('%14d', epilog, True))
msg.PrintMsg(' Total Instr count: ' + locale.format('%14d', icount, True))
except AttributeError:
if 'whole_program' not in pinball_path:
# Whole program pinballs cannot be processed using the options
# given above (using -s stop-by-icount:) because they don't have
# warmup instructions to be skipped.
#
# Let the user know the results may be suspect because the dir
# appears to contain whole program pinballs, but the name doesn't
# contain the string 'whole_program'. However, don't flag this as
# an error. It's possible for the user to give the name of a WP
# pinball directory which does not contain this string.
#
msg.PrintMsgPlus('WARNING: This pinball may not really be a whole program pinball.\n'
' If this is true, the results may not be valid.\n'
' ' + pinball_path)
msg.PrintMsgPlus('Running Sniper on whole program pinball: ' + pinball_path)
# Format the command and execute it asynchronously.
#
cmd = os.path.join(options.sniper_root,sim_replay_cmd) + partial_run_cmd + \
(' -d "%s" ' % sniper_outputdir) + ' --pinballs ' + pinball_path + \
' > ' + sniper_outputfile + ' 2>&1 '
pb_list.append(pinball_path)
end_str = fn
# import pdb; pdb.set_trace()
result = util.RunCmd(cmd, options, end_str, True)
ret = result or ret
result = util.WaitJobs(options)
ret = result or ret
# Make sure some of the important Sniper output files exist for each pinball.
#
error = False
file_list = ['sim.stats.sqlite3', 'sim.info', 'sim.cfg', 'sim.out']
for sim_dir in output_dir_list:
for f in file_list:
if not os.path.isfile(os.path.join(sim_dir, f)):
msg.PrintMsg('\nERROR: Sniper output file does not exist:\n' \
' ' + os.path.join(sim_dir, f))
error = True
# Define a set of strings which can be ignored as errors if they occur in
# the output. These are special cases which must be added by hand when a
# new application is found that contains a string which might be detected
# as an error, but is a normal component of the output from running the
# application.
# Errors generated by Sniper itself which are acceptable.
#
ign_str = ['void Sift::Writer::Sync(): Assertion', 'Sift::Reader::decodeInstruction']
pin_app_term = 'Pin app terminated abnormally'
ign_str += [pin_app_term]
# Strings generated by SPEC CPU2006 benchmarks: dealII-ref-1
#
ign_str += ['Estimated error=']
# Strings generated by MILC
#
ign_str += ['error_per_site', 'No O(a^2) errors', 'error_for_propagator']
# Print the output from running Sniper and look for errors in the output.
#
for pinball_path in pb_list:
# Get just the pinball name.
import sys
import os
import platform
import re
import imp
from Tkinter import *
import tkSimpleDialog
import tkMessageBox
import webbrowser
from idlelib.MultiCall import MultiCallCreator
from idlelib import WindowList
from idlelib import SearchDialog
from idlelib import GrepDialog
from idlelib import ReplaceDialog
from idlelib import PyParse
from idlelib.configHandler import idleConf
from idlelib import aboutDialog, textView, configDialog
from idlelib import macosxSupport
from idlelib import help
# The default tab setting for a Text widget, in average-width characters.
TK_TABWIDTH_DEFAULT = 8
_py_version = ' (%s)' % platform.python_version()
def _sphinx_version():
"Format sys.version_info to produce the Sphinx version string used to install the chm docs"
major, minor, micro, level, serial = sys.version_info
release = '%s%s' % (major, minor)
if micro:
release += '%s' % (micro,)
if level == 'candidate':
release += 'rc%s' % (serial,)
elif level != 'final':
release += '%s%s' % (level[0], serial)
return release
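# Worked examples (hypothetical version tuples): with sys.version_info equal to
# (2, 7, 13, 'final', 0) the function above returns '2713'; a release candidate
# such as (3, 6, 0, 'candidate', 1) gives '36rc1', since the zero micro part is
# skipped.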
def _find_module(fullname, path=None):
"""Version of imp.find_module() that handles hierarchical module names"""
file = None
for tgt in fullname.split('.'):
if file is not None:
file.close() # close intermediate files
(file, filename, descr) = imp.find_module(tgt, path)
if descr[2] == imp.PY_SOURCE:
break # find but not load the source file
module = imp.load_module(tgt, file, filename, descr)
try:
path = module.__path__
except AttributeError:
raise ImportError, 'No source for module ' + module.__name__
if descr[2] != imp.PY_SOURCE:
# If all of the above fails and didn't raise an exception, fall back
# to a straight import which can find __init__.py in a package.
m = __import__(fullname)
try:
filename = m.__file__
except AttributeError:
pass
else:
file = None
base, ext = os.path.splitext(filename)
if ext == '.pyc':
ext = '.py'
filename = base + ext
descr = filename, None, imp.PY_SOURCE
return file, filename, descr
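# Minimal usage sketch (a module-level helper that is never called on import);
# the dotted module name is only an example.
def _example_find_module():
    file, filename, descr = _find_module('idlelib.configHandler')
    try:
        return filename, descr[2] == imp.PY_SOURCE
    finally:
        if file is not None:
            file.close()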
class HelpDialog(object):
def __init__(self):
self.parent = None # parent of help window
self.dlg = None # the help window itself
def display(self, parent, near=None):
""" Display the help dialog.
parent - parent widget for the help window
near - a Toplevel widget (e.g. EditorWindow or PyShell)
to use as a reference for placing the help window
"""
import warnings as w
w.warn("EditorWindow.HelpDialog is no longer used by Idle.\n"
"It will be removed in 3.6 or later.\n"
"It has been replaced by private help.HelpWindow\n",
DeprecationWarning, stacklevel=2)
if self.dlg is None:
self.show_dialog(parent)
if near:
self.nearwindow(near)
def show_dialog(self, parent):
self.parent = parent
fn=os.path.join(os.path.abspath(os.path.dirname(__file__)),'help.txt')
self.dlg = dlg = textView.view_file(parent,'Help',fn, modal=False)
dlg.bind('<Destroy>', self.destroy, '+')
def nearwindow(self, near):
# Place the help dialog near the window specified by parent.
# Note - this may not reposition the window in Metacity
# if "/apps/metacity/general/disable_workarounds" is enabled
dlg = self.dlg
geom = (near.winfo_rootx() + 10, near.winfo_rooty() + 10)
dlg.withdraw()
dlg.geometry("=+%d+%d" % geom)
dlg.deiconify()
dlg.lift()
def destroy(self, ev=None):
self.dlg = None
self.parent = None
helpDialog = HelpDialog() # singleton instance, no longer used
class EditorWindow(object):
from idlelib.Percolator import Percolator
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.IOBinding import IOBinding, filesystemencoding, encoding
from idlelib import Bindings
from Tkinter import Toplevel
from idlelib.MultiStatusBar import MultiStatusBar
help_url = None
def __init__(self, flist=None, filename=None, key=None, root=None):
if EditorWindow.help_url is None:
dochome = os.path.join(sys.prefix, 'Doc', 'index.html')
if sys.platform.count('linux'):
# look for html docs in a couple of standard places
pyver = 'python-docs-' + '%s.%s.%s' % sys.version_info[:3]
if os.path.isdir('/var/www/html/python/'): # "python2" rpm
dochome = '/var/www/html/python/index.html'
else:
basepath = '/usr/share/doc/' # standard location
dochome = os.path.join(basepath, pyver,
'Doc', 'index.html')
elif sys.platform[:3] == 'win':
chmfile = os.path.join(sys.prefix, 'Doc',
'Python%s.chm' % _sphinx_version())
if os.path.isfile(chmfile):
dochome = chmfile
elif sys.platform == 'darwin':
# documentation may be stored inside a python framework
dochome = os.path.join(sys.prefix,
'Resources/English.lproj/Documentation/index.html')
dochome = os.path.normpath(dochome)
if os.path.isfile(dochome):
EditorWindow.help_url = dochome
if sys.platform == 'darwin':
# Safari requires real file:-URLs
EditorWindow.help_url = 'file://' + EditorWindow.help_url
else:
EditorWindow.help_url = "https://docs.python.org/%d.%d/" % sys.version_info[:2]
self.flist = flist
root = root or flist.root
self.root = root
try:
sys.ps1
except AttributeError:
sys.ps1 = '>>> '
self.menubar = Menu(root)
self.top = top = WindowList.ListedToplevel(root, menu=self.menubar)
if flist:
self.tkinter_vars = flist.vars
#self.top.instance_dict makes flist.inversedict available to
#configDialog.py so it can access all EditorWindow instances
self.top.instance_dict = flist.inversedict
else:
self.tkinter_vars = {} # keys: Tkinter event names
# values: Tkinter variable instances
self.top.instance_dict = {}
self.recent_files_path = os.path.join(idleConf.GetUserCfgDir(),
'recent-files.lst')
self.text_frame = text_frame = Frame(top)
self.vbar = vbar = Scrollbar(text_frame, name='vbar')
self.width = idleConf.GetOption('main','EditorWindow','width', type='int')
text_options = {
'name': 'text',
'padx': 5,
'wrap': 'none',
'highlightthickness': 0,
'width': self.width,
'height': idleConf.GetOption('main', 'EditorWindow', 'height', type='int')}
if TkVersion >= 8.5:
# Starting with tk 8.5 we have to set the new tabstyle option
# to 'wordprocessor' to achieve the same display of tabs as in
# older tk versions.
text_options['tabstyle'] = 'wordprocessor'
self.text = text = MultiCallCreator(Text)(text_frame, **text_options)
self.top.focused_widget = self.text
self.createmenubar()
self.apply_bindings()
self.top.protocol("WM_DELETE_WINDOW", self.close)
self.top.bind("<<close-window>>", self.close_event)
if macosxSupport.isAquaTk():
# Command-W on editorwindows doesn't work without this.
text.bind('<<close-window>>', self.close_event)
# Some OS X systems have only one mouse button, so use
# control-click for popup context menus there. For two
# buttons, AquaTk defines <2> as the right button, not <3>.
text.bind("<Control-Button-1>",self.right_menu_event)
text.bind("<2>", self.right_menu_event)
else:
# Elsewhere, use right-click for popup menus.
text.bind("<3>",self.right_menu_event)
text.bind("<<cut>>", self.cut)
text.bind("<<copy>>", self.copy)
text.bind("<<paste>>", self.paste)
text.bind("<<center-insert>>", self.center_insert_event)
text.bind("<<help>>", self.help_dialog)
text.bind("<<python-docs>>", self.python_docs)
text.bind("<<about-idle>>", self.about_dialog)
text.bind("<<open-config-dialog>>", self.config_dialog)
text.bind("<<open-module>>", self.open_module)
text.bind("<<do-nothing>>", lambda event: "break")
text.bind("<<select-all>>", self.select_all)
text.bind("<<remove-selection>>", self.remove_selection)
text.bind("<<find>>", self.find_event)
text.bind("<<find-again>>", self.find_again_event)
text.bind("<<find-in-files>>", self.find_in_files_event)
text.bind("<<find-selection>>", self.find_selection_event)
text.bind("<<replace>>", self.replace_event)
text.bind("<<goto-line>>", self.goto_line_event)
text.bind("<<smart-backspace>>",self.smart_backspace_event)
text.bind("<<newline-and-indent>>",self.newline_and_indent_event)
text.bind("<<smart-indent>>",self.smart_indent_event)
text.bind("<<indent-region>>",self.indent_region_event)
text.bind("<<dedent-region>>",self.dedent_region_event)
text.bind("<<comment-region>>",self.comment_region_event)
text.bind("<<uncomment-region>>",self.uncomment_region_event)
text.bind("<<tabify-region>>",self.tabify_region_event)
text.bind("<<untabify-region>>",self.untabify_region_event)
text.bind("<<toggle-tabs>>",self.toggle_tabs_event)
text.bind("<<change-indentwidth>>",self.change_indentwidth_event)
text.bind("<Left>", self.move_at_edge_if_selection(0))
text.bind("<Right>", self.move_at_edge_if_selection(1))
text.bind("<<del-word-left>>", self.del_word_left)
text.bind("<<del-word-right>>", self.del_word_right)
text.bind("<<beginning-of-line>>", self.home_callback)
if flist:
flist.inversedict[self] = key
if key:
flist.dict[key] = self
text.bind("<<open-new-window>>", self.new_callback)
text.bind("<<close-all-windows>>", self.flist.close_all_callback)
text.bind("<<open-class-browser>>", self.open_class_browser)
text.bind("<<open-path-browser>>", self.open_path_browser)
self.set_status_bar()
vbar['command'] = text.yview
vbar.pack(side=RIGHT, fill=Y)
text['yscrollcommand'] = vbar.set
text['font'] = idleConf.GetFont(self.root, 'main', 'EditorWindow')
text_frame.pack(side=LEFT, fill=BOTH, expand=1)
text.pack(side=TOP, fill=BOTH, expand=1)
text.focus_set()
# usetabs true -> literal tab characters are used by indent and
# dedent cmds, possibly mixed with spaces if
# indentwidth is not a multiple of tabwidth,
# which will cause Tabnanny to nag!
# false -> tab characters are converted to spaces by indent
# and dedent cmds, and ditto TAB keystrokes
# Although use-spaces=0 can be configured manually in config-main.def,
# configuration of tabs v. spaces is not supported in the configuration
# dialog. IDLE promotes the preferred Python indentation: use spaces!
usespaces = idleConf.GetOption('main', 'Indent', 'use-spaces', type='bool')
self.usetabs = not usespaces
# tabwidth is the display width of a literal tab character.
# CAUTION: telling Tk to use anything other than its default
# tab setting causes it to use an entirely different tabbing algorithm,
# treating tab stops as fixed distances from the left margin.
# Nobody expects this, so for now tabwidth should never be changed.
self.tabwidth = 8 # must remain 8 until Tk is fixed.
# indentwidth is the number of screen characters per indent level.
# The recommended Python indentation is four spaces.
self.indentwidth = self.tabwidth
self.set_notabs_indentwidth()
# If context_use_ps1 is true, parsing searches back for a ps1 line;
# else searches for a popular (if, def, ...) Python stmt.
self.context_use_ps1 = False
# When searching backwards for a reliable place to begin parsing,
# first start num_context_lines[0] lines back, then
# num_context_lines[1] lines back if that didn't work, and so on.
# The last value should be huge (larger than the # of lines in a
# conceivable file).
# Making the initial values larger slows things down more often.
self.num_context_lines = 50, 500, 5000000
self.per = per = self.Percolator(text)
self.undo = undo = self.UndoDelegator()
per.insertfilter(undo)
text.undo_block_start = undo.undo_block_start
text.undo_block_stop = undo.undo_block_stop
undo.set_saved_change_hook(self.saved_change_hook)
# IOBinding implements file I/O and printing functionality
self.io = io = self.IOBinding(self)
io.set_filename_change_hook(self.filename_change_hook)
# Create the recent files submenu
self.recent_files_menu = Menu(self.menubar, tearoff=0)
self.menudict['file'].insert_cascade(3, label='Recent Files',
underline=0,
menu=self.recent_files_menu)
self.update_recent_files_list()
self.color = None # initialized below in self.ResetColorizer
if filename:
if os.path.exists(filename) and not os.path.isdir(filename):
io.loadfile(filename)
else:
io.set_filename(filename)
self.ResetColorizer()
self.saved_change_hook()
self.set_indentation_params(self.ispythonsource(filename))
self.load_extensions()
menu = self.menudict.get('windows')
if menu:
end = menu.index("end")
if end is None:
end = -1
if end >= 0:
menu.add_separator()
end = end + 1
self.wmenu_end = end
WindowList.register_callback(self.postwindowsmenu)
# Some abstractions so IDLE extensions are cross-IDE
self.askyesno = tkMessageBox.askyesno
self.askinteger = tkSimpleDialog.askinteger
self.showerror = tkMessageBox.showerror
def _filename_to_unicode(self, filename):
"""convert filename to | |
"name": "arrow-merge-000-left"
},
{
"url": "/images/status/arrow-merge-090-left.png",
"name": "arrow-merge-090-left"
},
{
"url": "/images/status/arrow-merge-090.png",
"name": "arrow-merge-090"
},
{
"url": "/images/status/arrow-merge-180-left.png",
"name": "arrow-merge-180-left"
},
{
"url": "/images/status/arrow-merge-180.png",
"name": "arrow-merge-180"
},
{
"url": "/images/status/arrow-merge-270-left.png",
"name": "arrow-merge-270-left"
},
{
"url": "/images/status/arrow-merge-270.png",
"name": "arrow-merge-270"
},
{
"url": "/images/status/arrow-merge.png",
"name": "arrow-merge"
},
{
"url": "/images/status/arrow-move.png",
"name": "arrow-move"
},
{
"url": "/images/status/arrow-out.png",
"name": "arrow-out"
},
{
"url": "/images/status/arrow-repeat-once.png",
"name": "arrow-repeat-once"
},
{
"url": "/images/status/arrow-repeat.png",
"name": "arrow-repeat"
},
{
"url": "/images/status/arrow-resize-045.png",
"name": "arrow-resize-045"
},
{
"url": "/images/status/arrow-resize-090.png",
"name": "arrow-resize-090"
},
{
"url": "/images/status/arrow-resize-135.png",
"name": "arrow-resize-135"
},
{
"url": "/images/status/arrow-resize.png",
"name": "arrow-resize"
},
{
"url": "/images/status/arrow-return-000-left.png",
"name": "arrow-return-000-left"
},
{
"url": "/images/status/arrow-return-090-left.png",
"name": "arrow-return-090-left"
},
{
"url": "/images/status/arrow-return-090.png",
"name": "arrow-return-090"
},
{
"url": "/images/status/arrow-return-180-left.png",
"name": "arrow-return-180-left"
},
{
"url": "/images/status/arrow-return-180.png",
"name": "arrow-return-180"
},
{
"url": "/images/status/arrow-return-270-left.png",
"name": "arrow-return-270-left"
},
{
"url": "/images/status/arrow-return-270.png",
"name": "arrow-return-270"
},
{
"url": "/images/status/arrow-return.png",
"name": "arrow-return"
},
{
"url": "/images/status/arrow-retweet.png",
"name": "arrow-retweet"
},
{
"url": "/images/status/arrow-skip-090.png",
"name": "arrow-skip-090"
},
{
"url": "/images/status/arrow-skip-180.png",
"name": "arrow-skip-180"
},
{
"url": "/images/status/arrow-skip-270.png",
"name": "arrow-skip-270"
},
{
"url": "/images/status/arrow-skip.png",
"name": "arrow-skip"
},
{
"url": "/images/status/arrow-split-090.png",
"name": "arrow-split-090"
},
{
"url": "/images/status/arrow-split-180.png",
"name": "arrow-split-180"
},
{
"url": "/images/status/arrow-split-270.png",
"name": "arrow-split-270"
},
{
"url": "/images/status/arrow-split.png",
"name": "arrow-split"
},
{
"url": "/images/status/arrow-step-out.png",
"name": "arrow-step-out"
},
{
"url": "/images/status/arrow-step-over.png",
"name": "arrow-step-over"
},
{
"url": "/images/status/arrow-step.png",
"name": "arrow-step"
},
{
"url": "/images/status/arrow-stop-090.png",
"name": "arrow-stop-090"
},
{
"url": "/images/status/arrow-stop-180.png",
"name": "arrow-stop-180"
},
{
"url": "/images/status/arrow-stop-270.png",
"name": "arrow-stop-270"
},
{
"url": "/images/status/arrow-stop.png",
"name": "arrow-stop"
},
{
"url": "/images/status/arrow-switch-090.png",
"name": "arrow-switch-090"
},
{
"url": "/images/status/arrow-switch-180.png",
"name": "arrow-switch-180"
},
{
"url": "/images/status/arrow-switch-270.png",
"name": "arrow-switch-270"
},
{
"url": "/images/status/arrow-switch.png",
"name": "arrow-switch"
},
{
"url": "/images/status/arrow-transition-090.png",
"name": "arrow-transition-090"
},
{
"url": "/images/status/arrow-transition-180.png",
"name": "arrow-transition-180"
},
{
"url": "/images/status/arrow-transition-270.png",
"name": "arrow-transition-270"
},
{
"url": "/images/status/arrow-transition.png",
"name": "arrow-transition"
},
{
"url": "/images/status/arrow-turn-000-left.png",
"name": "arrow-turn-000-left"
},
{
"url": "/images/status/arrow-turn-090-left.png",
"name": "arrow-turn-090-left"
},
{
"url": "/images/status/arrow-turn-090.png",
"name": "arrow-turn-090"
},
{
"url": "/images/status/arrow-turn-180-left.png",
"name": "arrow-turn-180-left"
},
{
"url": "/images/status/arrow-turn-180.png",
"name": "arrow-turn-180"
},
{
"url": "/images/status/arrow-turn-270-left.png",
"name": "arrow-turn-270-left"
},
{
"url": "/images/status/arrow-turn-270.png",
"name": "arrow-turn-270"
},
{
"url": "/images/status/arrow-turn.png",
"name": "arrow-turn"
},
{
"url": "/images/status/arrow.png",
"name": "arrow"
},
{
"url": "/images/status/asterisk.png",
"name": "asterisk"
},
{
"url": "/images/status/auction-hammer--arrow.png",
"name": "auction-hammer--arrow"
},
{
"url": "/images/status/auction-hammer--exclamation.png",
"name": "auction-hammer--exclamation"
},
{
"url": "/images/status/auction-hammer--minus.png",
"name": "auction-hammer--minus"
},
{
"url": "/images/status/auction-hammer--pencil.png",
"name": "auction-hammer--pencil"
},
{
"url": "/images/status/auction-hammer--plus.png",
"name": "auction-hammer--plus"
},
{
"url": "/images/status/auction-hammer-gavel.png",
"name": "auction-hammer-gavel"
},
{
"url": "/images/status/auction-hammer.png",
"name": "auction-hammer"
},
{
"url": "/images/status/balance--arrow.png",
"name": "balance--arrow"
},
{
"url": "/images/status/balance--exclamation.png",
"name": "balance--exclamation"
},
{
"url": "/images/status/balance--minus.png",
"name": "balance--minus"
},
{
"url": "/images/status/balance--pencil.png",
"name": "balance--pencil"
},
{
"url": "/images/status/balance--plus.png",
"name": "balance--plus"
},
{
"url": "/images/status/balance-unbalance.png",
"name": "balance-unbalance"
},
{
"url": "/images/status/balance.png",
"name": "balance"
},
{
"url": "/images/status/balloon--arrow.png",
"name": "balloon--arrow"
},
{
"url": "/images/status/balloon--exclamation.png",
"name": "balloon--exclamation"
},
{
"url": "/images/status/balloon--minus.png",
"name": "balloon--minus"
},
{
"url": "/images/status/balloon--pencil.png",
"name": "balloon--pencil"
},
{
"url": "/images/status/balloon--plus.png",
"name": "balloon--plus"
},
{
"url": "/images/status/balloon-ellipsis.png",
"name": "balloon-ellipsis"
},
{
"url": "/images/status/balloon-facebook-left.png",
"name": "balloon-facebook-left"
},
{
"url": "/images/status/balloon-facebook.png",
"name": "balloon-facebook"
},
{
"url": "/images/status/balloon-left.png",
"name": "balloon-left"
},
{
"url": "/images/status/balloon-quotation.png",
"name": "balloon-quotation"
},
{
"url": "/images/status/balloon-small-left.png",
"name": "balloon-small-left"
},
{
"url": "/images/status/balloon-small.png",
"name": "balloon-small"
},
{
"url": "/images/status/balloon-smiley.png",
"name": "balloon-smiley"
},
{
"url": "/images/status/balloon-sound.png",
"name": "balloon-sound"
},
{
"url": "/images/status/balloon-twitter-left.png",
"name": "balloon-twitter-left"
},
{
"url": "/images/status/balloon-twitter-retweet.png",
"name": "balloon-twitter-retweet"
},
{
"url": "/images/status/balloon-twitter.png",
"name": "balloon-twitter"
},
{
"url": "/images/status/balloon.png",
"name": "balloon"
},
{
"url": "/images/status/balloons-facebook.png",
"name": "balloons-facebook"
},
{
"url": "/images/status/balloons-twitter.png",
"name": "balloons-twitter"
},
{
"url": "/images/status/balloons.png",
"name": "balloons"
},
{
"url": "/images/status/bandaid--arrow.png",
"name": "bandaid--arrow"
},
{
"url": "/images/status/bandaid--exclamation.png",
"name": "bandaid--exclamation"
},
{
"url": "/images/status/bandaid--minus.png",
"name": "bandaid--minus"
},
{
"url": "/images/status/bandaid--pencil.png",
"name": "bandaid--pencil"
},
{
"url": "/images/status/bandaid--plus.png",
"name": "bandaid--plus"
},
{
"url": "/images/status/bandaid-small.png",
"name": "bandaid-small"
},
{
"url": "/images/status/bandaid.png",
"name": "bandaid"
},
{
"url": "/images/status/bank--arrow.png",
"name": "bank--arrow"
},
{
"url": "/images/status/bank--exclamation.png",
"name": "bank--exclamation"
},
{
"url": "/images/status/bank--minus.png",
"name": "bank--minus"
},
{
"url": "/images/status/bank--pencil.png",
"name": "bank--pencil"
},
{
"url": "/images/status/bank--plus.png",
"name": "bank--plus"
},
{
"url": "/images/status/bank.png",
"name": "bank"
},
{
"url": "/images/status/barcode-2d.png",
"name": "barcode-2d"
},
{
"url": "/images/status/barcode.png",
"name": "barcode"
},
{
"url": "/images/status/battery--arrow.png",
"name": "battery--arrow"
},
{
"url": "/images/status/battery--exclamation.png",
"name": "battery--exclamation"
},
{
"url": "/images/status/battery--minus.png",
"name": "battery--minus"
},
{
"url": "/images/status/battery--pencil.png",
"name": "battery--pencil"
},
{
"url": "/images/status/battery--plus.png",
"name": "battery--plus"
},
{
"url": "/images/status/battery-charge.png",
"name": "battery-charge"
},
{
"url": "/images/status/battery-empty.png",
"name": "battery-empty"
},
{
"url": "/images/status/battery-full.png",
"name": "battery-full"
},
{
"url": "/images/status/battery-low.png",
"name": "battery-low"
},
{
"url": "/images/status/battery-plug.png",
"name": "battery-plug"
},
{
"url": "/images/status/battery.png",
"name": "battery"
},
{
"url": "/images/status/beaker--arrow.png",
"name": "beaker--arrow"
},
{
"url": "/images/status/beaker--exclamation.png",
"name": "beaker--exclamation"
},
{
"url": "/images/status/beaker--minus.png",
"name": "beaker--minus"
},
{
"url": "/images/status/beaker--pencil.png",
"name": "beaker--pencil"
},
{
"url": "/images/status/beaker--plus.png",
"name": "beaker--plus"
},
{
"url": "/images/status/beaker-empty.png",
"name": "beaker-empty"
},
{
"url": "/images/status/beaker.png",
"name": "beaker"
},
{
"url": "/images/status/bean--arrow.png",
"name": "bean--arrow"
},
{
"url": "/images/status/bean--exclamation.png",
"name": "bean--exclamation"
},
{
"url": "/images/status/bean--minus.png",
"name": "bean--minus"
},
{
"url": "/images/status/bean--pencil.png",
"name": "bean--pencil"
},
{
"url": "/images/status/bean--plus.png",
"name": "bean--plus"
},
{
"url": "/images/status/bean-green.png",
"name": "bean-green"
},
{
"url": "/images/status/bean-small-green.png",
"name": "bean-small-green"
},
{
"url": "/images/status/bean-small.png",
"name": "bean-small"
},
{
"url": "/images/status/bean.png",
"name": "bean"
},
{
"url": "/images/status/beans.png",
"name": "beans"
},
{
"url": "/images/status/bell--arrow.png",
"name": "bell--arrow"
},
{
"url": "/images/status/bell--exclamation.png",
"name": "bell--exclamation"
},
{
"url": "/images/status/bell--minus.png",
"name": "bell--minus"
},
{
"url": "/images/status/bell--pencil.png",
"name": "bell--pencil"
},
{
"url": "/images/status/bell--plus.png",
"name": "bell--plus"
},
{
"url": "/images/status/bell-small.png",
"name": "bell-small"
},
{
"url": "/images/status/bell.png",
"name": "bell"
},
{
"url": "/images/status/bin--arrow.png",
"name": "bin--arrow"
},
{
"url": "/images/status/bin--exclamation.png",
"name": "bin--exclamation"
},
{
"url": "/images/status/bin--minus.png",
"name": "bin--minus"
},
{
"url": "/images/status/bin--pencil.png",
"name": "bin--pencil"
},
{
"url": "/images/status/bin--plus.png",
"name": "bin--plus"
},
{
"url": "/images/status/bin-full.png",
"name": "bin-full"
},
{
"url": "/images/status/bin-metal-full.png",
"name": "bin-metal-full"
},
{
"url": "/images/status/bin-metal.png",
"name": "bin-metal"
},
{
"url": "/images/status/bin.png",
"name": "bin"
},
{
"url": "/images/status/binocular--arrow.png",
"name": "binocular--arrow"
},
{
"url": "/images/status/binocular--exclamation.png",
"name": "binocular--exclamation"
},
{
"url": "/images/status/binocular--minus.png",
"name": "binocular--minus"
},
{
"url": "/images/status/binocular--pencil.png",
"name": "binocular--pencil"
},
{
"url": "/images/status/binocular--plus.png",
"name": "binocular--plus"
},
{
"url": "/images/status/binocular-small.png",
"name": "binocular-small"
},
{
"url": "/images/status/binocular.png",
"name": "binocular"
},
{
"url": "/images/status/block--arrow.png",
"name": "block--arrow"
},
{
"url": "/images/status/block--exclamation.png", | |
<reponame>TechLabs-Dortmund/motion-miners
import pandas as pd
import numpy as np
import json
import pickle
import seaborn as sns
import datetime
import math
from matplotlib import pyplot as plt
import constant
""""
This py document contains all side functions that are used in main.py
"""
def create_mapped_layout(layout_path):
"""
Create a completed layout has beacons, regions and flows
Parameters
----------
layout_path: str
path of layout file
Returns
-------
pandas.DataFrame
a dataframe of beacons, regions and flows
"""
with open(layout_path, "r") as layout_file:
layout = json.load(layout_file)
beacons_layout = pd.DataFrame.from_dict(layout["beacons"])
regions_layout = pd.DataFrame.from_dict(layout["regions"])
beacons_regions_layout = beacons_layout.merge(
regions_layout, left_on="region_uuid", right_on="uuid"
)
beacons_regions_layout.rename(
columns={
"id_x": "beacon_id",
"id_y": "region_id",
"name": "region_name",
"position_x": "b_pos_x",
"position_y": "b_pos_y",
"position_top_left_x": "rpos_top_left_x",
"position_top_left_y": "rpos_top_left_y",
"position_bottom_right_x": "rpos_bottom_right_x",
"position_bottom_right_y": "rpos_bottom_right_y",
},
inplace=True,
)
beacons_regions_layout["region_beacon"] = (
beacons_regions_layout["region_id"].astype(str)
+ "_"
+ beacons_regions_layout["beacon_id"].astype(str)
)
region_flow_df = pd.DataFrame.from_dict(data=constant.region_flow_dict)
exploded_region_flow_df = region_flow_df.explode("region_id")
final_layout = beacons_regions_layout.merge(
exploded_region_flow_df, left_on="region_id", right_on="region_id"
)
final_layout["flow_beacon"] = (
final_layout["flow_id"].astype(str)
+ "_"
+ final_layout["beacon_id"].astype(str)
)
return final_layout
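# Minimal usage sketch; the layout path is a hypothetical example.
def _example_layout_usage():
    layout = create_mapped_layout("data/layout.json")
    # every beacon row now carries its region and flow ids plus the combined keys
    return layout[["beacon_id", "region_id", "flow_id", "flow_beacon"]].head()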
def get_flow_of_beacon(layout):
"""
Determine the flow id which beacon belongs to
Parameters
----------
layout: pandas.DataFrame
the layout has mapped beacons, regions and flows
Returns
-------
pandas.DataFrame
a dataframe of beacons and its corresponding flow id
"""
beacons_with_flow = layout.loc[
:, ["beacon_id", "flow_id"]
] # only the beacon and flow region ids are relevant here
beacons_with_flow.sort_values(by=["flow_id", "beacon_id"], inplace=True)
beacons_with_flow = beacons_with_flow[
["flow_id", "beacon_id"]
] # change order of the columns
return beacons_with_flow
def signal_to_m_converter(dataframe, dbm="4(dBm)"):
"""
This function converts a (beacon) dataframe with signal values from the tracer
to the corresponding meter values, depending on the dBm power that was used.
By default, dbm = "4(dBm)".
"""
# extract all different values from dataframe
dataframe_unique_values = np.unique(dataframe)
df_txpower = pd.DataFrame(constant.txpower_vs_distance)
# extract the used power values from table_"Beacon_datasheet"
choose_power = df_txpower[dbm]
# caculate the lenght from powerlevel used for later iteration
lenght_power = (df_txpower[dbm]).count()
# empty list for collecting the corresponding meter values for each signal value
list_meter_values = []
flag = True
# loop over unique_values over dataframe
for value in dataframe_unique_values:
# interpolation function
for i in range(0, lenght_power):
if choose_power[i] >= value and value >= choose_power[i + 1]:
meter_value = (
(df_txpower["Distance(m)"][i + 1] - df_txpower["Distance(m)"][i])
/ (choose_power[i + 1] - choose_power[i])
* (value - choose_power[i])
+ df_txpower["Distance(m)"][i]
)
list_meter_values.append(meter_value)
if flag:
print("\nDistance i+1", df_txpower["Distance(m)"][i + 1])
print("\nDistance i", df_txpower["Distance(m)"][i])
print("\nchoose_power i+1", choose_power[i + 1])
print("\nchoose_power i", choose_power[i])
print("\nvalue", value)
print("\ndf_txpower[distance][i]", df_txpower["Distance(m)"][i])
flag = False
break
else:
meter_value = np.nan
list_meter_values.append(meter_value)
mod_dataframe = dataframe.replace(list(dataframe_unique_values), list_meter_values)
return mod_dataframe
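# Usage sketch with made-up RSSI readings; assumes the values fall inside the
# range covered by constant.txpower_vs_distance for the chosen power level.
def _example_signal_conversion():
    raw = pd.DataFrame({"Beacon_201": [-55.0, -60.0, -70.0]})
    return signal_to_m_converter(raw, dbm="4(dBm)")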
def extract_rssi_to_df(tracer_data_path):
"""
Takes the path to a pickle file and creates a dataframe with a timeline and the rssi_arr values
Parameters
----------
tracer_data_path: str
path to tracer data file
Returns
-------
pandas.DataFrame
tracer data
"""
# create a dataframe df from the picke.file
df_file = pickle.load(open(tracer_data_path, "rb"))
# extract rssi_arr key to numpy.array
rssi_arr = df_file["rssi_arr"]
# extract the time stamp
timestamp = df_file["timestamp"]
# set -Inf values to NaN (data cleaning)
rssi_arr[rssi_arr == -np.Inf] = np.nan
# figure out the shape from rssi_arr array for the timeline
dim = np.shape(rssi_arr)
# create a timeline and add it to the rssi_arr array
# problem that it changes from 0091 file to others why?
timeline = np.linspace(0.1, (dim[0]) * 0.1, dim[0]).reshape(dim[0], 1)
mod_rssi_arr = np.append(rssi_arr, timeline, axis=1)
# create the colum_names for df_rssi_arr (dataframe)
# Beacon 252 does not exist
# colum_names = []
# for i in range(1, dim[1] + 2):
# if i == 52:
# continue
# else:
# value = "Beacon_" + str(200 + i)
# colum_names.append(value)
# colum_names.append("timeline[s]")
# beacon id as column
colum_names = df_file["beacon_uuids"]
colum_names = np.append(colum_names, "timeline[s]")
# create df_rssi_arr
df_rssi_arr = pd.DataFrame(data=mod_rssi_arr, columns=colum_names)
df = df_rssi_arr.set_index("timeline[s]")
return df, timestamp
def add_flow_as_multi_index(tracer_df, beacon_flow):
"""
Add flow id as second level column to tracer data
Parameters
----------
tracer_df: pandas.DataFrame
tracer data
beacon_flow: pandas.DataFrame
the map of beacon and flow
Returns
-------
pandas.DataFrame
tracer data with multi column (multi index)
"""
tracer_df.columns = tracer_df.columns.map(int)
# list beacons that were not used
not_used_beacons = []
other_not_used_beacons = []
for beacon in tracer_df.columns.values:
if beacon not in list(beacon_flow["beacon_id"]):
not_used_beacons.append(beacon)
for index, row_beacon in beacon_flow.iterrows():
if row_beacon["beacon_id"] not in list(tracer_df.columns.values):
other_not_used_beacons.append(row_beacon["beacon_id"])
# delete beacon_columns that were not used (in both dfs)
new_tracer_df = tracer_df.drop(not_used_beacons, axis=1)
beacon_flow = beacon_flow.drop(
beacon_flow[beacon_flow.beacon_id.isin(other_not_used_beacons)].index, axis=0
)
# get tuples of flow + beacon in order to use pd.MultiIndex.from_tuples
multi_col_flow_tuple = list(beacon_flow.to_records(index=False))
multi_col_flow_tuple.sort(key=lambda tup: tup[1]) # sort the tuple by beacon ids
# add multicolumn index
new_tracer_df.columns = pd.MultiIndex.from_tuples(
multi_col_flow_tuple, names=("Flow", "Beacon")
)
return new_tracer_df[np.sort(new_tracer_df.columns)]
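# Sketch of the resulting structure (ids are hypothetical): after this call the
# tracer dataframe has (Flow, Beacon) column tuples, so tracer_df[(1, 201)]
# selects the signal column of beacon 201 within flow 1, and tracer_df[1]
# selects all beacons belonging to flow 1.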
def get_max_signal_values(tracer_df):
max_df = pd.DataFrame(
data=list(tracer_df.max(axis=1)),
index=range(len(tracer_df)),
columns=["max_signal"],
) # max values of each row in the original df
max_df["time"] = tracer_df.index
max_df["region_beacon"] = list(
tracer_df.idxmax(axis=1)
) # corresponding beacon id and region number of the max value
# max_signal = max(max_df["max_signal"]) - 30
# mean_signal = np.mean(max_df["max_signal"]) + 10
location = []
for row in max_df.itertuples():
current_high = row[3]
if len(location) == 0:
location.append(0)
elif row[1] >= -65:
location.append(row[3][0])
else:
if location[-1] == 1:
location.append(2)
elif location[-1] == 3:
location.append(4)
elif location[-1] == 6:
location.append(7)
elif location[-1] == 8:
location.append(9)
else:
location.append(location[-1])
max_df["location_of_tracer"] = location
max_df = max_df[["time", "max_signal", "region_beacon", "location_of_tracer"]]
return max_df
# def get_min_distance_values(tracer_df):
# min_df = pd.DataFrame(
# data=list(tracer_df.min(axis=1)),
# index=range(len(tracer_df)),
# columns=["min_distance"],
# ) # min values of each row in the original df
# min_df["time"] = tracer_df.index
# # corresponding beacon id and region number of the max value
# min_df["region_beacon"] = list(tracer_df.idxmin(axis=1))
# # max_df['location_of_tracer'] = 0 #zero as default
# location = []
# for row in min_df.itertuples():
# # if the maximum value is under -65 (adjust the value?) then the tracer is located in the respoing region
# if row[1] < -65:
# location.append(row[3][0])
# # otherwise the tracer can be still allocated to the previous region (the region where it has been located before)
# else:
# location.append(location[-1])
# min_df["location_of_tracer"] = location
# min_df = min_df[["time", "min_distance", "region_beacon", "location_of_tracer"]]
# min_df["location_number"] = min_df["location_of_tracer"].replace(
# constant.regions, range(0, 10)
# )
# return min_df
def order_list(df_location):
order_reg = []
order_reg_index = []
for index, value in df_location.iteritems():
if index == 0:
order_reg.append(value)
order_reg_index.append([value, index])
elif value != order_reg_index[-1][0]:
order_reg.append(value)
order_reg_index.append([value, index])
return order_reg, order_reg_index
def make_person_list(order_of_df, o_plus_index):
newlist = []
innerlist = []
for index, value in enumerate(order_of_df):
if index == 0 and value == 0:
next
elif value != 0:
innerlist.append(value)
else:
newlist.append(value)
newlist.append(innerlist)
innerlist = []
newlist_tup = []
innerlist_tup = []
for index, values in enumerate(o_plus_index):
if index == 0 and values[0] == 0:
next
elif values[0] != 0:
innerlist_tup.append((values[0], values[1]))
else:
newlist_tup.append((values[0], values[1]))
newlist_tup.append(innerlist)
innerlist_tup = []
return newlist, newlist_tup
def plot_beacon(beacon, dataframe):
"""
This plot function plots the values from a beacon over the time.
Input beacon as a string format -> beacon= "Beacon_2xx" .
"""
sns.relplot(x="timeline[s]", y=beacon, data=dataframe, kind="scatter")
plt.title(beacon)
plt.xlabel("Time[s]")
plt.ylabel("Distance person between Beacon[m]")
plt.show()
def plot_region(region_as_number, dataframe):
"""
This plot function plots the values of the beacons in a section area as a scatter plot.
Pass region_as_number as an integer.
"""
# extract index time and add it as a column
df_mod_without_timelineindex = dataframe.reset_index()
# from above (beginning py file) it takes the name of the section
select_region = constant.regions[region_as_number]
# from above (beginning py file) it takes the name of the beacons
beacons_in_region = constant.beacons_each_region[select_region]
# loop over the beacons in the section
for beacon in beacons_in_region:
plt.scatter(
df_mod_without_timelineindex["timeline[s]"],
df_mod_without_timelineindex[beacon],
alpha=0.7,
label=beacon,
)
plt.title(select_region)
plt.xlabel("Time[s]")
plt.ylabel("Distance person between Beacon[m]")
plt.legend()
plt.show()
def beacons_list_from_region(region_as_number):
"""
This function gives you the beacons associated with the chosen section as a list.
"""
# from above (beginning py file) it takes the name of the section
select_region = constant.regions[region_as_number]
# from above (beginning py file) it takes the name of the beacons
beacons_in_region = constant.beacons_each_region[select_region]
return beacons_in_region
def number_to_region(region_number):
region_name = constant.regions[region_number]
return region_name
def region_to_number(value):
region_name = int(constant.regions[value].index)
return region_name
def time_analyse(max_signal_df, timestamp):
##Step 1)
# sclicing max_signal_df into possible persons by separate zeros
flag = True
slicing_index = []
for index, row in max_signal_df.iterrows():
if flag == True:
if row["location_of_tracer"] == 0:
slicing_index.append(index)
flag = False
else:
if row["location_of_tracer"] != 0:
flag = True
##Step 2)
# set a subdataframe for every possible person and filter the data
search_terms_s, series_refs)
if series_form_result.equals("CANCEL") or self.__cancelled_b:
self.__cancelled_b = True
return BookStatus("SKIPPED") # user says 'cancel'
elif series_form_result.equals("SKIP"):
return BookStatus("SKIPPED") # user says 'skip this book'
elif series_form_result.equals("PERMSKIP"):
book.skip_forever()
return BookStatus("SKIPPED") # user says 'skip book always'
elif series_form_result.equals("SEARCH"):
return BookStatus("UNSCRAPED") # user says 'search again'
elif series_form_result.equals("SHOW") or \
series_form_result.equals("OK"): # user says 'ok'
scraped_series = ScrapedSeries( series_form_result.get_ref() )
# user has chosen a series, so ignore config.confirm_issue_b
# and only force the issue dialog if she clicked 'show'
force_issue_dialog_b = series_form_result.equals("SHOW")
scrape_cache[key] = scraped_series
# 4. at this point, the 'correct' series for the book is now in the
# series cache. now we try to pick the matching issue in that
# series. do so automatically if possible, or show the user the
# issue dialog if necessary (or requesting in config). METHOD EXIT:
# if the user sees the issue dialog, she may skip, cancel the
# whole scrape operation, go back to the series dialog, or
# actually scrape an issue.
scraped_series = scrape_cache[key]
# 5. now that we know the right series for this book, try to find
# the right issue, either automatically, or by showing the user
# the "issues dialog". METHOD EXIT: if we're scraping automatically
# we MUST be able to find the issue num automatically too, or else
# we delay the book til later. if we're manual, we may still be
# able to find the issue automatically, but if not we show the user
# the query dialog, and she may skip, cancel the whole scrape, go
# back to the series dialog, or actually choose an issue.
log.debug("searching for the right issue in '",
scraped_series.series_ref, "'")
issue_ref = None
if autoscrape_b:
# 5a. autoscrape means we MUST find the issue automatically...
series_ref = scraped_series.series_ref
if book.issue_num_s == "":
if series_ref.issue_count_n <=1:
refs = self.__query_issue_refs(series_ref)
if len(refs) == 1: issue_ref = list(refs)[0]
else:
issue_ref = db.query_issue_ref( series_ref, book.issue_num_s )
if issue_ref == None:
log.debug("couldn't find issue number. leaving until the end.")
del scrape_cache[key] # this was probably the wrong series, too
return BookStatus("DELAYED")
else:
log.debug(" ...identified issue number ", book.issue_num_s )
else:
# 5b. ...otherwise, try to find the issue interactively
issue_form_result = self.__choose_issue_ref( book,
scraped_series.series_ref, scraped_series.issue_refs,
force_issue_dialog_b)
if issue_form_result.equals("CANCEL") or self.__cancelled_b:
self.__cancelled_b = True
return BookStatus("SKIPPED")
elif issue_form_result.equals("SKIP") or \
issue_form_result.equals("PERMSKIP"):
if force_issue_dialog_b and not self.config.confirm_issue_b:
# the user clicked 'show issues', then 'skip', so we have to
# ignore his previous series selection.
del scrape_cache[key]
if issue_form_result.equals("PERMSKIP"):
book.skip_forever()
return BookStatus("SKIPPED")
elif issue_form_result.equals("BACK"):
# ignore user's previous series selection
del scrape_cache[key]
else:
issue_ref = issue_form_result.get_ref() # not None!
if issue_ref != None:
# we've found the right issue! copy it's data into the book.
log.debug("querying comicvine for issue details...")
issue = db.query_issue( issue_ref, self.config.update_rating_b )
book.update(issue)
# record the users choice. this allows the SeriesForm to give this
# choice a higher priority (sort order) in the future
self.__matchscore.record_choice(scraped_series.series_ref)
return BookStatus("SCRAPED")
raise Exception("should never get here")
# ==========================================================================
def __sort_books(self, books):
'''
Examines the given list of ComicBook objects, and returns a new list
that contains the same comics, but sorted in order of increasing series
name, and where the series names are the same, in order of increasing
issue number. Comics for which an IssueRef can be instantly generated
(comics that have been scraped before) will automatically be sorted to
the beginning of the list.
'''
# this is the comparator we'll use for sorting this list
def __compare_books(book1, book2):
result = book1.unique_series_s.CompareTo(book2.unique_series_s)
if result == 0:
num1 = '' if not book1.issue_num_s else book1.issue_num_s
num2 = '' if not book2.issue_num_s else book2.issue_num_s
def pad(num):
try:
f = float(num.lower().strip('abcdefgh'))
if f < 10: return "000" + num
elif f < 100: return "00" + num
elif f < 1000: return "0" + num
else: return num
except:
return num
result = pad(num1).CompareTo(pad(num2))
return result
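# Illustrative values: pad('7') -> '0007', pad('12') -> '0012', and
# pad('7a') -> '0007a' (trailing letters are kept), so plain string comparison
# of the padded issue numbers matches numeric ordering below 1000.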
# divide the books up into the ones that will scrape quickly ('cause they
# are rescrapes) and ones that have never been scraped before. sort each
# group separately, and append the sorted lists together so the fast ones
# will come first. (the idea is to save the user interaction until
# the end of the scrape operation. see issue 161.)
slow_scrape_books = []
fast_scrape_books = []
if self.config.fast_rescrape_b:
for book in books:
if book.skip_b or book.issue_ref:
fast_scrape_books.append(book)
else:
slow_scrape_books.append(book)
else:
slow_scrape_books = list(books)
slow_scrape_books.sort(cmp=__compare_books)
fast_scrape_books.sort(cmp=__compare_books)
return fast_scrape_books+slow_scrape_books
# ==========================================================================
def __choose_series_ref(self, book, search_terms_s, series_refs):
'''
This method displays the SeriesForm, a dialog that shows all of the
SeriesRefs from a database query and asks the user to choose one.
'book' -> the book that we are currently scraping
'search_terms_s' -> the search terms we used to find the SeriesRefs
'series_refs' -> a set of SeriesRefs; the results of the search
This method returns a SeriesFormResult object (from the SeriesForm).
'''
result = SeriesFormResult("SEARCH") # default
if series_refs:
log.debug('displaying the series selection dialog...')
with SeriesForm(self, book, series_refs, search_terms_s) as sform:
result = sform.show_form()
log.debug(' ...user chose to ', result.get_debug_string())
return result
# ==========================================================================
def __choose_issue_ref(self, book, series_ref, issue_refs, force_b):
'''
This method chooses the IssueRef that matches the given book from among
the given set of IssueRefs. It may do this automatically if it can, or
it may display the IssueForm, a dialog that displays the IssueRefs and
asks the user to choose one.
'book' -> the book that we are currently scraping
'series_ref_s' -> the SeriesRef for the given set of issue refs
'issue_refs' -> a set of IssueRefs; if empty, it MAY be filled with
the issue refs for the given series ref, if non-empty, this is the
list of IssueRefs we'll be choosing from.
'force_b' -> whether we should force the IssueForm to be shown, or
only show it when we have no choice.
This method returns a IssueFormResult object (from the IssueForm).
'''
result = None; # the return value; must start out null
series_name_s = series_ref.series_name_s
issue_num_s = '' if not book.issue_num_s else book.issue_num_s
if issue_refs == None: raise Exception("issue_refs must be a set we can populate")
# 1. are our issue refs empty? if so, and we're not forced to display
# the IssueForm, then try the shortcut way to find the right issue ref.
# if that fails, get all the issue refs for this series (so we can
# search for the issue the long way.)
if len(issue_refs) == 0 and issue_num_s and not force_b:
issue_ref = db.query_issue_ref(series_ref, book.issue_num_s)
if issue_ref:
result = IssueFormResult("OK", issue_ref) # found it!
log.debug(" ...identified issue number ", issue_num_s )
# 2. if we don't have our issue_refs yet, and we're going to be
# displaying the issue dialog, then get the issue_refs
if len(issue_refs) == 0 and (not result or force_b):
for ref in self.__query_issue_refs(series_ref):
issue_refs.add(ref) # do NOT make a new set here!
if self.__cancelled_b:
result = IssueFormResult("CANCEL")
elif len(issue_refs) == 0:
MessageBox.Show(self.comicrack.MainWindow,
i18n.get("NoIssuesAvailableText").format(series_name_s),
i18n.get("NoIssuesAvailableTitle"), MessageBoxButtons.OK,
MessageBoxIcon.Warning)
result = IssueFormResult("BACK")
log.debug(" ...no issues in this series; user must go back")
# 3. try to find the issue number directly in the given issue_refs.
if not result and len(issue_refs) > 0 and issue_num_s:
counts = {}
for ref in issue_refs:
counts[ref.issue_num_s] = counts.get(ref.issue_num_s, 0) + 1
if issue_num_s in counts and counts[issue_num_s] > 1:
# the same issue number appears more than once! user must pick.
log.debug(" ...found more than one issue number ", issue_num_s, )
else:
for ref in issue_refs:
# use natural | |
# This module is the interface for creating images and video from text prompts
# The functions here should also serve as examples of how you can use the Engine class to create images and video with your own creativity.
# Feel free to extract the contents of these methods and use them to build your own sequences.
# Change the image prompt weights over time
# Change the interval at which video frames are exported over time, to create the effect of speeding or slowing video
# Change the engine learning rate to increase or decrease the amount of change for each frame
# Create style transfer videos where each frame uses many image prompts, or many previous frames as image prompts.
# Create a zoom video where shift_x and shift_y are functions of the iteration to create spiraling zooms
# It's art. Go nuts!
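# For example, a prompt weight schedule can be an ordinary function of the
# frame index. The helper below is only an illustrative sketch (it is not part
# of this module's API), showing one way to ramp a text prompt weight over time:
#
#   def ramp_weight(frame, total_frames, start=0.2, end=1.0):
#       # linear interpolation from start to end across the video
#       return start + (end - start) * frame / max(total_frames - 1, 1)
#
#   text_prompt = f'A misty forest:{ramp_weight(frame, 300):.2f}'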
from vqgan_clip.engine import Engine, VQGAN_CLIP_Config
from vqgan_clip.z_smoother import Z_Smoother
from tqdm.auto import tqdm
import os
import contextlib
import torch
import warnings
from PIL import ImageFile, Image, ImageChops, PngImagePlugin
ImageFile.LOAD_TRUNCATED_IMAGES = True
from torchvision.transforms import functional as TF
from vqgan_clip import _functional as VF
import cv2
from os import listdir
from os.path import isfile, join
import numpy as np
import shutil
import glob
from distutils.dir_util import copy_tree
import sys
import math
from patch_based.train import style_transfer as st
def image(output_filename,
eng_config = VQGAN_CLIP_Config(),
text_prompts = [],
image_prompts = [],
noise_prompts = [],
init_image = None,
init_weight = 0.0,
iterations = 100,
save_every = None,
verbose = False,
leave_progress_bar = True):
"""Generate a single image using VQGAN+CLIP. The configuration of the algorithms is done via a VQGAN_CLIP_Config instance.
Args:
* output_filename (str) : location to save the output image. Omit the file extension.
* eng_config (VQGAN_CLIP_Config, optional): An instance of VQGAN_CLIP_Config with attributes customized for your use. See the documentation for VQGAN_CLIP_Config().
* text_prompts (str, optional) : Text that will be turned into a prompt via CLIP. Default = []
* image_prompts (str, optional) : Path to image that will be turned into a prompt via CLIP (analyzed for content). Default = []
* noise_prompts (str, optional) : Random number seeds can be used as prompts using the same format as a text prompt. E.g. \'123:0.1|234:0.2|345:0.3\' Stories (^) are supported. Default = []
* init_image (str, optional) : Path to an image file that will be used as the seed to generate output (analyzed for pixels).
* init_weight (float, optional) : Relative weight to assign to keeping the init_image content.
* iterations (int, optional) : Number of iterations of train() to perform before stopping. Default = 100
* save_every (int, optional) : An interim image will be saved as the final image is being generated. It's saved to the output location every save_every iterations, and training stats will be displayed. Default = None
* verbose (boolean, optional) : When true, prints diagnostic data every time a video frame is saved. Defaults to False.
* leave_progress_bar (boolean, optional) : When False, the tqdm progress bar will disappear when the work is completed. Useful for nested loops.
"""
if text_prompts not in [[], None] and not isinstance(text_prompts, str):
raise ValueError('text_prompts must be a string')
if image_prompts not in [[], None] and not isinstance(image_prompts, str):
raise ValueError('image_prompts must be a string')
if noise_prompts not in [[], None] and not isinstance(noise_prompts, str):
raise ValueError('noise_prompts must be a string')
if init_image not in [[], None] and not os.path.isfile(init_image):
raise ValueError(f'init_image {init_image} does not exist.')
if save_every not in [[], None] and not isinstance(save_every, int):
raise ValueError('save_every must be an int.')
if text_prompts in [[], None] and image_prompts in [[], None] and noise_prompts in [[], None]:
raise ValueError('No valid prompts were provided')
# output_filename = _filename_to_jpg(output_filename)
output_folder_name = os.path.dirname(output_filename)
if output_folder_name:
os.makedirs(output_folder_name, exist_ok=True)
if init_image:
eng_config.init_image = init_image
output_size_X, output_size_Y = VF.filesize_matching_aspect_ratio(init_image, eng_config.output_image_size[0], eng_config.output_image_size[1])
eng_config.output_image_size = [output_size_X, output_size_Y]
eng_config.init_weight = init_weight
# suppress stdout to keep the progress bar clear
with open(os.devnull, 'w') as devnull:
with contextlib.redirect_stdout(devnull):
eng = Engine(eng_config)
eng.initialize_VQGAN_CLIP()
parsed_text_prompts, parsed_image_prompts, parsed_noise_prompts = VF.parse_all_prompts(text_prompts, image_prompts, noise_prompts)
eng.encode_and_append_prompts(0, parsed_text_prompts, parsed_image_prompts, parsed_noise_prompts)
eng.configure_optimizer()
# metadata to save to the jpeg file as data chunks
img_info = [('text_prompts',text_prompts),
('image_prompts',image_prompts),
('noise_prompts',noise_prompts),
('iterations',iterations),
('init_image',init_image),
('save_every',save_every),
('cut_method',eng_config.cut_method),
('seed',eng.conf.seed)]
# generate the image
try:
for iteration_num in tqdm(range(1,iterations+1),unit='iteration',desc='single image',leave=leave_progress_bar):
#perform iterations of train()
lossAll = eng.train(iteration_num)
if save_every and iteration_num % save_every == 0:
if verbose:
# display some statistics about how the GAN training is going whenever we save an interim image
losses_str = ', '.join(f'{loss.item():7.3f}' for loss in lossAll)
tqdm.write(f'iteration:{iteration_num:6d}\tloss sum: {sum(lossAll).item():7.3f}\tloss for each prompt:{losses_str}')
# save an interim copy of the image so you can look at it as it changes if you like
eng.save_current_output(output_filename,img_info)
# Always save the output at the end
eng.save_current_output(output_filename,img_info)
except KeyboardInterrupt:
pass
config_info=f'iterations: {iterations}, '\
f'image_prompts: {image_prompts}, '\
f'noise_prompts: {noise_prompts}, '\
f'init_weight_method: {eng_config.init_image_method}, '\
f'init_weight {eng_config.init_weight}, '\
f'init_image {init_image}, '\
f'cut_method {eng_config.cut_method}, '\
f'seed {eng.conf.seed}'
return config_info
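# A minimal usage sketch for image(). The module import path, output location,
# and prompt text below are illustrative assumptions, not taken from this file:
#
#   from vqgan_clip import generate
#   from vqgan_clip.engine import VQGAN_CLIP_Config
#   config = VQGAN_CLIP_Config()
#   config.output_image_size = [448, 448]
#   generate.image(output_filename='output/sample',
#                  eng_config=config,
#                  text_prompts='A pastoral landscape painting:1.0|sunset:0.5',
#                  iterations=200,
#                  save_every=50)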
def style_transfer(video_frames,
eng_config=VQGAN_CLIP_Config(),
text_prompts = 'Covered in spiders | Surreal:0.5',
image_prompts = [],
noise_prompts = [],
iterations_per_frame = 15,
iterations_for_first_frame = 15,
current_source_frame_image_weight = 2.0,
change_prompts_on_frame = None,
generated_video_frames_path='./video_frames',
current_source_frame_prompt_weight=0.0,
z_smoother=False,
z_smoother_buffer_len=3,
z_smoother_alpha=0.7,
verbose=False,
leave_progress_bar = True,
output_extension='jpg'):
"""Apply a style to existing video frames using VQGAN+CLIP.
Set iterations_per_frame to control how strong the style transfer effect will be.
Set current_source_frame_image_weight to control how closely the result matches the source frame. Balance iterations_per_frame and current_source_frame_image_weight to shape the output.
Set z_smoother to True to apply some latent-vector-based motion smoothing that will increase frame-to-frame consistency further at the cost of adding some motion blur.
Set current_source_frame_prompt_weight >0 to have the generated content CLIP-match the source image.
Args:
* video_frames (list of str) : List of paths to the video frames that will be restyled.
* eng_config (VQGAN_CLIP_Config, optional): An instance of VQGAN_CLIP_Config with attributes customized for your use. See the documentation for VQGAN_CLIP_Config().
* text_prompts (str, optional) : Text that will be turned into a prompt via CLIP. Default = []
* image_prompts (str, optional) : Path to image that will be turned into a prompt via CLIP. Default = []
* noise_prompts (str, optional) : Random number seeds can be used as prompts using the same format as a text prompt. E.g. \'123:0.1|234:0.2|345:0.3\' Stories (^) are supported. Default = []
* change_prompts_on_frame (list(int)) : All prompts (separated by "^") will be cycled forward on the video frames provided here. Defaults to None.
* iterations_per_frame (int, optional) : Number of iterations of train() to perform for each frame of video. Default = 15
* iterations_for_first_frame (int, optional) : Number of additional iterations of train() to perform on the first frame so that the image is not a gray/random field. Default = 15
* generated_video_frames_path (str, optional) : Path where still images should be saved as they are generated before being combined into a video. Defaults to './video_frames'.
* current_source_frame_image_weight (float) : Assigns a loss weight to make the output image look like the source image itself. Default = 2.0
* current_source_frame_prompt_weight (float) : Assigns a loss weight to make the output image look like the CLIP representation of the source image. Default = 0.0
* z_smoother (boolean, optional) : If true, smooth the latent vectors (z) used for image generation by combining multiple z vectors through an exponentially weighted moving average (EWMA). Defaults to False.
* z_smoother_buffer_len (int, optional) : How many images' latent vectors should be combined in the smoothing algorithm. Bigger numbers will be smoother, and have more blurred motion. Must be an odd number. Defaults to 3.
* z_smoother_alpha (float, optional) : When combining multiple latent vectors for smoothing, this sets how important the "keyframe" z is. As frames move further from the keyframe, their weight drops by (1-z_smoother_alpha) each frame. Bigger numbers apply more smoothing. Defaults to 0.7.
* leave_progress_bar (boolean, optional) : When False, the tqdm progress bar will disappear when the work | |
COPY_TEST_SPECS = [{'dest_basename': None,
'dest_trailing_slash': True,
'dest_type': 'file',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': None,
'dest_trailing_slash': False,
'dest_type': 'file',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': None,
'dest_trailing_slash': True,
'dest_type': 'file',
'result': {'files': {'/a': 'src/a', '/keep': ''}},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': None,
'dest_trailing_slash': False,
'dest_type': 'file',
'result': {'files': {'/a': 'src/a', '/keep': ''}},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': None,
'dest_trailing_slash': True,
'dest_type': 'file',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': None,
'dest_trailing_slash': False,
'dest_type': 'file',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': None,
'dest_trailing_slash': True,
'dest_type': 'file',
'result': {'exception': 'IsADirectoryError'},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': None,
'dest_trailing_slash': False,
'dest_type': 'file',
'result': {'exception': 'IsADirectoryError'},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': None,
'dest_trailing_slash': True,
'dest_type': 'file',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': None,
'dest_trailing_slash': False,
'dest_type': 'file',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': None,
'dest_trailing_slash': True,
'dest_type': 'file',
'result': {'files': {'/a': 'src/a', '/keep': ''}},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': None,
'dest_trailing_slash': False,
'dest_type': 'file',
'result': {'files': {'/a': 'src/a', '/keep': ''}},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': 'a',
'dest_trailing_slash': True,
'dest_type': 'file',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': 'a',
'dest_trailing_slash': False,
'dest_type': 'file',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': 'a',
'dest_trailing_slash': True,
'dest_type': 'file',
'result': {'exception': 'NotADirectoryError'},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': 'a',
'dest_trailing_slash': False,
'dest_type': 'file',
'result': {'exception': 'NotADirectoryError'},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': 'a',
'dest_trailing_slash': True,
'dest_type': 'file',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': 'a',
'dest_trailing_slash': False,
'dest_type': 'file',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': 'a',
'dest_trailing_slash': True,
'dest_type': 'file',
'result': {'exception': 'IsADirectoryError'},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': 'a',
'dest_trailing_slash': False,
'dest_type': 'file',
'result': {'files': {'/a': 'src/a', '/keep': ''}},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': 'a',
'dest_trailing_slash': True,
'dest_type': 'file',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': 'a',
'dest_trailing_slash': False,
'dest_type': 'file',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': 'a',
'dest_trailing_slash': True,
'dest_type': 'file',
'result': {'exception': 'NotADirectoryError'},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': 'a',
'dest_trailing_slash': False,
'dest_type': 'file',
'result': {'files': {'/a': 'src/a', '/keep': ''}},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': 'x',
'dest_trailing_slash': True,
'dest_type': 'file',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': 'x',
'dest_trailing_slash': False,
'dest_type': 'file',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': 'x',
'dest_trailing_slash': True,
'dest_type': 'file',
'result': {'files': {'/a': 'dest/a', '/keep': '', '/x/a': 'src/a'}},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': 'x',
'dest_trailing_slash': False,
'dest_type': 'file',
'result': {'files': {'/a': 'dest/a', '/keep': '', '/x/a': 'src/a'}},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': 'x',
'dest_trailing_slash': True,
'dest_type': 'file',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': 'x',
'dest_trailing_slash': False,
'dest_type': 'file',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': 'x',
'dest_trailing_slash': True,
'dest_type': 'file',
'result': {'exception': 'IsADirectoryError'},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': 'x',
'dest_trailing_slash': False,
'dest_type': 'file',
'result': {'files': {'/a': 'dest/a', '/keep': '', '/x': 'src/a'}},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': 'x',
'dest_trailing_slash': True,
'dest_type': 'file',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': 'x',
'dest_trailing_slash': False,
'dest_type': 'file',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': 'x',
'dest_trailing_slash': True,
'dest_type': 'file',
'result': {'files': {'/a': 'dest/a', '/keep': '', '/x/a': 'src/a'}},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': 'x',
'dest_trailing_slash': False,
'dest_type': 'file',
'result': {'files': {'/a': 'dest/a', '/keep': '', '/x': 'src/a'}},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': None,
'dest_trailing_slash': True,
'dest_type': 'dir',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': None,
'dest_trailing_slash': False,
'dest_type': 'dir',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': None,
'dest_trailing_slash': True,
'dest_type': 'dir',
'result': {'exception': 'IsADirectoryError'},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': None,
'dest_trailing_slash': False,
'dest_type': 'dir',
'result': {'exception': 'IsADirectoryError'},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': None,
'dest_trailing_slash': True,
'dest_type': 'dir',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': None,
'dest_trailing_slash': False,
'dest_type': 'dir',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': None,
'dest_trailing_slash': True,
'dest_type': 'dir',
'result': {'exception': 'IsADirectoryError'},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': None,
'dest_trailing_slash': False,
'dest_type': 'dir',
'result': {'exception': 'IsADirectoryError'},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': None,
'dest_trailing_slash': True,
'dest_type': 'dir',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': None,
'dest_trailing_slash': False,
'dest_type': 'dir',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': None,
'dest_trailing_slash': True,
'dest_type': 'dir',
'result': {'exception': 'IsADirectoryError'},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': None,
'dest_trailing_slash': False,
'dest_type': 'dir',
'result': {'exception': 'IsADirectoryError'},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': 'a',
'dest_trailing_slash': True,
'dest_type': 'dir',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': 'a',
'dest_trailing_slash': False,
'dest_type': 'dir',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': 'a',
'dest_trailing_slash': True,
'dest_type': 'dir',
'result': {'files': {'/a/a': 'src/a',
'/a/file3': 'dest/a/file3',
'/a/subdir/file2': 'dest/a/subdir/file2',
'/keep': ''}},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': 'a',
'dest_trailing_slash': False,
'dest_type': 'dir',
'result': {'files': {'/a/a': 'src/a',
'/a/file3': 'dest/a/file3',
'/a/subdir/file2': 'dest/a/subdir/file2',
'/keep': ''}},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': 'a',
'dest_trailing_slash': True,
'dest_type': 'dir',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': 'a',
'dest_trailing_slash': False,
'dest_type': 'dir',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': 'a',
'dest_trailing_slash': True,
'dest_type': 'dir',
'result': {'exception': 'IsADirectoryError'},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': 'a',
'dest_trailing_slash': False,
'dest_type': 'dir',
'result': {'exception': 'IsADirectoryError'},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': 'a',
'dest_trailing_slash': True,
'dest_type': 'dir',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': 'a',
'dest_trailing_slash': False,
'dest_type': 'dir',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': 'a',
'dest_trailing_slash': True,
'dest_type': 'dir',
'result': {'files': {'/a/a': 'src/a',
'/a/file3': 'dest/a/file3',
'/a/subdir/file2': 'dest/a/subdir/file2',
'/keep': ''}},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': 'a',
'dest_trailing_slash': False,
'dest_type': 'dir',
'result': {'files': {'/a/a': 'src/a',
'/a/file3': 'dest/a/file3',
'/a/subdir/file2': 'dest/a/subdir/file2',
'/keep': ''}},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': 'x',
'dest_trailing_slash': True,
'dest_type': 'dir',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': 'x',
'dest_trailing_slash': False,
'dest_type': 'dir',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': 'x',
'dest_trailing_slash': True,
'dest_type': 'dir',
'result': {'files': {'/a/file3': 'dest/a/file3',
'/a/subdir/file2': 'dest/a/subdir/file2',
'/keep': '',
'/x/a': 'src/a'}},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': 'x',
'dest_trailing_slash': False,
'dest_type': 'dir',
'result': {'files': {'/a/file3': 'dest/a/file3',
'/a/subdir/file2': 'dest/a/subdir/file2',
'/keep': '',
'/x/a': 'src/a'}},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': 'x',
'dest_trailing_slash': True,
'dest_type': 'dir',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': 'x',
'dest_trailing_slash': False,
'dest_type': 'dir',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': 'x',
'dest_trailing_slash': True,
'dest_type': 'dir',
'result': {'exception': 'IsADirectoryError'},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': 'x',
'dest_trailing_slash': False,
'dest_type': 'dir',
'result': {'files': {'/a/file3': 'dest/a/file3',
'/a/subdir/file2': 'dest/a/subdir/file2',
'/keep': '',
'/x': 'src/a'}},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': 'x',
'dest_trailing_slash': True,
'dest_type': 'dir',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': 'x',
'dest_trailing_slash': False,
'dest_type': 'dir',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': 'x',
'dest_trailing_slash': True,
'dest_type': 'dir',
'result': {'files': {'/a/file3': 'dest/a/file3',
'/a/subdir/file2': 'dest/a/subdir/file2',
'/keep': '',
'/x/a': 'src/a'}},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': 'x',
'dest_trailing_slash': False,
'dest_type': 'dir',
'result': {'files': {'/a/file3': 'dest/a/file3',
'/a/subdir/file2': 'dest/a/subdir/file2',
'/keep': '',
'/x': 'src/a'}},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'infer_dest'},
{'dest_basename': None,
'dest_trailing_slash': True,
'dest_type': 'noexist',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': None,
'dest_trailing_slash': False,
'dest_type': 'noexist',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': None,
'dest_trailing_slash': True,
'dest_type': 'noexist',
'result': {'files': {'/a': 'src/a', '/keep': ''}},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': None,
'dest_trailing_slash': False,
'dest_type': 'noexist',
'result': {'files': {'/a': 'src/a', '/keep': ''}},
'src_trailing_slash': False,
'src_type': 'file',
'treat_dest_as': 'dest_dir'},
{'dest_basename': None,
'dest_trailing_slash': True,
'dest_type': 'noexist',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
'src_type': 'file',
'treat_dest_as': 'dest_is_target'},
{'dest_basename': None,
'dest_trailing_slash': False,
'dest_type': 'noexist',
'result': {'exception': 'FileNotFoundError'},
'src_trailing_slash': True,
| |
b
def calibrate_mass(bwidth, mass_left, mass_right, true_md):
bbins = np.arange(-mass_left, mass_right, bwidth)
H1, b1 = np.histogram(true_md, bins=bbins)
b1 = b1 + bwidth
b1 = b1[:-1]
popt, pcov = curve_fit(noisygaus, b1, H1, p0=[1, np.median(true_md), 1, 1])
mass_shift, mass_sigma = popt[1], abs(popt[2])
return mass_shift, mass_sigma, pcov[0][0]
def calibrate_RT_gaus(bwidth, mass_left, mass_right, true_md):
bbins = np.arange(-mass_left, mass_right, bwidth)
H1, b1 = np.histogram(true_md, bins=bbins)
b1 = b1 + bwidth
b1 = b1[:-1]
popt, pcov = curve_fit(noisygaus, b1, H1, p0=[1, np.median(true_md), bwidth * 5, 1])
mass_shift, mass_sigma = popt[1], abs(popt[2])
return mass_shift, mass_sigma, pcov[0][0]
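# Illustrative only (synthetic data, not part of the original pipeline): given
# an array of observed mass errors in ppm, the calibration could be called as
#
#   errs_ppm = np.random.normal(loc=1.5, scale=3.0, size=5000)
#   shift, sigma, cov = calibrate_mass(0.1, 8, 8, errs_ppm)
#
# where shift and sigma are then used to normalize the raw mass differences.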
def process_file(args):
utils.seen_target.clear()
utils.seen_decoy.clear()
args = utils.prepare_decoy_db(args)
return process_peptides(args)
def peptide_processor(peptide, **kwargs):
seqm = peptide
results = []
m = cmass.fast_mass(seqm, aa_mass=kwargs['aa_mass']) + kwargs['aa_mass'].get('Nterm', 0) + kwargs['aa_mass'].get('Cterm', 0)
acc_l = kwargs['acc_l']
acc_r = kwargs['acc_r']
dm_l = acc_l * m / 1.0e6
if acc_r == acc_l:
dm_r = dm_l
else:
dm_r = acc_r * m / 1.0e6
start = nmasses.searchsorted(m - dm_l)
end = nmasses.searchsorted(m + dm_r)
for i in range(start, end):
peak_id = ids[i]
I = Is[i]
massdiff = (m - nmasses[i]) / m * 1e6
mods = 0
results.append((seqm, massdiff, mods, i))
return results
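# Note on the tolerance window above: it is symmetric in ppm, so for a peptide
# of neutral mass m = 1500 Da and acc_l = acc_r = 10 ppm the half-width is
# dm = 10 * 1500 / 1e6 = 0.015 Da, and the two searchsorted() calls select all
# features with massCalib in [1499.985, 1500.015] from the sorted nmasses array.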
def prepare_peptide_processor(fname, args):
global nmasses
global rts
global charges
global ids
global Is
global Scans
global Isotopes
global mzraw
global avraw
global imraw
min_ch = args['cmin']
max_ch = args['cmax']
min_isotopes = args['i']
min_scans = args['sc']
print('Reading spectra ...')
df_features = utils.iterate_spectra(fname, min_ch, max_ch, min_isotopes, min_scans)
# Sort by neutral mass
df_features = df_features.sort_values(by='massCalib')
nmasses = df_features['massCalib'].values
rts = df_features['rtApex'].values
charges = df_features['charge'].values
ids = df_features['id'].values
Is = df_features['intensityApex'].values
Scans = df_features['nScans'].values
Isotopes = df_features['nIsotopes'].values
mzraw = df_features['mz'].values
avraw = np.zeros(len(df_features))
if len(set(df_features['FAIMS'])) > 1:
imraw = df_features['FAIMS'].values
else:
imraw = df_features['ion_mobility'].values
print('Number of peptide isotopic clusters: %d' % (len(nmasses), ))
fmods = args['fmods']
aa_mass = mass.std_aa_mass
if fmods:
for mod in fmods.split(','):
m, aa = mod.split('@')
if aa == '[':
aa_mass['Nterm'] = float(m)
elif aa == ']':
aa_mass['Cterm'] = float(m)
else:
aa_mass[aa] += float(m)
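# Example of the fixed-modification string this loop expects (an assumption
# inferred from the parsing above, not quoted from the tool's docs): entries
# are "<mass>@<residue>", comma-separated, with "[" and "]" standing for the
# N- and C-terminus, e.g. args['fmods'] = '57.021464@C,42.010565@[' adds
# carbamidomethyl to Cys and an acetyl mass to the peptide N-terminus.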
acc_l = args['ptol']
acc_r = args['ptol']
return {'aa_mass': aa_mass, 'acc_l': acc_l, 'acc_r': acc_r, 'args': args}, df_features
def peptide_processor_iter_isoforms(peptide, **kwargs):
out = []
out.append(peptide_processor(peptide, **kwargs))
return out
def get_results(ms1results):
resdict = dict()
labels = [
'seqs',
'md',
'mods',
'iorig',
# 'rt',
# 'ids',
# 'Is',
# 'Scans',
# 'Isotopes',
# 'mzraw',
# 'av',
# 'ch',
# 'im',
]
for label, val in zip(labels, zip(*ms1results)):
resdict[label] = np.array(val)
return resdict
def filter_results(resultdict, idx):
tmp = dict()
for label in resultdict:
tmp[label] = resultdict[label][idx]
return tmp
def process_peptides(args):
fname = args['file']
fdr = args['fdr'] / 100
min_isotopes_calibration = args['ci']
try:
outpath = args['outpath']
except KeyError:
outpath = False
if outpath:
base_out_name = os.path.splitext(os.path.join(outpath, os.path.basename(fname)))[0]
else:
base_out_name = os.path.splitext(fname)[0]
out_log = open(base_out_name + '_log.txt', 'w')
out_log.close()
out_log = open(base_out_name + '_log.txt', 'w')
elude_path = args['elude']
elude_path = elude_path.strip()
deeplc_path = args['deeplc']
deeplc_path = deeplc_path.strip()
calib_path = args['pl']
calib_path = calib_path.strip()
if calib_path and args['ts']:
args['ts'] = 0
print('Two-stage RT prediction does not work with list of MS/MS identified peptides...')
args['enzyme'] = utils.get_enzyme(args['e'])
ms1results = []
peps = utils.peptide_gen(args)
kwargs, df_features = prepare_peptide_processor(fname, args)
func = peptide_processor_iter_isoforms
print('Running the search ...')
for y in utils.multimap(1, func, peps, **kwargs):
for result in y:
if len(result):
ms1results.extend(result)
prefix = args['prefix']
protsN, pept_prot = utils.get_prot_pept_map(args)
resdict = get_results(ms1results)
del ms1results
resdict['mc'] = np.array([parser.num_sites(z, args['enzyme']) for z in resdict['seqs']])
isdecoy = lambda x: x[0].startswith(prefix)
isdecoy_key = lambda x: x.startswith(prefix)
escore = lambda x: -x[1]
e_ind = np.array([Isotopes[iorig] for iorig in resdict['iorig']]) >= min_isotopes_calibration
# e_ind = resdict['Isotopes'] >= min_isotopes_calibration
# e_ind = resdict['Isotopes'] >= 1
resdict2 = filter_results(resdict, e_ind)
e_ind = resdict2['mc'] == 0
resdict2 = filter_results(resdict2, e_ind)
p1 = set(resdict2['seqs'])
if len(p1):
prots_spc2 = defaultdict(set)
for pep, proteins in pept_prot.items():
if pep in p1:
for protein in proteins:
prots_spc2[protein].add(pep)
for k in protsN:
if k not in prots_spc2:
prots_spc2[k] = set([])
prots_spc = dict((k, len(v)) for k, v in prots_spc2.items())
names_arr = np.array(list(prots_spc.keys()))
v_arr = np.array(list(prots_spc.values()))
n_arr = np.array([protsN[k] for k in prots_spc])
top100decoy_score = [prots_spc.get(dprot, 0) for dprot in protsN if isdecoy_key(dprot)]
top100decoy_N = [val for key, val in protsN.items() if isdecoy_key(key)]
p = np.mean(top100decoy_score) / np.mean(top100decoy_N)
print('p=%s' % (np.mean(top100decoy_score) / np.mean(top100decoy_N)))
prots_spc = dict()
all_pvals = calc_sf_all(v_arr, n_arr, p)
for idx, k in enumerate(names_arr):
prots_spc[k] = all_pvals[idx]
checked = set()
for k, v in list(prots_spc.items()):
if k not in checked:
if isdecoy_key(k):
if prots_spc.get(k.replace(prefix, ''), -1e6) > v:
del prots_spc[k]
checked.add(k.replace(prefix, ''))
else:
if prots_spc.get(prefix + k, -1e6) > v:
del prots_spc[k]
checked.add(prefix + k)
filtered_prots = aux.filter(prots_spc.items(), fdr=0.05, key=escore, is_decoy=isdecoy, remove_decoy=True, formula=1,
full_output=True)
identified_proteins = 0
for x in filtered_prots:
identified_proteins += 1
print('results for default search: number of identified proteins = %d' % (identified_proteins, ))
print('Running mass recalibration...')
e_ind = resdict['mc'] == 0
resdict2 = filter_results(resdict, e_ind)
true_md = []
true_isotopes = []
true_seqs = []
true_prots = set(x[0] for x in filtered_prots)
for pep, proteins in pept_prot.items():
if any(protein in true_prots for protein in proteins):
true_seqs.append(pep)
e_ind = np.in1d(resdict2['seqs'], true_seqs)
true_seqs = resdict2['seqs'][e_ind]
true_md.extend(resdict2['md'][e_ind])
true_md = np.array(true_md)
# true_isotopes.extend(resdict2['Isotopes'][e_ind])
true_isotopes.extend(np.array([Isotopes[iorig] for iorig in resdict2['iorig']])[e_ind])
true_isotopes = np.array(true_isotopes)
true_intensities = np.array([Is[iorig] for iorig in resdict2['iorig']])[e_ind]
# true_intensities = np.array(resdict2['Is'][e_ind])
# true_rt = np.array(resdict2['rt'][e_ind])
# true_mz = np.array(resdict2['mzraw'][e_ind])
true_rt = np.array([rts[iorig] for iorig in resdict2['iorig']])[e_ind]
true_mz = np.array([mzraw[iorig] for iorig in resdict2['iorig']])[e_ind]
df1 = pd.DataFrame()
df1['mass diff'] = true_md
df1['mz'] = true_mz
df1['RT'] = true_rt
df1['Intensity'] = true_intensities
df1['seqs'] = true_seqs
df1['orig_md'] = true_md
mass_left = args['ptol']
mass_right = args['ptol']
try:
mass_shift, mass_sigma, covvalue = calibrate_mass(0.001, mass_left, mass_right, true_md)
except:
mass_shift, mass_sigma, covvalue = calibrate_mass(0.01, mass_left, mass_right, true_md)
print('Calibrated mass shift: ', mass_shift)
print('Calibrated mass sigma in ppm: ', mass_sigma)
out_log.write('Calibrated mass shift: %s\n' % (mass_shift, ))
out_log.write('Calibrated mass sigma in ppm: %s\n' % (mass_sigma, ))
e_all = abs(resdict['md'] - mass_shift) / (mass_sigma)
r = 3.0
e_ind = e_all <= r
resdict = filter_results(resdict, e_ind)
zs_all = e_all[e_ind] ** 2
e_ind = np.array([Isotopes[iorig] for iorig in resdict['iorig']]) >= min_isotopes_calibration
resdict2 = filter_results(resdict, e_ind)
e_ind = resdict2['mc'] == 0
resdict2 = filter_results(resdict2, e_ind)
p1 = set(resdict2['seqs'])
prots_spc2 = defaultdict(set)
for pep, proteins in pept_prot.items():
if pep in p1:
for protein in proteins:
prots_spc2[protein].add(pep)
for k in protsN:
if k not in prots_spc2:
prots_spc2[k] = set([])
prots_spc = dict((k, len(v)) for k, v in prots_spc2.items())
names_arr = np.array(list(prots_spc.keys()))
v_arr = np.array(list(prots_spc.values()))
n_arr = np.array([protsN[k] for k in prots_spc])
top100decoy_score = [prots_spc.get(dprot, 0) for dprot in protsN if isdecoy_key(dprot)]
top100decoy_N = [val for key, val in protsN.items() if isdecoy_key(key)]
p = np.mean(top100decoy_score) / np.mean(top100decoy_N)
print('p=%s' % (np.mean(top100decoy_score) / np.mean(top100decoy_N)))
prots_spc = dict()
all_pvals = calc_sf_all(v_arr, n_arr, p)
for idx, k in enumerate(names_arr):
prots_spc[k] = all_pvals[idx]
checked = set()
for k, v in list(prots_spc.items()):
if k not in checked:
if isdecoy_key(k):
if prots_spc.get(k.replace(prefix, ''), -1e6) > v:
del prots_spc[k]
checked.add(k.replace(prefix, ''))
else:
if prots_spc.get(prefix + k, -1e6) > v:
del prots_spc[k]
checked.add(prefix + k)
filtered_prots = aux.filter(prots_spc.items(), fdr=0.05, key=escore, is_decoy=isdecoy, remove_decoy=True, formula=1,
full_output=True)
identified_proteins = 0
for x in filtered_prots:
identified_proteins += 1
print('results for default search after mass calibration: number of identified proteins = %d' % (identified_proteins, ))
print('Running RT prediction...')
e_ind = np.array([Isotopes[iorig] for iorig in resdict['iorig']]) >= min_isotopes_calibration
# e_ind = resdict['Isotopes'] >= min_isotopes_calibration
# e_ind = resdict['Isotopes'] >= 1
resdict2 = filter_results(resdict, e_ind)
e_ind = resdict2['mc'] == 0
resdict2 = filter_results(resdict2, e_ind)
true_seqs = []
true_rt = []
true_isotopes = []
true_prots = set(x[0] for x in filtered_prots)#[:5])
for pep, proteins in pept_prot.items():
if any(protein in true_prots for protein in proteins):
true_seqs.append(pep)
e_ind = np.in1d(resdict2['seqs'], true_seqs)
true_seqs = resdict2['seqs'][e_ind]
true_rt.extend(np.array([rts[iorig] for iorig in resdict2['iorig']])[e_ind])
# true_rt.extend(resdict2['rt'][e_ind])
true_rt = np.array(true_rt)
true_isotopes.extend(np.array([Isotopes[iorig] for iorig in resdict2['iorig']])[e_ind])
# true_isotopes.extend(resdict2['Isotopes'][e_ind])
true_isotopes = np.array(true_isotopes)
e_all = abs(resdict2['md'][e_ind] - mass_shift) / (mass_sigma)
zs_all_tmp = e_all ** 2
e_ind = true_isotopes >= min_isotopes_calibration
true_seqs = true_seqs[e_ind]
true_rt = true_rt[e_ind]
true_isotopes = true_isotopes[e_ind]
zs_all_tmp = zs_all_tmp[e_ind]
e_ind = np.argsort(zs_all_tmp)
true_seqs = true_seqs[e_ind]
true_rt = true_rt[e_ind]
true_isotopes = true_isotopes[e_ind]
true_seqs = true_seqs[:2500]
true_rt = true_rt[:2500]
true_isotopes = | |
# -*-coding:Utf-8 -*
# Copyright (c) 2010 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Ce fichier contient la classe BaseType, détaillée plus bas."""
from fractions import Fraction
from abstraits.obase import BaseObj
from primaires.format.description import Description
from primaires.objet.script import ScriptObjet
from . import MetaType
# Constants
FLAGS = {
"ne peut pas prendre": 1,
}
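# FLAGS values are powers of two so they can be OR-ed into the integer bit
# field stored in BaseType.flags. A hypothetical check (illustration only,
# not part of the original module) would look like:
#
#   if prototype.flags & FLAGS["ne peut pas prendre"]:
#       pass  # the object cannot be picked up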
class BaseType(BaseObj, metaclass=MetaType):
"""Classe abstraite représentant le type de base d'un objet.
Si des données doivent être communes à tous les types d'objet
(un objet a un nom, une description, quelque soit son type) c'est dans
cette classe qu'elles apparaissent.
Notons les attributs d'objet :
empilable_sur -- une liste de chaînes définissant les types
sur lesquels on peut empiler le type d'objet
empilable_sous -- une liste de chaînes identiques mais
désignant les types d'objets qui peuvent être
empilés par-dessus le type défini. On évitera
d'utiliser cet attribut sauf si le type
d'objet est défini dans un module secondaire
"""
nom_type = "" # à redéfinir
nom_scripting = "l'objet"
type_achat = "objet"
_nom = "base_type_objet"
_version = 3
# Should the object be cleaned up when inactive
nettoyer = True
# Object type selectable in the oedit editor
selectable = True
# Child types
types = {}
enregistrer = True
# Equipment
empilable_sur = []
empilable_sous = []
def __init__(self, cle=""):
"""Constructeur d'un type"""
BaseObj.__init__(self)
self.cle = cle
self._attributs = {}
self.no = 0 # number of objects created from this prototype
self.nom_singulier = "un objet indéfini"
self.etat_singulier = "est posé là"
self.nom_pluriel = "objets indéfinis"
self.etat_pluriel = "sont posés là"
self.noms_sup = []
self.description = Description(parent=self)
self.objets = []
self.unique = True # by default every object is unique
self.flags = 0
self._prix = 1 # value in shops
self.sans_prix = False
self.poids_unitaire = 1 # 1 Kg
self.depecer_de = []
# Equipment
self.peut_prendre = True # whether the object can be handled by hand
self.peut_tenir = False # whether another object can be held over this one
self.emplacement = ""
self.epaisseur = 1
self.positions = ()
# Script
self.script = ScriptObjet(self)
self.etendre_script()
# Editor
self._extensions_editeur = []
# Type validation error
self.err_type = "Le type de '{}' est invalide."
self._construire()
def __getnewargs__(self):
return ()
def __repr__(self):
return "<{} {}>".format(self.nom_type, self.cle)
def __str__(self):
return self.cle
def __getstate__(self):
"""Retourne le dictionnaire à enregistrer."""
attrs = self.__dict__.copy()
if "_extensions_editeur" in attrs:
del attrs["_extensions_editeur"]
if "_attributs" in attrs:
del attrs["_attributs"]
return attrs
def _get_prix(self):
"""Return the price."""
return self._prix
def _set_prix(self, prix):
"""Set the price."""
self._prix = int(prix)
prix = property(_get_prix, _set_prix)
@property
def m_valeur(self):
return self._prix
@property
def nom_achat(self):
return self.nom_singulier
@property
def poids(self):
"""Retourne le poids unitaire."""
return self.poids_unitaire
def etendre_script(self):
"""Méthode appelée pour étendre le scripting.
Si une classe-fille la surcharge, elle peut ajouter des évènements
au script de ce type d'objet, par exemple.
"""
pass
def etendre_editeur(self, raccourci, ligne, editeur, objet, attribut, *sup):
"""Permet d'étendre l'éditeur d'objet en fonction du type.
Paramètres à entrer :
- raccourci le raccourci permettant d'accéder à la ligne
- ligne la ligne de l'éditeur (exemple 'Description')
- editeur le contexte-éditeur (exemple Uniligne)
- objet l'objet à éditer
- attribut l'attribut à éditer
Cette méthode est appelée lors de la création de l'éditeur de
prototype.
"""
self._extensions_editeur.append(
(raccourci, ligne, editeur, objet, attribut, sup))
def reduire_editeur(self, raccourci):
"""Permet de supprimer un contexte-éditeur de la liste d'extensions."""
sup = ()
for editeur in self._extensions_editeur:
if editeur[0] == raccourci:
sup = editeur
break
if sup:
self._extensions_editeur.remove(sup)
def travailler_enveloppes(self, enveloppes):
"""Travail sur les enveloppes.
On récupère un dictionnaire représentant la présentation avec en
clé les raccourcis et en valeur les enveloppes.
Cela peut permettre de travailler sur les enveloppes ajoutées par
'etendre_editeur'.
"""
pass
def get_nom(self, nombre=1, pluriels=True):
"""Retourne le nom complet en fonction du nombre.
Par exemple :
Si nombre == 1 : retourne le nom singulier
Sinon : retourne le nombre et le nom pluriel
"""
if nombre <= 0:
raise ValueError("la fonction get_nom a été appelée " \
"avec un nombre négatif ou nul.")
elif nombre == 1:
return self.nom_singulier
else:
if pluriels and self.noms_sup:
noms_sup = list(self.noms_sup)
noms_sup.reverse()
for nom in noms_sup:
if nombre >= nom[0]:
return nom[1]
return str(nombre) + " " + self.nom_pluriel
def get_nom_etat(self, nombre):
"""Retourne le nom et l'état en fonction du nombre."""
nom = self.get_nom(nombre)
if nombre == 1:
return nom + " " + self.etat_singulier
else:
if self.noms_sup:
noms_sup = list(self.noms_sup)
noms_sup.reverse()
for nom_sup in noms_sup:
if nombre >= nom_sup[0]:
return nom + " " + nom_sup[2]
return nom + " " + self.etat_pluriel
def extraire_contenus(self, quantite=None, contenu_dans=None):
"""Method overridden for handling non-unique objects."""
return [self]
def extraire_contenus_qtt(self):
"""Method overridden for handling non-unique objects."""
return [(self, 1)]
def est_de_type(self, nom_type):
"""Retourne True si le type d'objet est de celui entré ou dérivé.
Par exemple, si on test si une épée est une arme, retournera True
car le type 'arme' a pour classes-filles 'épée' (notamment).
"""
classe = importeur.objet.types[nom_type]
prototype = hasattr(self, "prototype") and self.prototype or self
return isinstance(prototype, classe)
def calculer_poids(self):
"""Return the weight of the object."""
return self.poids_unitaire
def objets_contenus(self, objet):
"""Return the contained objects."""
return []
def detruire_objet(self, objet):
"""Destroy the object passed as a parameter.
By default this method does nothing, but if the type is meant
to contain other objects, it must destroy them.
"""
pass
# Actions on objects
def acheter(self, quantite, magasin, transaction):
"""Buy the objects in the specified quantity."""
salle = magasin.parent
objets = []
for i in range(quantite):
objet = importeur.objet.creer_objet(self)
salle.objets_sol.ajouter(objet)
objets.append(objet)
return objets
def peut_vendre(self, vendeur):
"""Return True if the object can be sold."""
return True
def estimer_valeur(self, magasin, vendeur):
"""Estimate the value of an object."""
valeur = self.m_valeur
return valeur * 0.7
def regarder(self, personnage, variables=None):
"""The character looks at the object."""
salle = personnage.salle
variables = variables or {}
personnage << "Vous regardez {} :".format(self.get_nom())
autre = "{{}} regarde {}.".format(self.get_nom())
salle.envoyer(autre, personnage)
# Call the regarde.avant script
self.script["regarde"]["avant"].executer(
objet=self, personnage=personnage)
description = self.description.regarder(personnage, self, variables)
if not description:
description = "Il n'y a rien de bien intéressant à voir."
personnage << description
# Call the regarde.apres script
self.script["regarde"]["apres"].executer(
objet=self, personnage=personnage)
return ""
def veut_jeter(self, personnage, sur):
"""Méthode appelée pour tester si le personnage peut jeter l'objet.
On doit préciser :
personnage -- le personnage voulant jeter l'objet
sur -- sur quoi veut-il jeter l'objet ?
Le dernier paramètre peut être n'importe quel élément observable
(un autre objet, un autre personnage...).
La méthode doit retourner :
Une chaîne vide si l'objet ne peut pas être lancé
Un nom de méthode à appeler si l'objet peut être lancé
"""
return ""
def jeter(self, personnage, sur):
"""Jette self | |
o.src += '_SA(0x0100|c.S++);_RD();c.P=(_GD()&~M6502_BF)|M6502_XF;'
# load return address low byte from stack
o.src += '_SA(0x0100|c.S++);_RD();l=_GD();'
# load return address high byte from stack
o.src += '_SA(0x0100|c.S);_RD();h=_GD();'
# update PC (which is already placed on the right return-to instruction)
o.src += 'c.PC=(h<<8)|l;'
# interrupt response after RTI happens immediately
o.src += 'c.pi=c.P;'
#-------------------------------------------------------------------------------
def i_ora(o):
cmt(o,'ORA')
o.src += '_RD();c.A|=_GD();_NZ(c.A);'
#-------------------------------------------------------------------------------
def i_and(o):
cmt(o,'AND')
o.src += '_RD();c.A&=_GD();_NZ(c.A);'
#-------------------------------------------------------------------------------
def i_eor(o):
cmt(o,'EOR')
o.src += '_RD();c.A^=_GD();_NZ(c.A);'
#-------------------------------------------------------------------------------
def i_adc(o):
cmt(o,'ADC')
o.src += '_RD();_m6502_adc(&c,_GD());'
#-------------------------------------------------------------------------------
def i_sbc(o):
cmt(o,'SBC')
o.src += '_RD();_m6502_sbc(&c,_GD());'
#-------------------------------------------------------------------------------
def u_sbc(o):
u_cmt(o,'SBC')
o.src += '_RD();_m6502_sbc(&c,_GD());'
#-------------------------------------------------------------------------------
def i_cmp(o):
cmt(o,'CMP')
o.src += '_RD();l=_GD();'
o.src += 't=c.A-l;'
o.src += '_NZ((uint8_t)t);c.P&=~M6502_CF;' # set N/Z, clear carry; the next line re-sets carry when there is no borrow
o.src += 'if(!(t&0xFF00)){c.P|=M6502_CF;}'
#-------------------------------------------------------------------------------
def i_cpx(o):
cmt(o,'CPX')
o.src += '_RD();l=_GD();'
o.src += 't=c.X-l;'
o.src += '_NZ((uint8_t)t);c.P&=~M6502_CF;' # set N/Z, clear carry; the next line re-sets carry when there is no borrow
o.src += 'if(!(t&0xFF00)){c.P|=M6502_CF;}'
#-------------------------------------------------------------------------------
def i_cpy(o):
cmt(o,'CPY')
o.src += '_RD();l=_GD();'
o.src += 't=c.Y-l;'
o.src += '_NZ((uint8_t)t);c.P&=~M6502_CF;' # set N/Z, clear carry; the next line re-sets carry when there is no borrow
o.src += 'if(!(t&0xFF00)){c.P|=M6502_CF;}'
#-------------------------------------------------------------------------------
def i_dec(o):
cmt(o,'DEC')
o.src += '_RD();l=_GD();'
o.src += '_WR();' # first write is the unmodified value
o.src += 'l--;_NZ(l);'
o.src += '_SD(l);_WR();'
#-------------------------------------------------------------------------------
def u_dcp(o):
# undocumented 'decrement and compare'
u_cmt(o,'DCP')
o.src += '_RD();'
o.src += '_WR();'
o.src += 'l=_GD();l--;_NZ(l);_SD(l);_WR();'
# do a cmp operation on the decremented value
o.src += 't=c.A-l;'
o.src += '_NZ((uint8_t)t);c.P&=~M6502_CF;' # set N/Z, clear carry; the next line re-sets carry when there is no borrow
o.src += 'if(!(t&0xFF00)){c.P|=M6502_CF;}'
#-------------------------------------------------------------------------------
def x_sbx(o):
# undocumented SBX
# AND X register with accumulator and store result in X register, then
# subtract byte from X register (without borrow)
#
# we just ignore this for now and treat it like a imm-nop
#
u_cmt(o,'SBX (not impl)')
o.src += '_RD();'
#-------------------------------------------------------------------------------
def i_dex(o):
cmt(o,'DEX')
o.src += '_RD();c.X--;_NZ(c.X);'
#-------------------------------------------------------------------------------
def i_dey(o):
cmt(o,'DEY')
o.src += '_RD();c.Y--;_NZ(c.Y);'
#-------------------------------------------------------------------------------
def i_inc(o):
cmt(o,'INC')
o.src += '_RD();l=_GD();'
o.src += '_WR();' # first write is the unmodified value
o.src += 'l++;_NZ(l);'
o.src += '_SD(l);_WR();'
#-------------------------------------------------------------------------------
def i_inx(o):
cmt(o,'INX')
o.src += '_RD();c.X++;_NZ(c.X);'
#-------------------------------------------------------------------------------
def i_iny(o):
cmt(o,'INY')
o.src += '_RD();c.Y++;_NZ(c.Y);'
#-------------------------------------------------------------------------------
def u_isb(o):
# undocumented INC+SBC instruction
u_cmt(o,'ISB')
o.src += '_RD();'
o.src += '_WR();'
o.src += 'l=_GD();l++;_SD(l);_WR();'
o.src += '_m6502_sbc(&c,l);'
#-------------------------------------------------------------------------------
def _asl(val):
s = 'c.P=(c.P&~M6502_CF)|(('+val+'&0x80)?M6502_CF:0);'
s += val+'<<=1;'
s += '_NZ('+val+');'
return s
#-------------------------------------------------------------------------------
def i_asl(o):
cmt(o,'ASL')
o.src += '_RD();'
o.src += '_WR();' # write unmodified value
o.src += 'l=_GD();'
o.src += _asl('l')
o.src += '_SD(l);'
o.src += '_WR();'
#-------------------------------------------------------------------------------
def i_asla(o):
cmt(o,'ASLA')
o.src += '_RD();'
o.src += _asl('c.A')
#-------------------------------------------------------------------------------
def u_slo(o):
# undocumented ASL+OR
u_cmt(o,'SLO')
o.src += '_RD();'
o.src += '_WR();'
o.src += 'l=_GD();'
o.src += _asl('l')
o.src += '_SD(l);'
o.src += '_WR();'
o.src += 'c.A|=l;_NZ(c.A);'
#-------------------------------------------------------------------------------
def _lsr(val):
s = 'c.P=(c.P&~M6502_CF)|(('+val+'&0x01)?M6502_CF:0);'
s += val+'>>=1;'
s += '_NZ('+val+');'
return s
#-------------------------------------------------------------------------------
def i_lsr(o):
cmt(o,'LSR')
o.src += '_RD();'
o.src += '_WR();' # write unmodified value
o.src += 'l=_GD();'
o.src += _lsr('l')
o.src += '_SD(l);'
o.src += '_WR();'
#-------------------------------------------------------------------------------
def i_lsra(o):
cmt(o,'LSRA')
o.src += '_RD();'
o.src += _lsr('c.A')
#-------------------------------------------------------------------------------
def x_asr(o):
# undocumented AND+LSR
u_cmt(o, 'ASR')
o.src += '_RD();'
o.src += 'c.A&=_GD();'
o.src += _lsr('c.A')
#-------------------------------------------------------------------------------
def u_sre(o):
# undocumented LSR+EOR
u_cmt(o,'SRE')
o.src += '_RD();'
o.src += '_WR();'
o.src += 'l=_GD();'
o.src += _lsr('l')
o.src += '_SD(l);'
o.src += '_WR();'
o.src += 'c.A^=l;_NZ(c.A);'
#-------------------------------------------------------------------------------
def _rol(val):
s = '{'
s += 'bool carry=c.P&M6502_CF;'
s += 'c.P&=~(M6502_NF|M6502_ZF|M6502_CF);'
s += 'if('+val+'&0x80){c.P|=M6502_CF;}'
s += val+'<<=1;'
s += 'if(carry){'+val+'|=0x01;}'
s += '_NZ('+val+');'
s += '}'
return s
#-------------------------------------------------------------------------------
def i_rol(o):
cmt(o,'ROL')
o.src += '_RD();'
o.src += '_WR();' # write unmodified value
o.src += 'l=_GD();'
o.src += _rol('l')
o.src += '_SD(l);'
o.src += '_WR();'
#-------------------------------------------------------------------------------
def i_rola(o):
cmt(o,'ROLA')
o.src += '_RD();'
o.src += _rol('c.A')
#-------------------------------------------------------------------------------
def u_rla(o):
# undocumented ROL+AND
u_cmt(o,'RLA')
o.src += '_RD();'
o.src += '_WR();'
o.src += 'l=_GD();'
o.src += _rol('l')
o.src += '_SD(l);'
o.src += '_WR();'
o.src += 'c.A&=l;_NZ(c.A);'
#-------------------------------------------------------------------------------
def _ror(val):
s = '{'
s += 'bool carry=c.P&M6502_CF;'
s += 'c.P&=~(M6502_NF|M6502_ZF|M6502_CF);'
s += 'if('+val+'&0x01){c.P|=M6502_CF;}'
s += val+'>>=1;'
s += 'if(carry){'+val+'|=0x80;}'
s += '_NZ('+val+');'
s += '}'
return s
#-------------------------------------------------------------------------------
def i_ror(o):
cmt(o,'ROR')
o.src += '_RD();'
o.src += '_WR();' # write unmodified value
o.src += 'l=_GD();'
o.src += _ror('l')
o.src += '_SD(l);'
o.src += '_WR();'
#-------------------------------------------------------------------------------
def i_rora(o):
cmt(o,'RORA')
o.src += '_RD();'
o.src += _ror('c.A')
#-------------------------------------------------------------------------------
def u_rra(o):
# undocumented ROR+ADC
u_cmt(o,'RRA')
o.src += '_RD();'
o.src += '_WR();'
o.src += 'l=_GD();'
o.src += _ror('l')
o.src += '_SD(l);'
o.src += '_WR();'
o.src += '_m6502_adc(&c,l);'
#-------------------------------------------------------------------------------
def x_arr(o):
# undocumented AND+ROR
u_cmt(o,'ARR')
o.src += '_RD();'
o.src += 'c.A&=_GD();'
o.src += '_m6502_arr(&c);'
#-------------------------------------------------------------------------------
def x_ane(o):
# undocumented ANE
# NOTE: this implementation fails in the Wolfgang Lorenz test suite
u_cmt(o,'ANE')
o.src += '_RD();'
o.src += 'l=_GD();c.A&=l&c.X;_NZ(c.A);'
#-------------------------------------------------------------------------------
def x_sha(o):
# undocumented SHA
# AND X register with accumulator then AND result with 7 and store in
# memory.
#
# we just ignore this for now
u_cmt(o,'SHA (not impl)')
o.src += '_RD();'
#-------------------------------------------------------------------------------
def x_shx(o):
# undocumented SHX
# AND X register with the high byte of the target address of the argument
# + 1. Store the result in memory.
#
# we just ignore this for now
u_cmt(o, 'SHX (not impl)')
o.src += '_RD();'
#-------------------------------------------------------------------------------
def x_shy(o):
# undocumented SHY
# AND Y register with the high byte of the target address of the argument
# + 1. Store the result in memory.
#
# we just ignore this for now
u_cmt(o, 'SHY (not impl)')
o.src += '_RD();'
#-------------------------------------------------------------------------------
def x_shs(o):
# undocumented SHS
# AND X register with accumulator and store result in stack pointer, then
# AND stack pointer with the high byte of the target address of the
# argument + 1. Store result in memory.
#
# we just ignore this for now
u_cmt(o, 'SHS (not impl)')
o.src += '_RD();'
#-------------------------------------------------------------------------------
def x_anc(o):
# undocumented ANC
# AND byte with accumulator. If result is negative then carry is set.
#
u_cmt(o, 'ANC')
o.src += '_RD();'
o.src += 'c.A&=_GD();'
o.src += '_NZ(c.A);'
o.src += 'if(c.A&0x80){c.P|=M6502_CF;}else{c.P&=~M6502_CF;}'
#-------------------------------------------------------------------------------
def x_las(o):
# undocumented LAS
# AND memory with stack pointer, transfer result to accumulator, X
# register and stack pointer.
#
# we just ignore this for now
u_cmt(o, 'LAS (not impl)')
o.src += '_RD();'
#-------------------------------------------------------------------------------
def i_bit(o):
cmt(o,'BIT')
o.src += '_RD();'
o.src += 'l=_GD();h=c.A&l;'
o.src += 'c.P&=~(M6502_NF|M6502_VF|M6502_ZF);'
o.src += 'if(!h){c.P|=M6502_ZF;}'
o.src += 'c.P|=l&(M6502_NF|M6502_VF);'
#-------------------------------------------------------------------------------
def enc_op(op):
o = opcode(op)
if invalid_opcode(op):
o.cmt = 'INVALID'
o.src = ''
return o
# addressing mode decoder
o.src = enc_addr(op)
# instruction decoding
cc = op & 3
bbb = (op>>2) & 7
aaa = (op>>5) & 7
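    # opcode bit layout is aaabbbcc: cc selects the instruction group,
    # bbb the addressing mode, and aaa the operation within that group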
if cc == 0:
if aaa == 0:
if bbb == 0: i_brk(o)
elif bbb == 2: i_php(o)
elif bbb == 4: i_br(o, NF, 0) # BPL
elif bbb == 6: i_cl(o, CF)
else: u_nop(o)
elif aaa == 1:
if bbb == 0: i_jsr(o)
elif bbb == 2: i_plp(o)
elif bbb == 4: i_br(o, NF, NF) # BMI
elif bbb == 6: i_se(o, CF)
elif bbb in [5, 7]: u_nop(o)
else: i_bit(o)
elif aaa == 2:
if bbb == 0: i_rti(o)
elif bbb == 2: i_pha(o)
elif bbb == 3: i_jmp(o)
elif bbb == 4: i_br(o, VF, 0) # BVC
elif bbb == 6: i_cl(o, IF)
else: u_nop(o)
elif aaa == 3:
if bbb == 0: i_rts(o)
elif bbb == 2: i_pla(o)
elif bbb == 3: i_jmpi(o)
elif bbb == 4: i_br(o, VF, VF) # BVS
elif bbb == 6: i_se(o, IF)
else: u_nop(o)
elif aaa == 4:
if bbb == 0: u_nop(o)
elif bbb == 2: i_dey(o)
elif bbb == 4: i_br(o, CF, 0) # BCC
elif bbb == 6: i_tya(o)
elif bbb == 7: x_shy(o)
else: i_sty(o)
elif aaa == 5:
if bbb == 2: i_tay(o)
elif bbb == 4: i_br(o, CF, CF) # BCS
elif bbb == 6: i_cl(o, VF)
else: i_ldy(o)
elif aaa == 6:
if bbb == 2: i_iny(o)
elif bbb == 4: i_br(o, ZF, 0) # BNE
elif bbb == 6: i_cl(o, DF)
elif bbb in [5, 7]: u_nop(o)
else: i_cpy(o)
elif aaa == 7:
if bbb == 2: i_inx(o)
elif bbb == 4: i_br(o, ZF, ZF) # BEQ
elif bbb == 6: i_se(o, DF)
elif bbb in [5, 7]: u_nop(o)
else: i_cpx(o)
elif cc == 1:
if aaa == 0: i_ora(o)
elif aaa == 1: i_and(o)
elif aaa == 2: i_eor(o)
elif aaa == 3: i_adc(o)
elif aaa == 4:
if bbb == 2: u_nop(o)
else: i_sta(o)
elif aaa == 5: i_lda(o)
elif aaa == 6: i_cmp(o)
else: i_sbc(o)
elif cc == 2:
if aaa == 0:
if bbb == 2: i_asla(o)
elif bbb == 6: u_nop(o)
else: i_asl(o)
elif aaa == 1:
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright (c) 2013, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization
# that has cooperated in the development of these materials, makes
# any warranty, express or implied, or assumes any legal liability
# or responsibility for the accuracy, completeness, or usefulness of
# any information, apparatus, product, software, or process disclosed,
# or represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does
# not necessarily constitute or imply its endorsement, recommendation,
# or favoring by the United States Government or any agency thereof,
# or Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
#Author : Payyoh
#}}}
from datetime import datetime
import logging
import sys
import json
import time
import random
from volttron.platform.agent import BaseAgent, PublishMixin, periodic
from volttron.platform.agent import utils, matching
from volttron.platform.messaging import headers as headers_mod
import settings
utils.setup_logging()
_log = logging.getLogger(__name__)
class ListenerAgent(PublishMixin, BaseAgent):
# '''Listens to everything and publishes a heartbeat according to the
# heartbeat period specified in the settings module.
# '''
# matching_topic = '/agent/ui/lighting/update_response/bemoss/999/2HUE0017881cab4b'
def __init__(self, config_path, **kwargs):
super(ListenerAgent, self).__init__(**kwargs)
self.config = utils.load_config(config_path)
self.mode = ''
self.mode2 = ''
self.actor = ''
def setup(self):
# Demonstrate accessing a value from the config file
_log.info(self.config['message'])
self._agent_id = self.config['agentid']
super(ListenerAgent, self).setup()
def main(self):
print ""
@matching.match_start('/ui/agent/select_mode/')
def on_matchmode(self, topic, headers, message, match):
# '''Use match_all to receive all messages and print them out.'''
# _log.debug("Topic: {topic}, Headers: {headers}, "
# "Message: {message}".format(
# topic=topic, headers=headers, message=message))
# print "MODE---------"
# print "Topic: {}".format(topic)
# print "Headers: {}".format(headers)
print "Message: {}".format(message)
event = json.loads(message[0])
# print type(event)
# print event
self.event_status = event["status"]
event_mode = event["mode"]
self.mode = event["mode"]
# print "event_status: {} ".format(event_status)
# print "event_mode: {} ".format(event_mode)
print "UI select mode : .. {}".format(event_mode)
if (event_mode == "comfort"):
self.publish_Comfort()
elif (event_mode == "eco"):
self.publish_ECO()
elif (event_mode == "dr") and (self.event_status == "enable"):
self.publish_DR()
else:
print "Finish Select mode"
@matching.match_exact('/ui/agent/lighting/update/bemoss/999/2HUE0017881cab4b')
def on_matchlight(self, topic, headers, message, match):
# '''Use match_all to receive all messages and print them out.'''
# _log.debug("Topic: {topic}, Headers: {headers}, "
# "Message: {message}".format(
# topic=topic, headers=headers, message=message))
# print "Topic: {}".format(topic)
# print "Headers: {}".format(headers)
# print "Message: {}".format(message)
received_message = json.loads(message[0])
try:
self.actor = received_message["actor"]
print "Now, Control action by : {}".format(self.actor)
except:
print "Brightness data sent by comfort, eco ,DR mode"
self.actor = "3Modecontrol"
print"---------------------------------------------------"
@matching.match_exact('/agent/ui/BH1750_SHT15/device_status_response/bemoss/999/1NP221445K1200112')
# @matching.match_exact('/agent/ui/MultiSensor/device_status_response/bemoss/999/1MS221445K1200132')
def on_matchmulti(self, topic, headers, message, match):
# '''Use match_all to receive all messages and print them out.'''
# _log.debug("Topic: {topic}, Headers: {headers}, "
# "Message: {message}".format(
# topic=topic, headers=headers, message=message))
# print "MultiSensor----------"
# print "Topic: {}".format(topic)
# print "Headers: {}".format(headers)
print "Message: {}".format(message)
received_message = json.loads(message[0])
# self.illu = received_message["illuminance"] # for fibaro
self.illu = received_message["Lux"]
print "now brightness from multisensor is : {}".format(self.illu)
# if self.actor == "ui":
# self.mode = "custom_mode"
#
# print "now working at : {}".format(self.mode)
# else:
# print ""
print "***************************"
print self.actor
if self.actor != "ui":
print "+++++++++++++++++++++++++++++++++++++++++"
# if self.mode == ("eco" or "dr" or "comfort"):
print "now working at : {}".format(self.mode)
self.brightness = 100
if self.illu > 500:
self.brightness = 1
elif self.illu > 400:
self.brightness = 10
elif self.illu > 350:
self.brightness = 10
elif self.illu > 300:
self.brightness = 20
elif self.illu > 250:
self.brightness = 30
elif self.illu > 210:
self.brightness = 40
# elif self.illu > 180:
# self.brightness = 50
elif self.illu > 150:
self.brightness = 70
# elif self.illu > 120:
# self.brightness = 80
elif self.illu > 100:
self.brightness = 100
else:
self.brightness = 100
if(self.mode == "comfort"):
self.brightness = 100
else:
print""
self.HUE_DIM(self.brightness)
print "calculate brightness to (%){}".format(self.brightness)
print "-------------------------------------------------------------------------"
# else:
print "-------------------------------------------------------------------------"
else:
print "now working at custom mode"
# total Load 1100 kW
def publish_DR(self):
try:
self.HUE_DIM(self.brightness)
print "use brightness from multi sensor"
print "self.brightness: {}".format(self.brightness)
except:
self.HUE_DIM(50)
print "manual change brightness of hue to 50%"
self.AC1_OFF()
time.sleep(5)
self.AC1_OFF()
time.sleep(5)
self.AC2_OFF()
time.sleep(5)
self.AC2_OFF()
time.sleep(5)
self.AC3_OFF()
time.sleep(5)
self.AC3_OFF()
time.sleep(5)
self.FAN_ON()
time.sleep(2)
self.Plug_OFF()
time.sleep(2)
#
# total Load 1200 kW
def publish_ECO(self):
try:
self.HUE_DIM(self.brightness)
print "use brightness from multi sensor"
print "self.brightness: {}".format(self.brightness)
except:
self.HUE_DIM(50)
print "manual change brightness of hue to 50%"
self.AC1_temp27()
time.sleep(5)
self.AC1_temp27()
time.sleep(5)
self.AC2_temp27()
time.sleep(5)
self.AC2_temp27()
time.sleep(5)
self.AC3_temp27()
time.sleep(5)
self.AC3_temp27()
time.sleep(2)
self.FAN_ON()
self.Plug_ON()
#
# total Load 4500 kW
def publish_Comfort(self):
try:
self.brightness = 100
self.HUE_DIM(self.brightness)
print "use brightness from multi sensor"
print "self.brightness: {}".format(self.brightness)
except:
self.HUE_DIM(100)
print "manual change brightness of hue to 100%"
self.FAN_OFF()
self.Plug_ON()
self.AC1_temp20()
time.sleep(2)
self.AC1_temp20()
time.sleep(2)
self.AC2_temp20()
time.sleep(2)
self.AC2_temp20()
time.sleep(2)
self.AC3_temp20()
time.sleep(2)
self.AC3_temp20()
time.sleep(2)
# print self.mode
def AC1_temp20(self):
# TODO this is example how to write an app to control AC
topic = '/ui/agent/airconditioner/update/bemoss/999/1TH20000000000001'
now = datetime.utcnow().isoformat(' ') + 'Z'
headers = {
'AgentID': self._agent_id,
headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.PLAIN_TEXT,
headers_mod.DATE: now,
}
message = json.dumps({"LivingroomAir1":"0","status": "ON", "temp": "20","fan_speed": "4"})
self.publish(topic, headers, message)
print ("AC1 turned on : temp 20")
def AC2_temp20(self):
# TODO this is example how to write an app to control AC
topic = '/ui/agent/airconditioner/update/bemoss/999/1TH20000000000002'
now = datetime.utcnow().isoformat(' ') + 'Z'
headers = {
'AgentID': self._agent_id,
headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.PLAIN_TEXT,
headers_mod.DATE: now,
}
message = json.dumps({"LivingroomAir2":"0","status": "ON", "temp": "20","fan_speed": "4"})
self.publish(topic, headers, message)
print ("AC2 turned on : temp 20")
def AC3_temp20(self):
# TODO this is example how to write an app to control AC
topic = '/ui/agent/airconditioner/update/bemoss/999/1TH20000000000003'
now = datetime.utcnow().isoformat(' ') + 'Z'
headers = {
'AgentID': self._agent_id,
headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.PLAIN_TEXT,
headers_mod.DATE: now,
}
message = json.dumps({"BedroomAir":"0","status": "ON", "temp": "20","fan_speed": "4"})
# print ("message{}".format(message))
self.publish(topic, headers, message)
print ("AC3 turned on : temp 20")
def AC1_temp27(self):
# TODO this is example how to write an app to control AC
topic = '/ui/agent/airconditioner/update/bemoss/999/1TH20000000000001'
now = datetime.utcnow().isoformat(' ') + 'Z'
headers = {
'AgentID': self._agent_id,
headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.PLAIN_TEXT,
headers_mod.DATE: now,
}
message = json.dumps({"LivingroomAir1":"0","status": "ON", "temp": "27","fan_speed": "1"})
self.publish(topic, headers, message)
print ("AC1 turned on : temp 27")
def AC1_OFF(self):
# TODO this is example how to write an app to control AC
topic = '/ui/agent/airconditioner/update/bemoss/999/1TH20000000000001'
now = datetime.utcnow().isoformat(' ') + 'Z'
headers = {
'AgentID': self._agent_id,
headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.PLAIN_TEXT,
headers_mod.DATE: now,
}
self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/sites".format(api_version,
tenant_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def sites_bulk_config_state_query(self, data, tenant_id=None, api_version="v2.0"):
"""
Get site config/state info for queried site from NB
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/sites/bulk_config_state/query".format(api_version,
tenant_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def site_ipfixlocalprefixes(self, site_id, data, tenant_id=None, api_version="v2.0"):
"""
POST Site_ipfixlocalprefixes API Function
**Parameters:**:
- **site_id**: Site ID
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/sites/{}/ipfixlocalprefixes".format(api_version,
tenant_id,
site_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def sites_query(self, data, tenant_id=None, api_version="v4.5"):
"""
        Queries the DB for a limited number of sites that match the query params.
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v4.5)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/sites/query".format(api_version,
tenant_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def sitesecurityzones(self, site_id, data, tenant_id=None, api_version="v2.0"):
"""
Create an association between site and security zone.
**Parameters:**:
- **site_id**: Site ID
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/sites/{}/sitesecurityzones".format(api_version,
tenant_id,
site_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def sitesecurityzones_query(self, site_id, data, tenant_id=None, api_version="v2.0"):
"""
Query security zone for NB API.
**Parameters:**:
- **site_id**: Site ID
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/sites/{}/sitesecurityzones/query".format(api_version,
tenant_id,
site_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def snmpagents(self, site_id, element_id, data, tenant_id=None, api_version="v2.0"):
"""
Create SNMP Agent
**Parameters:**:
- **site_id**: Site ID
- **element_id**: Element (Device) ID
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/sites/{}/elements/{}/snmpagents".format(api_version,
tenant_id,
site_id,
element_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def snmptraps(self, site_id, element_id, data, tenant_id=None, api_version="v2.0"):
"""
Create SNMP Trap
**Parameters:**:
- **site_id**: Site ID
- **element_id**: Element (Device) ID
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/sites/{}/elements/{}/snmptraps".format(api_version,
tenant_id,
site_id,
element_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def software_current_status_query(self, data, tenant_id=None, api_version="v2.1"):
"""
        Get the current image status of all the elements
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.1)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/software/current_status/query".format(api_version,
tenant_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def software_status_query(self, data, tenant_id=None, api_version="v2.1"):
"""
Query the software upgrade status of all tenant elements
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.1)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/software/status/query".format(api_version,
tenant_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def softwarehistory_query(self, data, tenant_id=None, api_version="v2.0"):
"""
        Queries the DB for all software downloads done by a tenant
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/softwarehistory/query".format(api_version,
tenant_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def spokeclusters(self, site_id, data, tenant_id=None, api_version="v2.0"):
"""
Create Spoke Cluster
**Parameters:**:
- **site_id**: Site ID
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/sites/{}/spokeclusters".format(api_version,
tenant_id,
site_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def spokeclusters_ops(self, site_id, spokecluster_id, data, tenant_id=None, api_version="v2.0"):
"""
Handle operations on spokecluster.
**Parameters:**:
- **site_id**: Site ID
- **spokecluster_id**: Spoke Cluster ID
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/sites/{}/spokeclusters/{}/operations".format(api_version,
tenant_id,
site_id,
spokecluster_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def spokeclusters_query(self, site_id, data, tenant_id=None, api_version="v2.0"):
"""
Query Spoke Clusters.
**Parameters:**:
- **site_id**: Site ID
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if | |
from itertools import chain
import logging
import plistlib
import random
from urllib.parse import urlencode
from django.contrib import messages
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.core.cache import cache
from django.core.exceptions import PermissionDenied
from django.core.files.storage import default_storage
from django.db.models import ProtectedError
from django.urls import reverse_lazy
from django.http import (FileResponse,
Http404,
HttpResponse, HttpResponseForbidden, HttpResponseNotFound, HttpResponseRedirect)
from django.shortcuts import get_object_or_404, redirect
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.functional import cached_property
from django.views.generic import DetailView, ListView, TemplateView, View
from django.views.generic.edit import CreateView, DeleteView, FormView, UpdateView
from zentral.contrib.inventory.exceptions import EnrollmentSecretVerificationFailed
from zentral.contrib.inventory.forms import EnrollmentSecretForm
from zentral.contrib.inventory.models import EnrollmentSecret, MachineTag, MetaMachine, Tag
from zentral.contrib.inventory.utils import verify_enrollment_secret
from zentral.core.stores import frontend_store, stores
from zentral.core.stores.views import EventsView, FetchEventsView, EventsStoreRedirectView
from zentral.utils.http import user_agent_and_ip_address_from_request
from zentral.utils.storage import file_storage_has_signed_urls
from zentral.utils.text import get_version_sort_key, shard as compute_shard, encode_args
from .conf import monolith_conf
from .events import (post_monolith_enrollment_event,
post_monolith_munki_request, post_monolith_repository_updates)
from .forms import (AddManifestCatalogForm, EditManifestCatalogForm, DeleteManifestCatalogForm,
AddManifestEnrollmentPackageForm,
AddManifestSubManifestForm, EditManifestSubManifestForm, DeleteManifestSubManifestForm,
EnrollmentForm,
ManifestForm, ManifestPrinterForm, ManifestSearchForm,
PkgInfoSearchForm,
SubManifestForm, SubManifestSearchForm,
SubManifestPkgInfoForm, SubManifestAttachmentForm, SubManifestScriptForm,
UploadPPDForm)
from .models import (MunkiNameError, parse_munki_name,
Catalog, CacheServer,
EnrolledMachine, Enrollment,
Manifest, ManifestEnrollmentPackage, PkgInfo, PkgInfoName,
Printer, PrinterPPD,
Condition,
SUB_MANIFEST_PKG_INFO_KEY_CHOICES, SubManifest, SubManifestAttachment, SubManifestPkgInfo)
from .utils import (build_configuration_plist, build_configuration_profile,
filter_catalog_data, filter_sub_manifest_data,
test_monolith_object_inclusion, test_pkginfo_catalog_inclusion)
logger = logging.getLogger('zentral.contrib.monolith.views')
# inventory machine subview
class InventoryMachineSubview:
template_name = "monolith/_inventory_machine_subview.html"
source_key = ("zentral.contrib.munki", "Munki")
err_message = None
enrolled_machine = None
def __init__(self, serial_number, user):
self.user = user
qs = (EnrolledMachine.objects.select_related("enrollment__manifest")
.filter(serial_number=serial_number).order_by("-created_at"))
count = qs.count()
if count > 1:
self.err_message = f"{count} machines found!!!"
if count > 0:
self.enrolled_machine = qs.first()
def render(self):
em = self.enrolled_machine
ctx = {"enrolled_machine": em,
"err_message": self.err_message}
if em and self.user.has_perms(ManifestMachineInfoView.permission_required):
manifest = em.enrollment.manifest
ctx["manifest"] = manifest
ctx["url"] = (reverse("monolith:manifest_machine_info", args=(manifest.pk,))
+ "?serial_number=" + em.serial_number)
return render_to_string(self.template_name, ctx)
# pkg infos
class PkgInfosView(PermissionRequiredMixin, TemplateView):
permission_required = "monolith.view_pkginfo"
template_name = "monolith/pkg_info_list.html"
def get_context_data(self, **kwargs):
ctx = super(PkgInfosView, self).get_context_data(**kwargs)
form = PkgInfoSearchForm(self.request.GET)
form.is_valid()
ctx['form'] = form
ctx['name_number'], ctx['info_number'], ctx['pkg_names'] = PkgInfo.objects.alles(**form.cleaned_data)
if not form.is_initial():
bc = [(reverse("monolith:pkg_infos"), "Monolith pkg infos"),
(None, "Search")]
else:
bc = [(None, "Monolith pkg infos")]
ctx["breadcrumbs"] = bc
return ctx
class UpdatePkgInfoCatalogView(PermissionRequiredMixin, UpdateView):
permission_required = "monolith.change_pkginfo"
model = PkgInfo
fields = ['catalogs']
def form_valid(self, form):
old_catalogs = set(self.model.objects.get(pk=self.object.pk).catalogs.all())
response = super().form_valid(form)
new_catalogs = set(self.object.catalogs.all())
if old_catalogs != new_catalogs:
attr_diff = {}
removed = old_catalogs - new_catalogs
if removed:
attr_diff["removed"] = sorted(str(c) for c in removed)
added = new_catalogs - old_catalogs
if added:
attr_diff["added"] = sorted(str(c) for c in added)
post_monolith_repository_updates(monolith_conf.repository,
[{"pkg_info": {"name": self.object.name.name,
"version": self.object.version,
"diff": {"catalogs": attr_diff}},
"type": "pkg_info",
"action": "updated"}],
self.request)
return response
class PkgInfoNameView(PermissionRequiredMixin, DetailView):
permission_required = "monolith.view_pkginfoname"
model = PkgInfoName
template_name = "monolith/pkg_info_name.html"
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
pkg_info_name = ctx["object"]
# events
if self.request.user.has_perms(("monolith.view_pkginfo", "monolith.view_pkginfoname")):
ctx["show_events_link"] = frontend_store.object_events
store_links = []
for store in stores.iter_events_url_store_for_user("object", self.request.user):
url = "{}?{}".format(
reverse("monolith:pkg_info_name_events_store_redirect", args=(self.object.pk,)),
urlencode({"es": store.name,
"tr": PkgInfoNameEventsView.default_time_range})
)
store_links.append((url, store.name))
ctx["store_links"] = store_links
# sub manifests
sub_manifests = []
for smpi in pkg_info_name.submanifestpkginfo_set.select_related("sub_manifest").order_by("sub_manifest__name"):
sub_manifests.append((smpi.sub_manifest, smpi.get_key_display()))
ctx["sub_manifests"] = sub_manifests
# pkg infos
_, _, pkg_name_list = PkgInfo.objects.alles(name_id=pkg_info_name.pk)
try:
ctx["pkg_infos"] = pkg_name_list[0]["pkg_infos"]
except IndexError:
# should never happen
logger.error("Could not get pkg infos for name ID %d", pkg_info_name.pk)
ctx["pkg_infos"] = []
# to display update catalog links or not
ctx["manual_catalog_management"] = monolith_conf.repository.manual_catalog_management
return ctx
class EventsMixin:
store_method_scope = "object"
def get_object(self, **kwargs):
return get_object_or_404(PkgInfoName, pk=kwargs["pk"])
def get_fetch_kwargs_extra(self):
return {"key": "munki_pkginfo_name", "val": encode_args((self.object.name,))}
def get_fetch_url(self):
return reverse("monolith:fetch_pkg_info_name_events", args=(self.object.pk,))
def get_redirect_url(self):
return reverse("monolith:pkg_info_name_events", args=(self.object.pk,))
def get_store_redirect_url(self):
return reverse("monolith:pkg_info_name_events_store_redirect", args=(self.object.pk,))
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["monolith"] = True
ctx["object"] = self.object
return ctx
class PkgInfoNameEventsView(EventsMixin, EventsView):
permission_required = ("monolith.view_pkginfo", "monolith.view_pkginfoname")
template_name = "monolith/pkg_info_name_events.html"
class FetchPkgInfoNameEventsView(EventsMixin, FetchEventsView):
permission_required = ("monolith.view_pkginfo", "monolith.view_pkginfoname")
class PkgInfoNameEventsStoreRedirectView(EventsMixin, EventsStoreRedirectView):
permission_required = ("monolith.view_pkginfo", "monolith.view_pkginfoname")
# PPDs
class PPDsView(PermissionRequiredMixin, ListView):
permission_required = "monolith.view_printerppd"
model = PrinterPPD
class UploadPPDView(PermissionRequiredMixin, CreateView):
permission_required = "monolith.add_printerppd"
model = PrinterPPD
form_class = UploadPPDForm
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["title"] = "Upload PPD file"
return ctx
class PPDView(PermissionRequiredMixin, DetailView):
permission_required = "monolith.view_printerppd"
model = PrinterPPD
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["printers"] = list(ctx["object"].printer_set.filter(trashed_at__isnull=True))
return ctx
# catalogs
class CatalogsView(PermissionRequiredMixin, ListView):
permission_required = "monolith.view_catalog"
model = Catalog
def get_queryset(self):
qs = super().get_queryset()
if not monolith_conf.repository.manual_catalog_management:
qs = qs.filter(archived_at__isnull=True)
return qs
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["manual_catalog_management"] = monolith_conf.repository.manual_catalog_management
if monolith_conf.repository.manual_catalog_management:
ctx["can_create_catalog"] = self.request.user.has_perm("monolith.add_catalog")
else:
ctx["can_create_catalog"] = False
return ctx
class CatalogView(PermissionRequiredMixin, DetailView):
permission_required = "monolith.view_catalog"
model = Catalog
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
catalog = ctx["object"]
# edit view
if monolith_conf.repository.manual_catalog_management:
ctx["edit_catalog_view"] = "monolith:update_catalog"
else:
ctx["edit_catalog_view"] = "monolith:update_catalog_priority"
# manifests
manifests = []
for mc in (catalog.manifestcatalog_set.select_related("manifest__meta_business_unit")
.prefetch_related("tags")
.all()
.order_by("manifest__meta_business_unit__name")):
manifests.append((mc.manifest, mc.tags.all()))
ctx["manifests"] = manifests
# pkg infos
ctx["pkg_infos"] = list(catalog.pkginfo_set.filter(archived_at__isnull=True))
return ctx
class ManualCatalogManagementRequiredMixin(PermissionRequiredMixin):
def dispatch(self, request, *args, **kwargs):
self.manual_catalog_management = monolith_conf.repository.manual_catalog_management
if not self.manual_catalog_management:
raise PermissionDenied("Automatic catalog management. "
"See configuration. "
"You can't create catalogs.")
return super().dispatch(request, *args, **kwargs)
class CreateCatalogView(ManualCatalogManagementRequiredMixin, CreateView):
permission_required = "monolith.add_catalog"
model = Catalog
fields = ['name', 'priority']
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['title'] = "Create catalog"
return ctx
def form_valid(self, form):
response = super().form_valid(form)
post_monolith_repository_updates(monolith_conf.repository,
[{"catalog": {"name": self.object.name,
"id": self.object.id,
"priority": self.object.priority},
"type": "catalog",
"action": "added"}],
self.request)
return response
class UpdateCatalogMixin(object):
def form_valid(self, form):
before_object = self.model.objects.get(pk=self.object.pk)
before = {f: getattr(before_object, f) for f in self.fields}
response = super().form_valid(form)
diff = {}
for f in self.fields:
before_val = before[f]
after_val = getattr(self.object, f)
if after_val != before_val:
diff[f] = {"removed": before_val,
"added": after_val}
if diff:
post_monolith_repository_updates(monolith_conf.repository,
[{"catalog": {"name": self.object.name,
"id": self.object.id,
"diff": diff},
"type": "catalog",
"action": "updated"}],
self.request)
return response
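# For example, renaming a catalog from "testing" to "production" results in a
# repository update event whose payload looks like this (illustrative values):
#   {"catalog": {"name": "production", "id": 3,
#                "diff": {"name": {"removed": "testing", "added": "production"}}},
#    "type": "catalog", "action": "updated"}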
class UpdateCatalogView(ManualCatalogManagementRequiredMixin, UpdateCatalogMixin, UpdateView):
permission_required = "monolith.change_catalog"
model = Catalog
fields = ['name', 'priority']
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['title'] = "Update catalog {}".format(ctx["object"])
return ctx
class UpdateCatalogPriorityView(PermissionRequiredMixin, UpdateCatalogMixin, UpdateView):
permission_required = "monolith.change_catalog"
model = Catalog
fields = ['priority']
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['title'] = "Update catalog {} priority".format(ctx["object"])
return ctx
class DeleteCatalogView(PermissionRequiredMixin, DeleteView):
permission_required = "monolith.delete_catalog"
model = Catalog
success_url = reverse_lazy("monolith:catalogs")
def get_object(self, queryset=None):
obj = super().get_object(queryset)
if not obj.can_be_deleted():
raise Http404("Catalog {} can't be deleted".format(obj))
return obj
def delete(self, request, *args, **kwargs):
response = super().delete(request, *args, **kwargs)
post_monolith_repository_updates(monolith_conf.repository,
[{"catalog": {"name": self.object.name},
"type": "catalog",
"action": "deleted"}],
request)
return response
# conditions
class ConditionsView(PermissionRequiredMixin, ListView):
permission_required = "monolith.view_condition"
model = Condition
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['monolith'] = True
return context
class CreateConditionView(PermissionRequiredMixin, CreateView):
permission_required = "monolith.add_condition"
model = Condition
fields = ["name", "predicate"]
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['monolith'] = True
context['title'] = "Create condition"
return context
class ConditionView(PermissionRequiredMixin, DetailView):
permission_required = "monolith.view_condition"
model = Condition
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['monolith'] = True
condition = context["object"]
pkg_infos = []
for smp in condition.submanifestpkginfo_set.select_related("sub_manifest", "pkg_info_name"):
pkg_infos.append((smp.sub_manifest, smp.pkg_info_name.name,
smp.get_absolute_url(),
"repository package", smp.get_key_display()))
for sma in condition.submanifestattachment_set.select_related("sub_manifest"):
pkg_infos.append((sma.sub_manifest, sma.name,
sma.get_absolute_url(),
sma.get_type_display(), sma.get_key_display()))
pkg_infos.sort(key=lambda t: (t[0].name, t[1], t[3], t[4]))
context['pkg_infos'] = pkg_infos
return context
class UpdateConditionView(PermissionRequiredMixin, UpdateView):
permission_required = "monolith.change_condition"
model = Condition
fields = ["name", "predicate"]
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['monolith'] = True
condition = context["object"]
context['title'] = "Update condition {}".format(condition.name)
return context
def form_valid(self, form):
condition = form.save()
for manifest in condition.manifests():
manifest.bump_version()
return redirect(condition)
class DeleteConditionView(PermissionRequiredMixin, TemplateView):
permission_required = "monolith.delete_condition"
template_name = "monolith/condition_confirm_delete.html"
def dispatch(self, request, *args, **kwargs):
self.condition = get_object_or_404(Condition, pk=kwargs["pk"])
if not self.condition.can_be_deleted():
messages.warning(request, "This condition cannot be deleted")
return redirect(self.condition)
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["object"] = self.condition
return context
def post(self, request, *args, **kwargs):
try:
self.condition.delete()
except ProtectedError:
messages.warning(request, "This condition cannot be deleted")
return redirect(self.condition)
else:
return redirect("monolith:conditions")
# sub manifests
class SubManifestsView(PermissionRequiredMixin, ListView):
permission_required = "monolith.view_submanifest"
model = SubManifest
template_name = "monolith/sub_manifest_list.html"
paginate_by = 10
def get(self, request, *args, **kwargs):
self.form = SubManifestSearchForm(request.GET)
self.form.is_valid()
return super().get(request, *args, **kwargs)
def get_queryset(self):
return self.form.get_queryset()
def get_context_data(self, **kwargs):
context = super(SubManifestsView, self).get_context_data(**kwargs)
context['monolith'] = True
context['form'] = self.form
# pagination
page = context['page_obj']
if page.has_next():
qd = self.request.GET.copy()
qd['page'] = page.next_page_number()
context['next_url'] = "?{}".format(qd.urlencode())
if page.has_previous():
qd = self.request.GET.copy()
qd['page'] = page.previous_page_number()
context['previous_url'] = "?{}".format(qd.urlencode())
return context
class CreateSubManifestView(PermissionRequiredMixin, CreateView):
permission_required = "monolith.add_submanifest"
model = SubManifest
form_class = SubManifestForm
template_name = "monolith/edit_sub_manifest.html"
def get_context_data(self, **kwargs):
context = super(CreateSubManifestView, self).get_context_data(**kwargs)
context['monolith'] = True
return context
class SubManifestView(PermissionRequiredMixin, DetailView):
permission_required = "monolith.view_submanifest"
model = SubManifest
template_name = "monolith/sub_manifest.html"
def get_context_data(self, **kwargs):
context = super(SubManifestView, self).get_context_data(**kwargs)
sub_manifest = context['object']
context['monolith'] = True
pkg_info_dict = sub_manifest.pkg_info_dict(include_trashed_attachments=True)
keys = pkg_info_dict.pop("keys")
sorted_keys = []
for key, _ in SUB_MANIFEST_PKG_INFO_KEY_CHOICES:
value = keys.get(key, None)
if value:
sorted_keys.append((value['key_display'], value['key_list']))
context["keys"] = sorted_keys
| |
recurrence.
:type from_property: ~datetime.datetime
:param to: The end date of recurrence.
:type to: ~datetime.datetime
"""
_validation = {
'from_property': {'required': True},
}
_attribute_map = {
'from_property': {'key': 'from', 'type': 'iso-8601'},
'to': {'key': 'to', 'type': 'iso-8601'},
}
def __init__(
self,
*,
from_property: datetime.datetime,
to: Optional[datetime.datetime] = None,
**kwargs
):
super(ExportRecurrencePeriod, self).__init__(**kwargs)
self.from_property = from_property
self.to = to
class ExportSchedule(msrest.serialization.Model):
"""The schedule associated with the export.
All required parameters must be populated in order to send to Azure.
:param status: The status of the export's schedule. If 'Inactive', the export's schedule is
paused. Possible values include: "Active", "Inactive".
:type status: str or ~azure.mgmt.costmanagement.models.StatusType
:param recurrence: Required. The schedule recurrence. Possible values include: "Daily",
"Weekly", "Monthly", "Annually".
:type recurrence: str or ~azure.mgmt.costmanagement.models.RecurrenceType
:param recurrence_period: Has start and end date of the recurrence. The start date must be in
future. If present, the end date must be greater than start date.
:type recurrence_period: ~azure.mgmt.costmanagement.models.ExportRecurrencePeriod
"""
_validation = {
'recurrence': {'required': True},
}
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'recurrence': {'key': 'recurrence', 'type': 'str'},
'recurrence_period': {'key': 'recurrencePeriod', 'type': 'ExportRecurrencePeriod'},
}
def __init__(
self,
*,
recurrence: Union[str, "RecurrenceType"],
status: Optional[Union[str, "StatusType"]] = None,
recurrence_period: Optional["ExportRecurrencePeriod"] = None,
**kwargs
):
super(ExportSchedule, self).__init__(**kwargs)
self.status = status
self.recurrence = recurrence
self.recurrence_period = recurrence_period
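# A minimal construction sketch for a monthly export schedule; the dates and
# status values below are illustrative, not service defaults:
#
#   import datetime
#   period = ExportRecurrencePeriod(
#       from_property=datetime.datetime(2024, 1, 1),
#       to=datetime.datetime(2024, 6, 30))
#   schedule = ExportSchedule(
#       recurrence="Monthly", status="Active", recurrence_period=period)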
class ExportTimePeriod(msrest.serialization.Model):
"""The date range for data in the export. This should only be specified with timeFrame set to 'Custom'. The maximum date range is 3 months.
All required parameters must be populated in order to send to Azure.
:param from_property: Required. The start date for export data.
:type from_property: ~datetime.datetime
:param to: Required. The end date for export data.
:type to: ~datetime.datetime
"""
_validation = {
'from_property': {'required': True},
'to': {'required': True},
}
_attribute_map = {
'from_property': {'key': 'from', 'type': 'iso-8601'},
'to': {'key': 'to', 'type': 'iso-8601'},
}
def __init__(
self,
*,
from_property: datetime.datetime,
to: datetime.datetime,
**kwargs
):
super(ExportTimePeriod, self).__init__(**kwargs)
self.from_property = from_property
self.to = to
class ForecastDataset(msrest.serialization.Model):
"""The definition of data present in the forecast.
:param granularity: The granularity of rows in the forecast. Possible values include: "Daily".
:type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType
:param configuration: Has configuration information for the data in the export. The
configuration will be ignored if aggregation and grouping are provided.
:type configuration: ~azure.mgmt.costmanagement.models.QueryDatasetConfiguration
:param aggregation: Dictionary of aggregation expression to use in the forecast. The key of
each item in the dictionary is the alias for the aggregated column. forecast can have up to 2
aggregation clauses.
:type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation]
:param filter: Has filter expression to use in the forecast.
:type filter: ~azure.mgmt.costmanagement.models.QueryFilter
"""
_attribute_map = {
'granularity': {'key': 'granularity', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'QueryDatasetConfiguration'},
'aggregation': {'key': 'aggregation', 'type': '{QueryAggregation}'},
'filter': {'key': 'filter', 'type': 'QueryFilter'},
}
def __init__(
self,
*,
granularity: Optional[Union[str, "GranularityType"]] = None,
configuration: Optional["QueryDatasetConfiguration"] = None,
aggregation: Optional[Dict[str, "QueryAggregation"]] = None,
filter: Optional["QueryFilter"] = None,
**kwargs
):
super(ForecastDataset, self).__init__(**kwargs)
self.granularity = granularity
self.configuration = configuration
self.aggregation = aggregation
self.filter = filter
class ForecastDefinition(msrest.serialization.Model):
"""The definition of a forecast.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of the forecast. Possible values include: "Usage",
"ActualCost", "AmortizedCost".
:type type: str or ~azure.mgmt.costmanagement.models.ForecastType
:param timeframe: Required. The time frame for pulling data for the forecast. If custom, then a
specific time period must be provided. Possible values include: "MonthToDate",
"BillingMonthToDate", "TheLastMonth", "TheLastBillingMonth", "WeekToDate", "Custom".
:type timeframe: str or ~azure.mgmt.costmanagement.models.ForecastTimeframeType
:param time_period: Has time period for pulling data for the forecast.
:type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod
:param dataset: Has definition for data in this forecast.
:type dataset: ~azure.mgmt.costmanagement.models.ForecastDataset
:param include_actual_cost: a boolean determining if actualCost will be included.
:type include_actual_cost: bool
:param include_fresh_partial_cost: a boolean determining if FreshPartialCost will be included.
:type include_fresh_partial_cost: bool
"""
_validation = {
'type': {'required': True},
'timeframe': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'timeframe': {'key': 'timeframe', 'type': 'str'},
'time_period': {'key': 'timePeriod', 'type': 'QueryTimePeriod'},
'dataset': {'key': 'dataset', 'type': 'ForecastDataset'},
'include_actual_cost': {'key': 'includeActualCost', 'type': 'bool'},
'include_fresh_partial_cost': {'key': 'includeFreshPartialCost', 'type': 'bool'},
}
def __init__(
self,
*,
type: Union[str, "ForecastType"],
timeframe: Union[str, "ForecastTimeframeType"],
time_period: Optional["QueryTimePeriod"] = None,
dataset: Optional["ForecastDataset"] = None,
include_actual_cost: Optional[bool] = None,
include_fresh_partial_cost: Optional[bool] = None,
**kwargs
):
super(ForecastDefinition, self).__init__(**kwargs)
self.type = type
self.timeframe = timeframe
self.time_period = time_period
self.dataset = dataset
self.include_actual_cost = include_actual_cost
self.include_fresh_partial_cost = include_fresh_partial_cost
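# Sketch of a month-to-date actual-cost forecast definition; the field values
# are assumptions, and QueryAggregation is defined later in this module:
#
#   definition = ForecastDefinition(
#       type="ActualCost",
#       timeframe="MonthToDate",
#       dataset=ForecastDataset(
#           granularity="Daily",
#           aggregation={"totalCost": QueryAggregation(name="Cost", function="Sum")}))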
class KpiProperties(msrest.serialization.Model):
"""Each KPI must contain a 'type' and 'enabled' key.
:param type: KPI type (Forecast, Budget). Possible values include: "Forecast", "Budget".
:type type: str or ~azure.mgmt.costmanagement.models.KpiType
:param id: ID of resource related to metric (budget).
:type id: str
    :param enabled: Whether to show the KPI in the UI.
:type enabled: bool
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'enabled': {'key': 'enabled', 'type': 'bool'},
}
def __init__(
self,
*,
type: Optional[Union[str, "KpiType"]] = None,
id: Optional[str] = None,
enabled: Optional[bool] = None,
**kwargs
):
super(KpiProperties, self).__init__(**kwargs)
self.type = type
self.id = id
self.enabled = enabled
class Operation(msrest.serialization.Model):
"""A Cost management REST API operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Operation name: {provider}/{resource}/{operation}.
:vartype name: str
:param display: The object that represents the operation.
:type display: ~azure.mgmt.costmanagement.models.OperationDisplay
"""
_validation = {
'name': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
}
def __init__(
self,
*,
display: Optional["OperationDisplay"] = None,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = None
self.display = display
class OperationDisplay(msrest.serialization.Model):
"""The object that represents the operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provider: Service provider: Microsoft.CostManagement.
:vartype provider: str
:ivar resource: Resource on which the operation is performed: Dimensions, Query.
:vartype resource: str
:ivar operation: Operation type: Read, write, delete, etc.
:vartype operation: str
"""
_validation = {
'provider': {'readonly': True},
'resource': {'readonly': True},
'operation': {'readonly': True},
}
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = None
self.resource = None
self.operation = None
class OperationListResult(msrest.serialization.Model):
"""Result of listing cost management operations. It contains a list of operations and a URL link to get the next set of results.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of cost management operations supported by the Microsoft.CostManagement
resource provider.
:vartype value: list[~azure.mgmt.costmanagement.models.Operation]
:ivar next_link: URL to get the next set of operation list results if there are any.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class PivotProperties(msrest.serialization.Model):
"""Each pivot must contain a 'type' and 'name'.
:param type: Data type to show in view. Possible values include: "Dimension", "TagKey".
:type type: str or ~azure.mgmt.costmanagement.models.PivotType
:param name: Data field to show in view.
:type name: str
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
type: Optional[Union[str, "PivotType"]] = None,
name: Optional[str] = None,
**kwargs
):
super(PivotProperties, self).__init__(**kwargs)
self.type = type
self.name = name
class QueryAggregation(msrest.serialization.Model):
"""The aggregation expression to be used in the query.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the column to aggregate.
:type name: str
:param function: Required. The name of the aggregation function to use. Possible values
include: "Sum".
:type function: str or ~azure.mgmt.costmanagement.models.FunctionType
"""
_validation = {
'name': {'required': True},
'function': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'function': {'key': 'function', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
function: Union[str, "FunctionType"],
**kwargs
):
super(QueryAggregation, self).__init__(**kwargs)
self.name = name
self.function = function
class QueryColumn(msrest.serialization.Model):
"""QueryColumn.
:param name: The name of column.
:type name: str
:param type: The type | |
shape to use.
use_latest_image (bool): Whether to use the latest compute image
compartment_id (str): OCID of the parent compartment.
config (dict): An OCI config object or None
config_profile (str): The name of an OCI config profile
interactive (bool): Indicates whether to execute in interactive mode
raise_exceptions (bool): If true exceptions are raised
return_formatted (bool): If true a human readable string is returned
return_python_object (bool): Used for internal plugin calls
Returns:
a compute image object
"""
operating_system = kwargs.get("operating_system")
operating_system_version = kwargs.get("operating_system_version")
image_caption = kwargs.get("image_caption")
shape = kwargs.get("shape")
use_latest_image = kwargs.get("use_latest_image", False)
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
config_profile = kwargs.get("config_profile")
interactive = kwargs.get("interactive", core.get_interactive_default())
raise_exceptions = kwargs.get("raise_exceptions", not interactive)
return_formatted = kwargs.get("return_formatted", interactive)
return_python_object = kwargs.get("return_python_object", False)
# Get the active config and compartment
try:
# Get the active config and compartment
config = configuration.get_current_config(
config=config, config_profile=config_profile,
interactive=interactive)
compartment_id = configuration.get_current_compartment_id(
compartment_id=compartment_id, config=config)
images = list_images(
operating_system=operating_system,
operating_system_version=operating_system_version,
image_caption=image_caption,
shape=shape,
compartment_id=compartment_id, config=config,
interactive=interactive, raise_exceptions=raise_exceptions,
return_python_object=True)
if len(images) == 0:
raise ValueError(
"No compute image found using the given parameters."
"Operation cancelled.")
# If there is only one image, return it
image = None
if len(images) == 1 or use_latest_image or not interactive:
image = images[0]
else:
# Let the user choose from the image list
print(f"\nPlease choose a compute image from this list.\n")
image = core.prompt_for_list_item(
item_list=images, prompt_caption=(
"Please enter the name or index of the "
"compute image: "),
item_name_property="display_name",
given_value=image_caption,
print_list=True)
return core.return_oci_object(
oci_object=image,
return_formatted=return_formatted,
return_python_object=return_python_object,
format_function=format_compute_images)
except Exception as e:
if raise_exceptions:
raise
print(f"ERROR: {str(e)}")
@plugin_function('mds.get.computeImageId')
def get_image_id(operating_system=None, operating_system_version=None,
image_caption=None, shape=None, compartment_id=None,
config=None, interactive=True):
"""Gets a compute image id
Args:
operating_system (str): The name of the operating system
operating_system_version (str): The version of the operating system
image_caption (str): The caption of the compute image to use
shape (str): The name of the shape to use.
compartment_id (str): OCID of the parent compartment.
config (object): An OCI config object or None.
interactive (bool): Indicates whether to execute in interactive mode
Returns:
an OCID
"""
image = get_image(
operating_system=operating_system,
operating_system_version=operating_system_version,
image_caption=image_caption, shape=shape,
compartment_id=compartment_id, config=config,
interactive=interactive)
return None if image is None else image.id
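# Hedged usage sketch (added; not part of the original plugin). The operating
# system name and version below are assumed example values; omitted parameters
# fall back to interactive selection or the current compartment/config.
#
#   image_id = get_image_id(
#       operating_system="Oracle Linux", operating_system_version="8",
#       interactive=False)
#   # returns the image OCID, or None if no matching image was found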
def format_vnic_listing(vnics):
"""Returns a formated list of vnics.
Args:
vnics (list): A list of vnics objects.
Returns:
The formatted list as string
"""
import re
out = ""
i = 1
for v in vnics:
# Shorten to 24 chars max, remove linebreaks
name = re.sub(r'[\n\r]', ' ',
v.display_name[:22] + '..'
if len(v.display_name) > 24
else v.display_name)
private_ip = v.private_ip if v.private_ip else ""
public_ip = v.public_ip if v.public_ip else ""
out += (f"{i:>4} {name:24} {private_ip:15} {public_ip:15} "
f"{v.lifecycle_state[:8]:8} {v.time_created:%Y-%m-%d %H:%M}\n")
i += 1
return out
@plugin_function('mds.list.computeInstanceVnics')
def list_vnics(**kwargs):
"""Lists all available vnics for the given compartment and
availability_domain
Args:
**kwargs: Optional parameters
Keyword Args:
instance_id (str): OCID of the compute instance
availability_domain (str): The name of the availability_domain to use.
ignore_current (bool): Whether the current instance should be ignored
compartment_id (str): OCID of the parent compartment
config (dict): An OCI config object or None
config_profile (str): The name of an OCI config profile
interactive (bool): Indicates whether to execute in interactive mode
raise_exceptions (bool): If true exceptions are raised
return_formatted (bool): If true a human readable string is returned
return_python_object (bool): Used for internal plugin calls
Returns:
The list of vnics in either JSON or human readable format
"""
instance_id = kwargs.get("instance_id")
availability_domain = kwargs.get("availability_domain")
ignore_current = kwargs.get("ignore_current", False)
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
config_profile = kwargs.get("config_profile")
interactive = kwargs.get("interactive", core.get_interactive_default())
raise_exceptions = kwargs.get("raise_exceptions", not interactive)
return_formatted = kwargs.get("return_formatted", interactive)
return_python_object = kwargs.get("return_python_object", False)
try:
config = configuration.get_current_config(
config=config, config_profile=config_profile,
interactive=interactive)
compartment_id = configuration.get_current_compartment_id(
compartment_id=compartment_id, config=config)
current_instance_id = configuration.get_current_instance_id(
config=config)
if not ignore_current and not instance_id:
instance_id = current_instance_id
if not instance_id and interactive:
instance_id = get_instance_id(
compartment_id=compartment_id,
config=config, interactive=interactive)
if not instance_id:
raise ValueError("No instance_id given."
"Cancelling operation")
import oci.exceptions
try:
# Initialize the oci client
compute = core.get_oci_compute_client(config=config)
network = core.get_oci_virtual_network_client(config=config)
vnic_attachments = compute.list_vnic_attachments(
compartment_id=compartment_id,
availability_domain=availability_domain,
instance_id=instance_id).data
vnics = []
for vnic_att in vnic_attachments:
vnics.append(network.get_vnic(vnic_att.vnic_id).data)
return core.return_oci_object(
oci_object=vnics,
return_formatted=return_formatted,
return_python_object=return_python_object,
format_function=format_vnic_listing)
except oci.exceptions.ServiceError as e:
if raise_exceptions:
raise
print(f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
except Exception as e:
if raise_exceptions:
raise
print(f'ERROR: {e}')
@plugin_function('mds.create.computeInstance')
def create_instance(**kwargs):
"""Creates a new compute instance
This function will create a new compute instance.
Args:
**kwargs: Additional options
Keyword Args:
instance_name (str): The name used for the new compute instance.
availability_domain (str): The name of the availability_domain to use
shape (str): The compute shape used for the instance
subnet_id (str): The OCID of the subnet to use
public_subnet (bool): Whether the subnet should be public or private
operating_system (str): The name of the operating system,
e.g. "Oracle Linux"
operating_system_version (str): The version of the operating system,
e.g. 8
use_latest_image (bool): Whether to use the latest compute image
ssh_public_key_path (str): The path to the public ssh key,
default is ~/.ssh/id_rsa.pub
init_script (str): A string holding the commands to execute at first
instance startup, starting with #!/bin/bash and separated by
linebreaks
init_script_file_path (str): The path to an init script to be
executed at first instance startup. If specified, this file
will be used instead of the script passed in the init_script
parameter
defined_tags (dict): The defined_tags of the compute instance.
freeform_tags (dict): The freeform_tags of the compute instance
compartment_id (str): OCID of the parent compartment.
config (object): An OCI config object or None.
config_profile (str): The name of an OCI config profile
interactive (bool): Indicates whether to execute in interactive mode
raise_exceptions (bool): If true exceptions are raised
return_formatted (bool): If true a human readable string is returned
return_python_object (bool): Used for internal plugin calls
Returns:
The new instance if interactive is set to false
"""
instance_name = kwargs.get("instance_name")
availability_domain = kwargs.get("availability_domain")
shape = kwargs.get("shape")
subnet_id = kwargs.get("subnet_id")
public_subnet = kwargs.get("public_subnet")
operating_system = kwargs.get("operating_system")
operating_system_version = kwargs.get("operating_system_version")
use_latest_image = kwargs.get("use_latest_image", False)
ssh_public_key_path = kwargs.get(
"ssh_public_key_path", "~/.ssh/id_rsa.pub")
init_script = kwargs.get("init_script")
init_script_file_path = kwargs.get("init_script_file_path")
defined_tags = kwargs.get("defined_tags")
# Manual conversion from Shell Dict type until this is automatically done
if defined_tags:
defined_tags = dict(defined_tags)
freeform_tags = kwargs.get("freeform_tags")
# Manual conversion from Shell Dict type until this is automatically done
if freeform_tags:
freeform_tags = dict(freeform_tags)
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
config_profile = kwargs.get("config_profile")
interactive = kwargs.get("interactive", core.get_interactive_default())
raise_exceptions = kwargs.get("raise_exceptions", not interactive)
return_formatted = kwargs.get("return_formatted", interactive)
return_python_object = kwargs.get("return_python_object", False)
# Get the active config and compartment
try:
# Get the active config and compartment
config = configuration.get_current_config(
config=config, config_profile=config_profile,
interactive=interactive)
compartment_id = configuration.get_current_compartment_id(
compartment_id=compartment_id, config=config)
import oci.core.models
import oci.exceptions
import oci.pagination
import os.path
import pathlib
import base64
from pathlib import Path
import mysqlsh
from mds_plugin import compartment
from mds_plugin import network
try:
if interactive:
print("Creating a new compute instance ...\n")
# Get a name
if instance_name is None and interactive:
instance_name = mysqlsh.globals.shell.prompt(
"Please enter the name for the new instance: ",
{'defaultValue': ''}).strip()
if not instance_name:
raise ValueError(
"No instance name given. Operation cancelled.")
# Get the availability_domain name
availability_domain_obj = compartment.get_availability_domain(
compartment_id=compartment_id,
availability_domain=availability_domain,
random_selection=True,
config=config,
interactive=interactive,
return_python_object=True)
if availability_domain_obj is None:
raise ValueError("No availability domain given. "
"Operation cancelled.")
else:
availability_domain = availability_domain_obj.name
if interactive:
print(f"Using availability domain {availability_domain}.")
# Get list of available shapes
shape_name = get_shape_name(
shape_name=shape, compartment_id=compartment_id,
availability_domain=availability_domain, config=config,
interactive=interactive)
if not shape_name:
print("Operation cancelled.")
return
if interactive:
print(f"Using shape {shape_name}.")
# Get id of compute image
image = get_image(
operating_system=operating_system,
operating_system_version=operating_system_version,
use_latest_image=use_latest_image,
shape=shape_name, compartment_id=compartment_id, config=config,
interactive=interactive,
return_python_object=True)
if image is None:
print("Operation cancelled.")
return
image_id = image.id
if interactive:
print(f"Using image {image.display_name}.")
# Expand the user directory and get the absolute path of the public key
ssh_public_key_path = os.path.abspath(
os.path.expanduser(ssh_public_key_path))
# Check if there is a key available
if os.path.exists(ssh_public_key_path):
with open(ssh_public_key_path, mode='r') as file:
public_key = file.read()
else:
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
import stat
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
# cSpell:ignore PKCS
private_key = key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption())
public_key = key.public_key().public_bytes(
serialization.Encoding.OpenSSH,
serialization.PublicFormat.OpenSSH
)
# Build ssh_private_key_path from ssh_public_key_path by
# removing extension
t_key = (m_det_dis_desired, f_det_dis_desired, num_attenuators)
direction = _s(d.metadata.get("he3_back.direction", "UNPOLARIZED"))
if direction != "UNPOLARIZED" and (scan_id - previous_scan_id) == 1:
p = previous_transmission
#print('previous transmission: ', p)
#print(p.get("CellTimeIdentifier", None), tstart,
# p.get("m_det_dis_desired", None), m_det_dis_desired,
# p.get("f_det_dis_desired", None), f_det_dis_desired,
# p.get("num_attenuators", None), num_attenuators)
if p.get("CellTimeIdentifier", None) == cellstart and \
p.get("m_det_dis_desired", None) == m_det_dis_desired and \
p.get("f_det_dis_desired", None) == f_det_dis_desired and \
p.get("num_attenuators", None) == num_attenuators:
p["HE3_IN_file"] = filename
p["HE3_IN_counts"] = detector_counts
p["HE3_IN_count_time"] = count_time
p["HE3_IN_mon"] = monitor_counts
p["HE3_IN_timestamp"] = middle_timestamp
if t_key in BlockedBeams:
bb = BlockedBeams[t_key]
BlockBeamRate = bb['counts_per_second']
BlockBeam_filename = bb['filename']
else:
BlockBeamRate = 0
BlockBeam_filename = "missing"
p["BlockedBeam_filename"] = BlockBeam_filename
HE3_transmission_IN = (p["HE3_IN_counts"] - BlockBeamRate*p["HE3_IN_count_time"])/p["HE3_IN_mon"]
HE3_transmission_OUT = (p["HE3_OUT_counts"] - BlockBeamRate*p["HE3_OUT_count_time"])/p["HE3_OUT_mon"]
HE3_transmission = HE3_transmission_IN / HE3_transmission_OUT
p['transmission'] = HE3_transmission
p['atomic_pol'] = np.arccosh(HE3_transmission / (Te * np.exp(-Mu))) / Mu
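# Note added for clarity (standard polarized-3He transmission relation assumed):
# the line above inverts T = Te * exp(-Mu) * cosh(Mu * P_A), the transmission of
# an unpolarized beam through a cell of opacity Mu, empty-cell transmission Te
# and atomic polarization P_A, so P_A = arccosh(T / (Te * exp(-Mu))) / Mu.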
mapping_trans.append(deepcopy(p))
else:
previous_transmission = {
"CellTimeIdentifier": cellstart,
"HE3_OUT_file": filename,
"HE3_OUT_counts": detector_counts,
"HE3_OUT_count_time": count_time,
"HE3_OUT_mon": monitor_counts,
"m_det_dis_desired": m_det_dis_desired,
"f_det_dis_desired": f_det_dis_desired,
"num_attenuators": num_attenuators
}
previous_scan_id = scan_id
# catch back-to-back
bb_out = _toDictItem(list(BlockedBeams.values()))
trans_1d = []
atomic_pol_1d = []
for m in mappings.values():
transmissions = []
atomic_pols = []
timestamps = []
for c in m["Transmissions"]:
t = c['transmission']
ap = c['atomic_pol']
if t > 0:
transmissions.append(t)
atomic_pols.append(ap)
timestamps.append(c['HE3_IN_timestamp'])
x = np.array(timestamps)
x0 = m['Insert_time']/1000.0
xa = (x-x0)/(3600)
dx = np.zeros_like(x)
v = np.array(transmissions)
dv = np.zeros_like(v)
va = np.array(atomic_pols)
dva = np.zeros_like(va)
if (len(timestamps) > 1):
ginv, logP = np.polyfit(xa, np.log(va), 1)
m['P0'] = np.exp(logP)
m['Gamma'] = -1/ginv
else:
m['P0'] = va[0]
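# Note added for clarity: the branch above assumes an exponential decay of the
# atomic polarization, P_A(t) = P0 * exp(-t / Gamma), with t in hours. Fitting
# ln(P_A) against xa gives slope ginv = -1/Gamma and intercept logP = ln(P0),
# hence Gamma = -1/ginv and P0 = exp(logP); with a single point only P0 is set.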
ordering = np.argsort(x)
trans_1d.append(VSans1dData(x[ordering] - x0, v[ordering], dx=dx, dv=dv, xlabel="timestamp (s)", vlabel="Transmission", metadata={"title": _s(m["Cell_name"])}))
atomic_pol_1d.append(VSans1dData(xa[ordering], va[ordering], dx=dx, dv=dva, xlabel="timestamp (h)", vlabel="Atomic Polarization", metadata={"title": _s(m["Cell_name"])}))
return he3data, trans_1d, atomic_pol_1d, [Parameters({"cells": mappings, "blocked_beams": bb_out})]
def get_transmission_sum(detectors, panel_name="auto"):
from .vsansdata import short_detectors
total_counts = -np.inf
if panel_name == 'auto':
for sn in short_detectors:
detname = "detector_{sn}".format(sn=sn)
if 'data' in detectors[detname]:
counts = detectors[detname]['data']['value'].sum()
if counts > total_counts:
total_counts = counts
else:
detname = "detector_{sn}".format(sn=panel_name)
total_counts = detectors[detname]['data']['value'].sum()
return total_counts
@nocache
@module
def patch(data, patches=None):
"""
Applies metadata patches (JSON-Patch operations) to the given datafiles, keyed by run.filename, and returns them.
**Inputs**
data (raw): datafiles with metadata to patch
patches (patch_metadata[]:run.filename): patches to be applied, with run.filename used as unique key
**Returns**
patched (raw): datafiles with patched metadata
2019-07-26 <NAME>
"""
if patches is None:
return data
from jsonpatch import JsonPatch
from collections import OrderedDict
# make a master dict of metadata from provided key:
key="run.filename"
master = OrderedDict([(_s(d.metadata[key]), d.metadata) for d in data])
to_apply = JsonPatch(patches)
to_apply.apply(master, in_place=True)
return data
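# Hypothetical sketch (added; filename and field are assumed examples) of a
# patches argument: a JSON-Patch operation list whose paths are rooted at
# run.filename, matching the master dict built above.
#
#   patches = [
#       {"op": "replace",
#        "path": "/sans12345.nxs.ngv/analysis.intent",
#        "value": "blocked beam"},
#   ]
#   patched = patch(data, patches=patches)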
@nocache
@module
def sort_sample(raw_data):
"""
categorize data files
**Inputs**
raw_data (raw[]): datafiles in
**Returns**
blocked_beam (raw[]): datafiles with "blocked beam" intent
2018-04-27 <NAME>
"""
blocked_beam = [f for f in raw_data if _s(f.metadata.get('analysis.intent', '')).lower().startswith('bl')]
return blocked_beam
@nocache
@module
def calculate_XY(raw_data, solid_angle_correction=True):
"""
from embedded detector metadata, calculates the x,y,z values for each detector.
**Inputs**
raw_data (raw): raw datafiles
solid_angle_correction (bool): Divide by solid angle
**Returns**
realspace_data (realspace): datafiles with realspace information
| 2018-04-28 <NAME>
| 2019-09-19 Added monitor normalization
| 2019-09-22 Separated monitor and dOmega norm
| 2020-10-02 <NAME> ignore back detector when data missing
"""
from .vsansdata import VSansDataRealSpace, short_detectors
from collections import OrderedDict
metadata = deepcopy(raw_data.metadata)
monitor_counts = metadata['run.moncnt']
new_detectors = OrderedDict()
for sn in short_detectors:
detname = 'detector_{short_name}'.format(short_name=sn)
det = deepcopy(raw_data.detectors[detname])
dimX = int(det['pixel_num_x']['value'][0])
dimY = int(det['pixel_num_y']['value'][0])
z_offset = det.get('setback', {"value": [0.0]})['value'][0]
z = det['distance']['value'][0] + z_offset
if sn == "B":
# special handling for back detector
total = det['integrated_count']['value'][0] if 'integrated_count' in det else 0
if total < 1:
# don't load the back detector if it has no counts (turned off)
continue
beam_center_x_pixels = det['beam_center_x']['value'][0] # in pixels
beam_center_y_pixels = det['beam_center_y']['value'][0]
cal_x = det['cal_x']['value'] # in cm
cal_y = det['cal_y']['value']
x_pixel_size = cal_x[0] # cm
y_pixel_size = cal_y[0] # cm
beam_center_x = x_pixel_size * beam_center_x_pixels
beam_center_y = y_pixel_size * beam_center_y_pixels
# lateral_offset = det['lateral_offset']['value'][0] # # already cm
realDistX = 0.5 * x_pixel_size
realDistY = 0.5 * y_pixel_size
data = det['data']['value']
if 'linear_data_error' in det and 'value' in det['linear_data_error']:
data_variance = np.sqrt(det['linear_data_error']['value'])
else:
data_variance = data
udata = Uncertainty(data, data_variance)
else:
orientation = det['tube_orientation']['value'][0].decode().upper()
coeffs = det['spatial_calibration']['value']
lateral_offset = 0
vertical_offset = 0
beam_center_x = det['beam_center_x']['value'][0]
beam_center_y = det['beam_center_y']['value'][0]
panel_gap = det['panel_gap']['value'][0]/10.0 # mm to cm
if (orientation == "VERTICAL"):
x_pixel_size = det['x_pixel_size']['value'][0] / 10.0 # mm to cm
y_pixel_size = coeffs[1][0] / 10.0 # mm to cm
lateral_offset = det['lateral_offset']['value'][0] # # already cm
else:
x_pixel_size = coeffs[1][0] / 10.0
y_pixel_size = det['y_pixel_size']['value'][0] / 10.0 # mm to cm
vertical_offset = det['vertical_offset']['value'][0] # already cm
#solid_angle_correction = z*z / 1e6
data = det['data']['value']
if 'linear_data_error' in det and 'value' in det['linear_data_error']:
data_variance = np.sqrt(det['linear_data_error']['value'])
else:
data_variance = data
udata = Uncertainty(data, data_variance)
position_key = sn[-1]
if position_key == 'T':
# FROM IGOR: (q,p = 0 for lower-left pixel)
# if(cmpstr("T",detStr[1]) == 0)
# data_realDistY[][] = tube_width*(q+1/2) + offset + gap/2
# data_realDistX[][] = coefW[0][q] + coefW[1][q]*p + coefW[2][q]*p*p
realDistX = coeffs[0][0]/10.0 # to cm
realDistY = 0.5 * y_pixel_size + vertical_offset + panel_gap/2.0
elif position_key == 'B':
# FROM IGOR: (q,p = 0 for lower-left pixel)
# if(cmpstr("B",detStr[1]) == 0)
# data_realDistY[][] = offset - (dimY - q - 1/2)*tube_width - gap/2
# data_realDistX[][] = coefW[0][q] + coefW[1][q]*p + coefW[2][q]*p*p
realDistX = coeffs[0][0]/10.0
realDistY = vertical_offset - (dimY - 0.5)*y_pixel_size - panel_gap/2.0
elif position_key == 'L':
# FROM IGOR: (q,p = 0 for lower-left pixel)
# if(cmpstr("L",detStr[1]) == 0)
# data_realDistY[][] = coefW[0][p] + coefW[1][p]*q + coefW[2][p]*q*q
# data_realDistX[][] = offset - (dimX - p - 1/2)*tube_width - gap/2
realDistX = lateral_offset - (dimX - 0.5)*x_pixel_size - panel_gap/2.0
realDistY = coeffs[0][0]/10.0
elif position_key == 'R':
# FROM IGOR: (q,p = 0 for lower-left pixel)
# data_realDistY[][] = coefW[0][p] + coefW[1][p]*q + coefW[2][p]*q*q
# data_realDistX[][] = tube_width*(p+1/2) + offset + gap/2
realDistX = x_pixel_size*(0.5) + lateral_offset + panel_gap/2.0
realDistY = coeffs[0][0]/10.0
#x_pos = size_x/2.0 # place panel with lower-right corner at center of view
#y_pos = size_y/2.0 #
x0_pos = realDistX - beam_center_x # then move it the 'real' distance away from the origin,
y0_pos = realDistY - beam_center_y # which is the beam center
#metadata['det_' + short_name + '_x0_pos'] = x0_pos
#metadata['det_' + short_name + '_y0_pos'] = y0_pos
X,Y = np.indices((dimX, dimY))
X = X * x_pixel_size + x0_pos
Y = Y * y_pixel_size + y0_pos
det['data'] = udata
det['X'] = X
det['dX'] = x_pixel_size
det['Y'] = Y
det['dY'] = y_pixel_size
det['Z'] = z
det['dOmega'] = x_pixel_size * y_pixel_size / z**2
if solid_angle_correction:
det['data'] /= det['dOmega']
new_detectors[detname] = det
output = VSansDataRealSpace(metadata=metadata, detectors=new_detectors)
return output
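# Note added for clarity: each panel pixel ends up with real-space coordinates
# (X, Y) in cm relative to the beam center, a distance Z in cm, and a pixel
# solid angle dOmega = dX * dY / Z**2; with solid_angle_correction enabled the
# counts are divided by dOmega so panels at different distances are comparable.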
@cache
@module
def oversample_XY(realspace_data, oversampling=3, exclude_back_detector=True):
"""
Split each pixel into subpixels in realspace
**Inputs**
realspace_data (realspace): data in XY coordinates
oversampling (int): how many subpixels to create along x and y
(e.g. oversampling=3 results in 9 subpixels per input pixel)
exclude_back_detector {exclude back detector} (bool): Skip oversampling for the back detector when true
**Returns**
oversampled (realspace): datasets with oversampled pixels
| 2019-10-29 <NAME>
"""
from .vsansdata import short_detectors
rd = realspace_data.copy()
for sn in short_detectors:
detname = 'detector_{short_name}'.format(short_name=sn)
if detname == 'detector_B' and exclude_back_detector:
continue
if not detname in rd.detectors:
continue
det = rd.detectors[detname]
X = det['X']
Y = det['Y']
dX = det['dX']
dY = det['dY']
x_min = X.min() - dX/2.0
y_min = Y.min() - dY/2.0
data = det['data']
dimX, dimY = data.shape
dimX *= oversampling
dimY *= oversampling
dX /= oversampling
dY /= oversampling
X,Y = np.indices((dimX, dimY))
X = X * dX + x_min + dX/2.0
Y = Y * dY + y_min + dY/2.0
det['data'] = np.repeat(np.repeat(data, oversampling, 0), oversampling, 1) / oversampling**2
det['X'] = X
det['dX'] = dX
det['Y'] = Y
det['dY'] = dY
det['dOmega'] /= oversampling**2
det['oversampling'] = det.get('oversampling', 1.0) * oversampling
return rd
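# Illustrative note (added): with oversampling=3 a dimX x dimY panel becomes a
# (3*dimX) x (3*dimY) grid of subpixels; np.repeat copies each count into its
# 3x3 block and the division by oversampling**2, together with the matching
# dOmega update, keeps total counts and count rate per solid angle unchanged.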
@module
def monitor_normalize(qdata, mon0=1e8):
""""
Given a SansData object, normalize the data to the provided monitor
**Inputs**
qdata (qspace): data in
mon0 (float): provided monitor
**Returns**
posSair[1], 100, 40))
pygame.draw.rect(tela, cor["preto"], (posSair[0], posSair[1], 100, 40), 1)
tela.blit(aviso, (largura // 2 - 155, altura // 2 - 120))
tela.blit(reiniciar, (posReiniciar[0] + 5, posReiniciar[1] + 10))
tela.blit(sair, (posSair[0] + 30, posSair[1] + 10))
pygame.display.update()
return 1
def pausa(self):
aviso = self.fonteAviso.render("Pausa", True, cor["preto"])
reiniciar = self.fonte.render("Reiniciar", True, cor["preto"])
continuar = self.fonte.render("Continuar", True, cor["preto"])
sair = self.fonte.render("Sair", True, cor["preto"])
posContinuar = [largura // 2 - 55, altura // 2 - 50]
posReiniciar = [largura // 2 - 165, altura // 2 - 50]
posSair = [largura // 2 + 65, altura // 2 - 50]
sairPausa = False
while sairPausa != True:
pygame.draw.rect(tela, (189, 196, 202), (largura // 2 - 180, altura // 2 - 150, 360, 160))
pygame.draw.rect(tela, cor["preto"], (largura // 2 - 180, altura // 2 - 150, 360, 160), 3)
mouse = pygame.mouse.get_pos()
clique = pygame.mouse.get_pressed()
for event in pygame.event.get():
if event.type == pygame.QUIT:
sairPausa = True
return 1
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_p:
sairPausa = True
return 0
if posContinuar[0] + 110 > mouse[0] > posContinuar[0] and posContinuar[1] + 40 > mouse[1] > posContinuar[1]:
pygame.draw.rect(tela, cor["azul"], (posContinuar[0], posContinuar[1], 110, 40))
pygame.draw.rect(tela, cor["preto"], (posContinuar[0], posContinuar[1], 110, 40), 1)
if clique[0] == 1:
sairPausa = True
return 0
else:
pygame.draw.rect(tela, cor["azul_escuro"], (posContinuar[0], posContinuar[1], 110, 40))
pygame.draw.rect(tela, cor["preto"], (posContinuar[0], posContinuar[1], 110, 40), 1)
if posReiniciar[0] + 100 > mouse[0] > posReiniciar[0] and posReiniciar[1] + 40 > mouse[1] > posReiniciar[1]:
pygame.draw.rect(tela, cor["verde"], (posReiniciar[0], posReiniciar[1], 100, 40))
pygame.draw.rect(tela, cor["preto"], (posReiniciar[0], posReiniciar[1], 100, 40), 1)
if clique[0] == 1:
inicializar()
else:
pygame.draw.rect(tela, cor["verde_escuro"], (posReiniciar[0], posReiniciar[1], 100, 40))
pygame.draw.rect(tela, cor["preto"], (posReiniciar[0], posReiniciar[1], 100, 40), 1)
if posSair[0] + 100 > mouse[0] > posSair[0] and posSair[1] + 40 > mouse[1] > posSair[1]:
pygame.draw.rect(tela, cor["vermelho"], (posSair[0], posSair[1], 100, 40))
pygame.draw.rect(tela, cor["preto"], (posSair[0], posSair[1], 100, 40), 1)
if clique[0] == 1:
sairPausa = True
return 1
else:
pygame.draw.rect(tela, cor["vermelho_escuro"], (posSair[0], posSair[1], 100, 40))
pygame.draw.rect(tela, cor["preto"], (posSair[0], posSair[1], 100, 40), 1)
tela.blit(aviso, (largura // 2 - 70, altura // 2 - 120))
tela.blit(continuar, (posContinuar[0] + 5, posContinuar[1] + 10))
tela.blit(reiniciar, (posReiniciar[0] + 5, posReiniciar[1] + 10))
tela.blit(sair, (posSair[0] + 30, posSair[1] + 10))
pygame.display.update()
def vitoria(self):
aviso = self.fonteAviso.render("Você ganhou!", True, cor["preto"])
pontos = self.fonte.render("Pontos: " + str(jogador.pontos), True, cor["preto"])
movimentos = self.fonte.render("Movimentos: " + str(len(jogador.mov)), True, cor["preto"])
wumpusStatus = self.fonte.render("Wumpus: " + wumpus.status, True, cor["preto"])
ouroStatus = self.fonte.render("Ouro: " + ouro.status, True, cor["preto"])
reiniciar = self.fonte.render("Reiniciar", True, cor["preto"])
sair = self.fonte.render("Sair", True, cor["preto"])
posReiniciar = [largura // 2 - 125, altura // 2 + 30]
posSair = [largura // 2 + 20, altura // 2 + 30]
sairVitoria = False
while sairVitoria != True:
pygame.draw.rect(tela, cor["cinza_claro"], (largura // 2 - 170, altura // 2 - 150, 340, 250))
pygame.draw.rect(tela, cor["preto"], (largura // 2 - 170, altura // 2 - 150, 340, 250), 3)
mouse = pygame.mouse.get_pos()
clique = pygame.mouse.get_pressed()
for event in pygame.event.get():
if event.type == pygame.QUIT:
sairVitoria = True
if posReiniciar[0] + 100 > mouse[0] > posReiniciar[0] and posReiniciar[1] + 40 > mouse[1] > posReiniciar[1]:
pygame.draw.rect(tela, cor["verde"], (posReiniciar[0], posReiniciar[1], 100, 40))
pygame.draw.rect(tela, cor["preto"], (posReiniciar[0], posReiniciar[1], 100, 40), 1)
if clique[0] == 1:
inicializar()
else:
pygame.draw.rect(tela, cor["verde_escuro"], (posReiniciar[0], posReiniciar[1], 100, 40))
pygame.draw.rect(tela, cor["preto"], (posReiniciar[0], posReiniciar[1], 100, 40), 1)
if posSair[0] + 100 > mouse[0] > posSair[0] and posSair[1] + 40 > mouse[1] > posSair[1]:
pygame.draw.rect(tela, cor["vermelho"], (posSair[0], posSair[1], 100, 40))
pygame.draw.rect(tela, cor["preto"], (posSair[0], posSair[1], 100, 40), 1)
if clique[0] == 1:
sairVitoria = True
else:
pygame.draw.rect(tela, cor["vermelho_escuro"], (posSair[0], posSair[1], 100, 40))
pygame.draw.rect(tela, cor["preto"], (posSair[0], posSair[1], 100, 40), 1)
tela.blit(aviso, (largura // 2 - 160, altura // 2 - 120))
tela.blit(pontos, (largura // 2 - 160, altura // 2 - 50))
tela.blit(movimentos, (largura // 2 - 160, altura // 2 - 30))
tela.blit(wumpusStatus, (largura // 2 + 5, altura // 2 - 50))
tela.blit(ouroStatus, (largura // 2 + 5, altura // 2 - 30))
tela.blit(reiniciar, (posReiniciar[0] + 5, posReiniciar[1] + 10))
tela.blit(sair, (posSair[0] + 30, posSair[1] + 10))
pygame.display.update()
return 1
def config():
global x, y, jogador, flecha, mapa, wumpus, ouro, abismo, status, sair
x, y = 0, 0
jogador = Jogador()
flecha = Flecha()
mapa = Mapa()
wumpus = Wumpus()
ouro = Ouro()
abismo = Abismos()
abismo.gerar()
status = Status()
sair = False
return x, y, jogador, flecha, mapa, wumpus, ouro, abismo, status, sair
def game():
global sair
teclas = [pygame.K_w, pygame.K_s, pygame.K_a, pygame.K_d, pygame.K_f, pygame.K_p, pygame.K_DOWN, pygame.K_UP, pygame.K_LEFT, pygame.K_RIGHT]
while sair != True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sair = True
if event.type == pygame.KEYDOWN:
if event.key not in teclas:
status.msg = "Tecla inválida!"
else:
jogador.pontos -= 1
if event.key == pygame.K_s or event.key == pygame.K_DOWN:
if y < (altura - 200):
jogador.moverBaixo()
if event.key == pygame.K_w or event.key == pygame.K_UP:
if y >= 100:
jogador.moverCima()
if event.key == pygame.K_d or event.key == pygame.K_RIGHT:
if x < (largura - 100):
jogador.moverDireita()
if event.key == pygame.K_a or event.key == pygame.K_LEFT:
if x >= 100:
jogador.moverEsquerda()
if event.key == pygame.K_f:
flecha.calcularTiro()
if event.key == pygame.K_p:
jogador.pontos += 1
pausa = status.pausa()
if pausa == 1:
sair = True
mapa.exibirMapa()
ouro.exibirBrilho()
ouro.exibirOuro()
abismo.exibirVento()
abismo.exibirAbismo()
wumpus.exibirWumpus()
wumpus.exibirPegadas()
tela.blit(jogador.jogador, (x, y))
status.todos()
morte = jogador.morte()
if morte == 1:
sair = True
vitoria = jogador.vitoria()
if vitoria == 1:
sair = True
pygame.display.update()
menu.fundo()
class Menu(object):
def __init__(self):
self.fonte = pygame.font.SysFont(None, 30)
self.fontePrincipal = pygame.font.SysFont(None, 50)
self.logo = pygame.image.load("logo.png")
self.cor = cor["branco"]
self.imagemFundo = pygame.image.load("terra.png")
def fundo(self):
for i in range(tamanho):
for j in range(tamanho + 1):
x = 100 * i
y = 100 * j
tela.blit(self.imagemFundo, (x, y))
def recordes(self):
recordes = self.fontePrincipal.render("Recordes", True, cor["preto"])
posRet = [largura // 2 - 200, altura // 2 - 200]
arq = open("pontos.txt", "r")
dados = []
for i in arq:
k = i.split()
g = [int(k[0]), int(k[1])]
dados.append(g)
arq.close()
dados.sort()
dados = dados[::-1]
pontos0 = self.fonte.render("Pontos: " + str(dados[0][0]), True, cor["preto"])
mov0 = self.fonte.render("Movimentos: " + str(dados[0][1]), True, cor["preto"])
pontos1 = self.fonte.render("Pontos: " + str(dados[1][0]), True, cor["preto"])
mov1 = self.fonte.render("Movimentos: " + str(dados[1][1]), True, cor["preto"])
pontos2 = self.fonte.render("Pontos: " + str(dados[2][0]), True, cor["preto"])
mov2 = self.fonte.render("Movimentos: " + str(dados[2][1]), True, cor["preto"])
pontos3 = self.fonte.render("Pontos: " + str(dados[3][0]), True, cor["preto"])
mov3 = self.fonte.render("Movimentos: " + str(dados[3][1]), True, cor["preto"])
pontos4 = self.fonte.render("Pontos: " + str(dados[4][0]), True, cor["preto"])
mov4 = self.fonte.render("Movimentos: " + str(dados[4][1]), True, cor["preto"])
sair = False
while sair != True:
self.fundo()
pygame.draw.rect(tela, (189, 196, 202), (posRet[0], posRet[1], 400, 400))
pygame.draw.rect(tela, cor["preto"], (posRet[0], posRet[1], 400, 400), 3)
for event in pygame.event.get():
if event.type == pygame.QUIT:
sair = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
sair = True
tela.blit(recordes, (posRet[0] + 120, posRet[1] + 30))
tela.blit(pontos0, (posRet[0] + 40, posRet[1] + 100))
tela.blit(mov0, (posRet[0] + 200, posRet[1] + 100))
tela.blit(pontos1, (posRet[0] + 40, posRet[1] + 150))
tela.blit(mov1, (posRet[0] + 200, posRet[1] + 150))
tela.blit(pontos2, (posRet[0] + 40, posRet[1] + 200))
tela.blit(mov2, (posRet[0] + 200, posRet[1] + 200))
tela.blit(pontos3, (posRet[0] + 40, posRet[1] + 250))
tela.blit(mov3, (posRet[0] + 200, posRet[1] + 250))
tela.blit(pontos4, (posRet[0] + 40, posRet[1] + 300))
tela.blit(mov4, (posRet[0] + 200, posRet[1] + 300))
pygame.display.update()
def menu(self):
self.fundo()
iniciar = self.fonte.render("Iniciar", True, cor["preto"])
recordes = self.fonte.render("Recordes", True, cor["preto"])
sair = self.fonte.render("Sair", True, cor["preto"])
creditos = self.fonte.render("Criado por <NAME> & <NAME>", True, cor["preto"])
posIniciar = [largura // 2 - 165, altura // 2 + 140]
posRecordes = [largura // 2 - 55, altura // 2 + 140]
posSair = [largura // 2 + 65, altura // 2 + 140]
posCreditos = [largura // 2 - 215, altura // 2 + 200]
sairMenu = False
while sairMenu != True:
tela.blit(self.logo, (largura // 2 - 200, altura // 2 - 250))
mouse = pygame.mouse.get_pos()
clique = pygame.mouse.get_pressed()
for event in pygame.event.get():
if event.type == pygame.QUIT:
sairMenu = True
if the pod does not exist since the pod is not computed.
CanNotForceNromError
if the problem comes from saved files and n_rom is not None.
Setting n_rom not supported for problem from saved files.
Returns
-------
None.
"""
if not self._is_pod_computed:
raise PodNotComputedError("Pod is not computed. Can not solve.")
if self._is_form_files and n_rom is not None:
raise CanNotForceNromError("Can not force n_rom. Not implemented for LinearElasticity2DProblem for files.")
# set n_rom to n_rom-true if it is None
if n_rom is None:
n_rom = self._rb_data.n_rom
# compute the rom matrices and load vectors if n_rom differs from the last used value (last_n_rom)
if n_rom != self._rb_data.last_n_rom and not self._is_form_files:
self._rb_data.compute_rb_matrices_and_vectors(n_rom, self._hf_data, self._has_neumann,
self._has_non_homo_dirichlet, self._has_non_homo_neumann)
# set last n_rom
self._rb_data.last_n_rom = self._rb_data.n_rom
# compute the matrix a and the load vector
a_rom = self.compute_a_free_rom(e_young, nu_poisson)
f_load_rom = self.compute_f_load_rom(e_young, nu_poisson) # e_young, nu_poisson needed if non homo. dirichlet
# initialize uh
uh_rom = np.zeros(self._hf_data.n_full)
start_time = perf_counter()
# solve and project rb solution
uh_rom[self._hf_data.expanded_free_index] = self._rb_data.v @ np.linalg.solve(a_rom, f_load_rom)
if print_info:
print("Solved a_rom @ uh_rom = f_load_rom in {:.6f} sec".format(perf_counter() - start_time))
if self._has_non_homo_dirichlet:
# lifting function
uh_rom[self._hf_data.expanded_dirichlet_edge_index] = self._hf_data.rg
# set uh_rom, save it in a nice way.
self._uh_rom = SolutionFunctionValues2D.from_1x2n(uh_rom)
self._uh_rom.set_e_young_and_nu_poisson(e_young, nu_poisson)
if print_info:
print("Get solution by the property uh_rom, uh_rom_free or uh_rom_full of the class.\n" +
"The property uh_rom, extra properties values, x and y are available.")
def build_rb_model(self, grid=rb_grid, mode=pod_sampling_mode, e_young_range=e_young_range,
nu_poisson_range=nu_poisson_range, eps_pod=eps_pod, n_rom_cut=n_rom_cut,
print_info=True):
"""
Build a reduced-order model, i.e. compute the POD and the ROM matrices and load vectors
Parameters
----------
grid : int, tuple, list optional
The grid size for the sampling, if int use it for both parameters.
If None set to rb_grid from default_constants
The default is None.
mode : str, optional
sampling mode.
If None set to pod_sampling_mode from default_constants
The default is None.
e_young_range : tuple, optional
range for the Young's modulus E.
If None set to e_young_range from default_constants
The default is None.
nu_poisson_range : tuple, optional
range for the poisson ratio nu.
If None set to nu_poisson_range from default_constants
The default is None.
eps_pod : float, optional
tolerance for the POD algorithm.
If None set to eps_POD from default_constants
The default is None.
n_rom_cut : int, str, optional
value to cut n_rom at, may be the string "rank" for the rank of the solution matrix.
If None set to n_rom_cut from default_constants
The default is None.
print_info : bool, optional
print builder info. The default is True.
Raises
------
NotImplementedError
if the functions is called on a problem from saved files.
Returns
-------
None.
"""
if self._is_form_files:
error_text = "Computing the reduced basis form saved data gives different results, " \
+ "and is therefore not implemented. (Most likely because instability in " \
+ "singular_values_squared computation in POD algorithm)."
raise NotImplementedError(error_text)
# set the parameters for building the reduce model
self._rb_data.set_rb_model_params(grid, e_young_range, nu_poisson_range, eps_pod, mode, n_rom_cut)
start_time = perf_counter()
# compute a reduced model by the POD algorithm using the energy norm.
pod_with_energy_norm(self, self._rb_data)
# compute the rb matrices and vectors
self._rb_data.compute_rb_matrices_and_vectors(self._rb_data.n_rom, self._hf_data, self._has_neumann,
self._has_non_homo_dirichlet, self._has_non_homo_neumann)
# save the last n_rom
self._rb_data.last_n_rom = self._rb_data.n_rom
# do not allow computation of v for larger n_rom
self._rb_data.set_n_rom_max()
if print_info:
print("Built reduced order model in {:.6f} sec".format(perf_counter() - start_time))
# reset uh
self._uh = SolutionFunctionValues2D()
self._is_pod_computed = True
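# Hedged usage sketch (added; the parameter values are illustrative only):
# typical offline/online workflow of the class.
#
#   problem.build_rb_model()            # offline: POD + assembly of ROM operators
#   problem.rbsolve(160.0, 0.3)         # online: solve for (e_young, nu_poisson)
#   u_recovered = problem.uh_rom        # reduced solution mapped back to full space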
def error_a_rb(self, e_young, nu_poisson, n_rom=None, compute_again=False, print_info=False):
"""
compute the error between the high-fidelity solution and
the recovered reduced-order solution in the energy norm
given a Young's modulus and Poisson ratio
Parameters
----------
e_young : float
Young's modulus.
nu_poisson : float
poisson ratio.
n_rom : int, optional
if set, the desired pod depth, else use the true n_rom. The default is None.
compute_again : bool, optional
force to compute the solutions again, i.e do not use the snapshot matrix.
The default is False.
print_info : bool, optional
compute error info. The default is False.
Raises
------
CanNotForceNromError
if the problem comes form saved files and n_rom is not None.
Setting n_rom not supported for problem from saved files.
Returns
-------
error_a : float
the error between the high-fidelity solution and
the recovered reduced-order solution in the energy norm.
"""
if self._is_form_files and n_rom is not None:
raise CanNotForceNromError("Can not force n_rom. Not implemented for LinearElasticity2DProblem for files.")
# set n_rom to n_rom-true if it is None
if n_rom is None:
n_rom = self._rb_data.n_rom
if compute_again:
# solve new
self.hfsolve(e_young, nu_poisson, print_info=print_info)
self.rbsolve(e_young, nu_poisson, n_rom=n_rom, print_info=print_info)
else:
# check if e_young and nu_poisson were used when solving the hf system
if not self._uh.check_e_young_and_nu_poisson(e_young, nu_poisson):
# check if the solution matrix does not exist
if self._rb_data.s_mat is None:
# solve new
self.hfsolve(e_young, nu_poisson, print_info=print_info)
else:
# get solution from s_mat
e_nu_mat = self.e_young_nu_poisson_mat
index = np.argwhere((e_nu_mat[:, 0] == e_young) & (e_nu_mat[:, 1] == nu_poisson)).ravel()
# check if e_young and nu_poisson were not used in the pod algorithm
if len(index) == 0:
# solve new
self.hfsolve(e_young, nu_poisson, print_info=print_info)
else:
# build from s_mat
uh = np.zeros(self._hf_data.n_full)
start_time = perf_counter()
uh[self._hf_data.expanded_free_index] = self._rb_data.s_mat[:, index].flatten()
if self._has_non_homo_dirichlet:
uh[self._hf_data.expanded_dirichlet_edge_index] = self._hf_data.rg
# set uh
self._uh = SolutionFunctionValues2D.from_1x2n(uh)
self._uh.set_e_young_and_nu_poisson(e_young, nu_poisson)
if print_info:
print("Loaded a @ uh = f_load from s_mat in {:.6f} sec".format(perf_counter() - start_time))
# check if e_young and nu_poisson were used when solving the rb system or n_rom is different from the last
# used n_rom
if not self._uh_rom.check_e_young_and_nu_poisson(e_young, nu_poisson) \
or n_rom != self._rb_data.last_n_rom:
self.rbsolve(e_young, nu_poisson, n_rom=n_rom, print_info=print_info)
# compute the error in the energy norm
err = self.uh.flatt_values - self.uh_rom.flatt_values
error_a = np.sqrt(err.T @ self.compute_a_full(e_young, nu_poisson) @ err)
return error_a
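# Note added for clarity: the quantity above is the energy norm induced by the
# stiffness matrix A(e_young, nu_poisson), i.e.
#   error_a = sqrt((u_h - u_rom)^T A (u_h - u_rom)),
# evaluated with the full high-fidelity matrix returned by compute_a_full.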
def f_func(self, x_vec, y_vec):
"""
The vectorized body force function
Parameters
----------
x_vec : np.array
array of x-values.
y_vec : np.array
array of y-values.
Raises
------
MissingInputFunctionPointerError
if we have no body force function.
Returns
-------
np.array
function values in the (x,y)-values.
"""
if self._f_func_vec is None:
raise MissingInputFunctionPointerError(
"f_func is not given, set it in the Linear Elasticity 2D Problem first.")
return self._f_func_vec(x_vec, y_vec)
def neumann_bc_func(self, x_vec, y_vec):
"""
The vectorized prescribed traction function for the Neumann boundary
Parameters
----------
x_vec : np.array
array of x-values.
y_vec : np.array
array of y-values.
Raises
------
MissingInputFunctionPointerError
if we have no prescribed traction function for the Neumann boundary.
Returns
-------
np.array
function values in the (x,y)-values.
"""
if self._neumann_bc_func_vec is None:
raise MissingInputFunctionPointerError(
"neumann_bc_func is not given, set it in the Linear Elasticity 2D Problem first.")
return self._neumann_bc_func_vec(x_vec, y_vec)
def dirichlet_bc_func(self, x_vec, y_vec):
"""
The vectorized prescribed displacement function for the Dirichlet boundary
Parameters
----------
x_vec : np.array
array of x-values.
y_vec : np.array
array of y-values.
Raises
------
MissingInputFunctionPointerError
if we have no prescribed displacement function for the Dirichlet boundary.
Returns
-------
np.array
function values in the (x,y)-values.
"""
if self._dirichlet_bc_func_vec is None:
raise MissingInputFunctionPointerError(
"dirichlet_bc_func is not given, set it in the Linear Elasticity 2D Problem first.")
return self._dirichlet_bc_func_vec(x_vec, y_vec)
def hf_nodal_stress(self, print_info=True):
"""
Compute the high-fidelity nodal stress
Parameters
----------
print_info : bool, optional
give user info. The default is True.
Raises
------
LinearElasticity2DProblemNotSolved
if the high-fidelity system has not been solved by hfsolve.
Returns
-------
None.
"""
if self._uh.values is None:
raise LinearElasticity2DProblemNotSolved("High fidelity Linear Elasticity 2D Problem has not been solved.")
get_nodal_stress(self._uh, self._hf_data.p, self._hf_data.tri)
if print_info:
print("Get nodal stress by the property uh.nodal_stress of the class.")
def nodal_stress(self, print_info=True):
"""
Default compute the stress, computes the high-fidelity nodal stress
Parameters
----------
print_info : bool, optional
give user info. The default is True.
Returns
-------
None.
"""
self.hf_nodal_stress(print_info=print_info)
def rb_nodal_stress(self, print_info=True):
"""
Compute the reduced-order nodal stress
Parameters
----------
print_info : bool, optional
give user info. The default is True.
Raises
------
LinearElasticity2DProblemNotSolved
if the reduced-order system has not been solved by rbsolve.
Returns
-------
None.
"""
if self._uh_rom.values is None:
raise LinearElasticity2DProblemNotSolved("Reduced order Linear Elasticity 2D Problem has not been solved.")
get_nodal_stress(self._uh_rom, self._hf_data.p, self._hf_data.tri)
WGS72BE_UTM_zone_49S = 32549
WGS72BE_UTM_zone_50S = 32550
WGS72BE_UTM_zone_51S = 32551
WGS72BE_UTM_zone_52S = 32552
WGS72BE_UTM_zone_53S = 32553
WGS72BE_UTM_zone_54S = 32554
WGS72BE_UTM_zone_55S = 32555
WGS72BE_UTM_zone_56S = 32556
WGS72BE_UTM_zone_57S = 32557
WGS72BE_UTM_zone_58S = 32558
WGS72BE_UTM_zone_59S = 32559
WGS72BE_UTM_zone_60S = 32560
WGS84_UTM_zone_1N = 32601
WGS84_UTM_zone_2N = 32602
WGS84_UTM_zone_3N = 32603
WGS84_UTM_zone_4N = 32604
WGS84_UTM_zone_5N = 32605
WGS84_UTM_zone_6N = 32606
WGS84_UTM_zone_7N = 32607
WGS84_UTM_zone_8N = 32608
WGS84_UTM_zone_9N = 32609
WGS84_UTM_zone_10N = 32610
WGS84_UTM_zone_11N = 32611
WGS84_UTM_zone_12N = 32612
WGS84_UTM_zone_13N = 32613
WGS84_UTM_zone_14N = 32614
WGS84_UTM_zone_15N = 32615
WGS84_UTM_zone_16N = 32616
WGS84_UTM_zone_17N = 32617
WGS84_UTM_zone_18N = 32618
WGS84_UTM_zone_19N = 32619
WGS84_UTM_zone_20N = 32620
WGS84_UTM_zone_21N = 32621
WGS84_UTM_zone_22N = 32622
WGS84_UTM_zone_23N = 32623
WGS84_UTM_zone_24N = 32624
WGS84_UTM_zone_25N = 32625
WGS84_UTM_zone_26N = 32626
WGS84_UTM_zone_27N = 32627
WGS84_UTM_zone_28N = 32628
WGS84_UTM_zone_29N = 32629
WGS84_UTM_zone_30N = 32630
WGS84_UTM_zone_31N = 32631
WGS84_UTM_zone_32N = 32632
WGS84_UTM_zone_33N = 32633
WGS84_UTM_zone_34N = 32634
WGS84_UTM_zone_35N = 32635
WGS84_UTM_zone_36N = 32636
WGS84_UTM_zone_37N = 32637
WGS84_UTM_zone_38N = 32638
WGS84_UTM_zone_39N = 32639
WGS84_UTM_zone_40N = 32640
WGS84_UTM_zone_41N = 32641
WGS84_UTM_zone_42N = 32642
WGS84_UTM_zone_43N = 32643
WGS84_UTM_zone_44N = 32644
WGS84_UTM_zone_45N = 32645
WGS84_UTM_zone_46N = 32646
WGS84_UTM_zone_47N = 32647
WGS84_UTM_zone_48N = 32648
WGS84_UTM_zone_49N = 32649
WGS84_UTM_zone_50N = 32650
WGS84_UTM_zone_51N = 32651
WGS84_UTM_zone_52N = 32652
WGS84_UTM_zone_53N = 32653
WGS84_UTM_zone_54N = 32654
WGS84_UTM_zone_55N = 32655
WGS84_UTM_zone_56N = 32656
WGS84_UTM_zone_57N = 32657
WGS84_UTM_zone_58N = 32658
WGS84_UTM_zone_59N = 32659
WGS84_UTM_zone_60N = 32660
WGS84_UTM_zone_1S = 32701
WGS84_UTM_zone_2S = 32702
WGS84_UTM_zone_3S = 32703
WGS84_UTM_zone_4S = 32704
WGS84_UTM_zone_5S = 32705
WGS84_UTM_zone_6S = 32706
WGS84_UTM_zone_7S = 32707
WGS84_UTM_zone_8S = 32708
WGS84_UTM_zone_9S = 32709
WGS84_UTM_zone_10S = 32710
WGS84_UTM_zone_11S = 32711
WGS84_UTM_zone_12S = 32712
WGS84_UTM_zone_13S = 32713
WGS84_UTM_zone_14S = 32714
WGS84_UTM_zone_15S = 32715
WGS84_UTM_zone_16S = 32716
WGS84_UTM_zone_17S = 32717
WGS84_UTM_zone_18S = 32718
WGS84_UTM_zone_19S = 32719
WGS84_UTM_zone_20S = 32720
WGS84_UTM_zone_21S = 32721
WGS84_UTM_zone_22S = 32722
WGS84_UTM_zone_23S = 32723
WGS84_UTM_zone_24S = 32724
WGS84_UTM_zone_25S = 32725
WGS84_UTM_zone_26S = 32726
WGS84_UTM_zone_27S = 32727
WGS84_UTM_zone_28S = 32728
WGS84_UTM_zone_29S = 32729
WGS84_UTM_zone_30S = 32730
WGS84_UTM_zone_31S = 32731
WGS84_UTM_zone_32S = 32732
WGS84_UTM_zone_33S = 32733
WGS84_UTM_zone_34S = 32734
WGS84_UTM_zone_35S = 32735
WGS84_UTM_zone_36S = 32736
WGS84_UTM_zone_37S = 32737
WGS84_UTM_zone_38S = 32738
WGS84_UTM_zone_39S = 32739
WGS84_UTM_zone_40S = 32740
WGS84_UTM_zone_41S = 32741
WGS84_UTM_zone_42S = 32742
WGS84_UTM_zone_43S = 32743
WGS84_UTM_zone_44S = 32744
WGS84_UTM_zone_45S = 32745
WGS84_UTM_zone_46S = 32746
WGS84_UTM_zone_47S = 32747
WGS84_UTM_zone_48S = 32748
WGS84_UTM_zone_49S = 32749
WGS84_UTM_zone_50S = 32750
WGS84_UTM_zone_51S = 32751
WGS84_UTM_zone_52S = 32752
WGS84_UTM_zone_53S = 32753
WGS84_UTM_zone_54S = 32754
WGS84_UTM_zone_55S = 32755
WGS84_UTM_zone_56S = 32756
WGS84_UTM_zone_57S = 32757
WGS84_UTM_zone_58S = 32758
WGS84_UTM_zone_59S = 32759
WGS84_UTM_zone_60S = 32760
# New
GGRS87_Greek_Grid = 2100
KKJ_Finland_zone_1 = 2391
KKJ_Finland_zone_2 = 2392
KKJ_Finland_zone_3 = 2393
KKJ_Finland_zone_4 = 2394
RT90_2_5_gon_W = 2400
Lietuvos_Koordinoei_Sistema_1994 = 2600
Estonian_Coordinate_System_of_1992 = 3300
HD72_EOV = 23700
Dealul_Piscului_1970_Stereo_70 = 31700
# Newer
Hjorsey_1955_Lambert = 3053
ISN93_Lambert_1993 = 3057
ETRS89_Poland_CS2000_zone_5 = 2176
ETRS89_Poland_CS2000_zone_6 = 2177
ETRS89_Poland_CS2000_zone_7 = 2178
ETRS89_Poland_CS2000_zone_8 = 2179
ETRS89_Poland_CS92 = 2180
class GCSE(enum.IntEnum):
"""Unspecified GCS based on ellipsoid."""
Undefined = 0
User_Defined = 32767
Airy1830 = 4001
AiryModified1849 = 4002
AustralianNationalSpheroid = 4003
Bessel1841 = 4004
BesselModified = 4005
BesselNamibia = 4006
Clarke1858 = 4007
Clarke1866 = 4008
Clarke1866Michigan = 4009
Clarke1880_Benoit = 4010
Clarke1880_IGN = 4011
Clarke1880_RGS = 4012
Clarke1880_Arc = 4013
Clarke1880_SGA1922 = 4014
Everest1830_1937Adjustment = 4015
Everest1830_1967Definition = 4016
Everest1830_1975Definition = 4017
Everest1830Modified = 4018
GRS1980 = 4019
Helmert1906 = 4020
IndonesianNationalSpheroid = 4021
International1924 = 4022
International1967 = 4023
Krassowsky1940 = 4024
NWL9D = 4025
NWL10D = 4026
Plessis1817 = 4027
Struve1860 = 4028
WarOffice = 4029
WGS84 = 4030
GEM10C = 4031
OSU86F = 4032
OSU91A = 4033
Clarke1880 = 4034
Sphere = 4035
class GCS(enum.IntEnum):
"""Geographic CS Type Codes."""
Undefined = 0
User_Defined = 32767
Adindan = 4201
AGD66 = 4202
AGD84 = 4203
Ain_el_Abd = 4204
Afgooye = 4205
Agadez = 4206
Lisbon = 4207
Aratu = 4208
Arc_1950 = 4209
Arc_1960 = 4210
Batavia = 4211
Barbados = 4212
Beduaram = 4213
Beijing_1954 = 4214
Belge_1950 = 4215
Bermuda_1957 = 4216
Bern_1898 = 4217
Bogota = 4218
Bukit_Rimpah = 4219
Camacupa = 4220
Campo_Inchauspe = 4221
Cape = 4222
Carthage = 4223
Chua = 4224
Corrego_Alegre = 4225
Cote_d_Ivoire = 4226
Deir_ez_Zor = 4227
Douala = 4228
Egypt_1907 = 4229
ED50 = 4230
ED87 = 4231
Fahud = 4232
Gandajika_1970 = 4233
Garoua = 4234
Guyane_Francaise = 4235
Hu_Tzu_Shan = 4236
HD72 = 4237
ID74 = 4238
Indian_1954 = 4239
Indian_1975 = 4240
Jamaica_1875 = 4241
JAD69 = 4242
Kalianpur = 4243
Kandawala = 4244
Kertau = 4245
KOC = 4246
La_Canoa = 4247
PSAD56 = 4248
Lake = 4249
Leigon = 4250
Liberia_1964 = 4251
Lome = 4252
Luzon_1911 = 4253
Hito_XVIII_1963 = 4254
Herat_North = 4255
Mahe_1971 = 4256
Makassar = 4257
EUREF89 = 4258
Malongo_1987 = 4259
Manoca = 4260
Merchich = 4261
Massawa = 4262
Minna = 4263
Mhast = 4264
Monte_Mario = 4265
M_poraloko = 4266
NAD27 = 4267
NAD_Michigan = 4268
NAD83 = 4269
Nahrwan_1967 = 4270
Naparima_1972 = 4271
GD49 = 4272
NGO_1948 = 4273
Datum_73 = 4274
NTF = 4275
NSWC_9Z_2 = 4276
OSGB_1936 = 4277
OSGB70 = 4278
OS_SN80 = 4279
Padang = 4280
Palestine_1923 = 4281
Pointe_Noire = 4282
GDA94 = 4283
Pulkovo_1942 = 4284
Qatar = 4285
Qatar_1948 = 4286
Qornoq = 4287
Loma_Quintana = 4288
Amersfoort = 4289
RT38 = 4290
SAD69 = 4291
Sapper_Hill_1943 = 4292
Schwarzeck = 4293
Segora = 4294
Serindung = 4295
Sudan = 4296
Tananarive = 4297
Timbalai_1948 = 4298
TM65 = 4299
TM75 = 4300
Tokyo = 4301
Trinidad_1903 = 4302
TC_1948 = 4303
Voirol_1875 = 4304
Voirol_Unifie = 4305
Bern_1938 = 4306
Nord_Sahara_1959 = 4307
Stockholm_1938 = 4308
Yacare = 4309
Yoff = 4310
Zanderij = 4311
MGI = 4312
Belge_1972 = 4313
DHDN = 4314
Conakry_1905 = 4315
WGS_72 = 4322
WGS_72BE = 4324
WGS_84 = 4326
Bern_1898_Bern = 4801
Bogota_Bogota = 4802
Lisbon_Lisbon = 4803
Makassar_Jakarta = 4804
MGI_Ferro = 4805
Monte_Mario_Rome = 4806
NTF_Paris = 4807
Padang_Jakarta = 4808
Belge_1950_Brussels = 4809
Tananarive_Paris = 4810
Voirol_1875_Paris = 4811
Voirol_Unifie_Paris = 4812
Batavia_Jakarta = 4813
ATF_Paris = 4901
NDG_Paris = 4902
# New GCS
Greek = 4120
GGRS87 = 4121
KKJ = 4123
RT90 = 4124
EST92 = 4133
Dealul_Piscului_1970 = 4317
Greek_Athens = 4815
class Ellipse(enum.IntEnum):
"""Ellipsoid Codes."""
Undefined = 0
User_Defined = 32767
Airy_1830 = 7001
Airy_Modified_1849 = 7002
Australian_National_Spheroid = 7003
Bessel_1841 = 7004
Bessel_Modified = 7005
Bessel_Namibia = 7006
Clarke_1858 = 7007
Clarke_1866 = 7008
Clarke_1866_Michigan = 7009
Clarke_1880_Benoit = 7010
Clarke_1880_IGN = 7011
Clarke_1880_RGS = 7012
Clarke_1880_Arc = 7013
Clarke_1880_SGA_1922 = 7014
Everest_1830_1937_Adjustment = 7015
Everest_1830_1967_Definition = 7016
Everest_1830_1975_Definition = 7017
Everest_1830_Modified = 7018
GRS_1980 = 7019
Helmert_1906 = 7020
Indonesian_National_Spheroid = 7021
International_1924 = 7022
International_1967 = 7023
Krassowsky_1940 = 7024
NWL_9D = 7025
NWL_10D = 7026
Plessis_1817 = 7027
Struve_1860 = 7028
War_Office = 7029
WGS_84 = 7030
GEM_10C = 7031
OSU86F = 7032
OSU91A = 7033
Clarke_1880 = 7034
Sphere = 7035
class DatumE(enum.IntEnum):
"""Ellipsoid-Only Geodetic Datum Codes."""
Undefined = 0
User_Defined = 32767
Airy1830 = 6001
AiryModified1849 = 6002
AustralianNationalSpheroid = 6003
Bessel1841 = 6004
BesselModified = 6005
BesselNamibia = 6006
Clarke1858 = 6007
Clarke1866 = 6008
Clarke1866Michigan = 6009
Clarke1880_Benoit = 6010
Clarke1880_IGN = 6011
Clarke1880_RGS = 6012
Clarke1880_Arc = 6013
Clarke1880_SGA1922 = 6014
Everest1830_1937Adjustment = 6015
Everest1830_1967Definition = 6016
Everest1830_1975Definition = 6017
Everest1830Modified = 6018
GRS1980 = 6019
Helmert1906 = 6020
IndonesianNationalSpheroid = 6021
International1924 = 6022
International1967 = 6023
Krassowsky1940 = 6024
NWL9D = 6025
NWL10D = 6026
Plessis1817 = 6027
Struve1860 = 6028
WarOffice = 6029
WGS84 = 6030
GEM10C = 6031
OSU86F = 6032
OSU91A = 6033
Clarke1880 = 6034
Sphere = 6035
class Datum(enum.IntEnum):
"""Geodetic Datum Codes."""
Undefined = 0
User_Defined = 32767
Adindan = 6201
Australian_Geodetic_Datum_1966 = 6202
Australian_Geodetic_Datum_1984 = 6203
Ain_el_Abd_1970 = 6204
Afgooye = 6205
Agadez = 6206
Lisbon = 6207
Aratu = 6208
Arc_1950 = 6209
Arc_1960 = 6210
Batavia = 6211
Barbados = 6212
Beduaram = 6213
is unknown. It is never used in Blizzard scripts.'),
('if_dif','The definition of this command is unknown. It is never used in Blizzard scripts.'),
('if_towns','The definition of this command is unknown. It is never used in Blizzard scripts.'),
('implode','The definition of this command is unknown. It is never used in Blizzard scripts.'),
('prep_down','The definition of this command is unknown. It is never used in Blizzard scripts.'),
('quick_attack','The definition of this command is unknown. It is never used in Blizzard scripts.'),
('wait_bunkers','The definition of this command is unknown. It is never used in Blizzard scripts.'),
('wait_secure','The definition of this command is unknown. It is never used in Blizzard scripts.'),
('wait_turrets','The definition of this command is unknown. It is never used in Blizzard scripts.'),
('wait_upgrades','The definition of this command is unknown. It is never used in Blizzard scripts.'),
]),
]
CMD_HELP = odict()
for s,cmdl in cmds:
CMD_HELP[s] = odict()
for c,h in cmdl:
CMD_HELP[s][c] = h
#
class FindReplaceDialog(PyMSDialog):
def __init__(self, parent):
self.resettimer = None
PyMSDialog.__init__(self, parent, 'Find/Replace', grabwait=False)
def widgetize(self):
self.resizable(True, False)
self.find = StringVar()
self.replacewith = StringVar()
self.replace = IntVar()
self.inselection = IntVar()
self.casesens = IntVar()
self.regex = IntVar()
self.multiline = IntVar()
self.updown = IntVar()
self.updown.set(1)
l = Frame(self)
f = Frame(l)
s = Frame(f)
Label(s, text='Find:', anchor=E, width=12).pack(side=LEFT)
self.findentry = TextDropDown(s, self.find, self.parent.parent.findhistory, 30)
self.findentry.c = self.findentry['bg']
self.findentry.pack(fill=X)
self.findentry.entry.selection_range(0, END)
self.findentry.focus_set()
s.pack(fill=X)
s = Frame(f)
Label(s, text='Replace With:', anchor=E, width=12).pack(side=LEFT)
self.replaceentry = TextDropDown(s, self.replacewith, self.parent.parent.replacehistory, 30)
self.replaceentry.pack(fill=X)
s.pack(fill=X)
f.pack(side=TOP, fill=X, pady=2)
f = Frame(l)
self.selectcheck = Checkbutton(f, text='In Selection', variable=self.inselection, anchor=W)
self.selectcheck.pack(fill=X)
Checkbutton(f, text='Case Sensitive', variable=self.casesens, anchor=W).pack(fill=X)
Checkbutton(f, text='Regular Expression', variable=self.regex, anchor=W, command=lambda i=1: self.check(i)).pack(fill=X)
self.multicheck = Checkbutton(f, text='Multi-Line', variable=self.multiline, anchor=W, state=DISABLED, command=lambda i=2: self.check(i))
self.multicheck.pack(fill=X)
f.pack(side=LEFT, fill=BOTH)
f = Frame(l)
lf = LabelFrame(f, text='Direction')
self.up = Radiobutton(lf, text='Up', variable=self.updown, value=0, anchor=W)
self.up.pack(fill=X)
self.down = Radiobutton(lf, text='Down', variable=self.updown, value=1, anchor=W)
self.down.pack()
lf.pack()
f.pack(side=RIGHT, fill=Y)
l.pack(side=LEFT, fill=BOTH, pady=2, expand=1)
l = Frame(self)
Button(l, text='Find Next', command=self.findnext).pack(fill=X, pady=1)
Button(l, text='Count', command=self.count).pack(fill=X, pady=1)
self.replacebtn = Button(l, text='Replace', command=lambda i=1: self.findnext(replace=i))
self.replacebtn.pack(fill=X, pady=1)
self.repallbtn = Button(l, text='Replace All', command=self.replaceall)
self.repallbtn.pack(fill=X, pady=1)
Button(l, text='Close', command=self.ok).pack(fill=X, pady=4)
l.pack(side=LEFT, fill=Y, padx=2)
self.bind('<Return>', self.findnext)
self.bind('<FocusIn>', lambda e,i=3: self.check(i))
if 'findreplacewindow' in self.parent.parent.settings:
loadsize(self, self.parent.parent.settings, 'findreplacewindow')
return self.findentry
def check(self, i):
if i == 1:
if self.regex.get():
self.multicheck['state'] = NORMAL
else:
self.multicheck['state'] = DISABLED
self.multiline.set(0)
if i in [1,2]:
s = [NORMAL,DISABLED][self.multiline.get()]
self.up['state'] = s
self.down['state'] = s
if s == DISABLED:
self.updown.set(1)
elif i == 3:
if self.parent.text.tag_ranges('Selection'):
self.selectcheck['state'] = NORMAL
else:
self.selectcheck['state'] = DISABLED
self.inselection.set(0)
def findnext(self, key=None, replace=0):
f = self.find.get()
if not f in self.parent.parent.findhistory:
self.parent.parent.findhistory.append(f)
if f:
regex = f
if not self.regex.get():
regex = re.escape(regex)
try:
r = re.compile(regex, [re.I,0][self.casesens.get()] | [0,re.M | re.S][self.multiline.get()])
except:
self.resettimer = self.after(1000, self.updatecolor)
self.findentry['bg'] = '#FFB4B4'
return
if replace:
rep = self.replacewith.get()
if not rep in self.parent.parent.replacehistory:
self.parent.parent.replacehistory.append(rep)
item = self.parent.text.tag_ranges('Selection')
if item and r.match(self.parent.text.get(*item)):
ins = r.sub(rep, self.parent.text.get(*item))
self.parent.text.delete(*item)
self.parent.text.insert(item[0], ins)
self.parent.text.update_range(item[0])
if self.multiline.get():
m = r.search(self.parent.text.get(INSERT, END))
if m:
self.parent.text.tag_remove('Selection', '1.0', END)
s,e = '%s +%sc' % (INSERT, m.start(0)),'%s +%sc' % (INSERT,m.end(0))
self.parent.text.tag_add('Selection', s, e)
self.parent.text.mark_set(INSERT, e)
self.parent.text.see(s)
self.check(3)
else:
p = self
if key and key.keycode == 13:
p = self.parent
askquestion(parent=p, title='Find', message="Can't find text.", type=OK)
else:
u = self.updown.get()
s,lse,rlse,e = ['-','+'][u],['lineend','linestart'][u],['linestart','lineend'][u],[self.parent.text.index('1.0 lineend'),self.parent.text.index(END)][u]
i = self.parent.text.index(INSERT)
if i == e:
return
if i == self.parent.text.index('%s %s' % (INSERT, rlse)):
i = self.parent.text.index('%s %s1lines %s' % (INSERT, s, lse))
n = -1
while not u or i != e:
if u:
m = r.search(self.parent.text.get(i, '%s %s' % (i, rlse)))
else:
m = None
a = r.finditer(self.parent.text.get('%s %s' % (i, rlse), i))
c = 0
for x,f in enumerate(a):
if x == n or n == -1:
m = f
c = x
n = c - 1
if m:
self.parent.text.tag_remove('Selection', '1.0', END)
if u:
s,e = '%s +%sc' % (i,m.start(0)),'%s +%sc' % (i,m.end(0))
self.parent.text.mark_set(INSERT, e)
else:
s,e = '%s linestart +%sc' % (i,m.start(0)),'%s linestart +%sc' % (i,m.end(0))
self.parent.text.mark_set(INSERT, s)
self.parent.text.tag_add('Selection', s, e)
self.parent.text.see(s)
self.check(3)
break
if (not u and n == -1 and self.parent.text.index('%s lineend' % i) == e) or i == e:
p = self
if key and key.keycode == 13:
p = self.parent
askquestion(parent=p, title='Find', message="Can't find text.", type=OK)
break
i = self.parent.text.index('%s %s1lines %s' % (i, s, lse))
else:
p = self
if key and key.keycode == 13:
p = self.parent
askquestion(parent=p, title='Find', message="Can't find text.", type=OK)
def count(self):
f = self.find.get()
if f:
regex = f
if not self.regex.get():
regex = re.escape(regex)
try:
r = re.compile(regex, [re.I,0][self.casesens.get()] | [0,re.M | re.S][self.multiline.get()])
            except re.error:
self.resettimer = self.after(1000, self.updatecolor)
self.findentry['bg'] = '#FFB4B4'
return
askquestion(parent=self, title='Count', message='%s matches found.' % len(r.findall(self.parent.text.get('1.0', END))), type=OK)
def replaceall(self):
f = self.find.get()
if f:
regex = f
if not self.regex.get():
regex = re.escape(regex)
try:
r = re.compile(regex, [re.I,0][self.casesens.get()] | [0,re.M | re.S][self.multiline.get()])
            except re.error:
self.resettimer = self.after(1000, self.updatecolor)
self.findentry['bg'] = '#FFB4B4'
return
text = r.subn(self.replacewith.get(), self.parent.text.get('1.0', END))
if text[1]:
self.parent.text.delete('1.0', END)
self.parent.text.insert('1.0', text[0].rstrip('\n'))
self.parent.text.update_range('1.0')
askquestion(parent=self, title='Replace Complete', message='%s matches replaced.' % text[1], type=OK)
def updatecolor(self):
if self.resettimer:
self.after_cancel(self.resettimer)
self.resettimer = None
self.findentry['bg'] = self.findentry.c
def destroy(self):
self.parent.parent.settings['findreplacewindow'] = self.winfo_geometry()
PyMSDialog.withdraw(self)
class CodeColors(PyMSDialog):
def __init__(self, parent):
self.cont = False
self.tags = dict(parent.text.tags)
self.info = odict()
self.info['Block'] = 'The color of a --block-- in the code.'
self.info['Keywords'] = 'Keywords:\n extdef aiscript bwscript'
self.info['Types'] = 'Variable types:\n ' + ' '.join(AIBIN.types)
self.info['Commands'] = 'The color of all the commands.'
self.info['Number'] = 'The color of all numbers.'
self.info['TBL Format'] = 'The color of TBL formatted characters, like null: <0>'
self.info['Info Comment'] = 'The color of a one line Extra Information Comment either for a script or block.'
self.info['MultiInfo Comment'] = 'The color of a multi-line Extra Information Comment either for a script or block.'
self.info['Comment'] = 'The color of a regular comment.'
self.info['AI ID'] = 'The color of the AI ID in the AI header.'
self.info['Header String'] = 'The color of the String index in the AI header.'
self.info['Header Flags'] = 'The color of the Flags in the AI header'
self.info['Operators'] = 'The color of the operators:\n ( ) , = :'
self.info['Error'] = 'The color of an error when compiling.'
self.info['Warning'] = 'The color of a warning when compiling.'
self.info['Selection'] = 'The color of selected text in the editor.'
PyMSDialog.__init__(self, parent, 'Color Settings')
def widgetize(self):
self.resizable(False, False)
self.listbox = Listbox(self, font=couriernew, width=20, height=16, exportselection=0, activestyle=DOTBOX)
self.listbox.bind('<ButtonRelease-1>', self.select)
for t in self.info.keys():
self.listbox.insert(END, t)
self.listbox.select_set(0)
self.listbox.pack(side=LEFT, fill=Y, padx=2, pady=2)
self.fg = IntVar()
self.bg = IntVar()
self.bold = IntVar()
self.infotext = StringVar()
r = Frame(self)
opt = LabelFrame(r, text='Style:', padx=5, pady=5)
f = Frame(opt)
c = Checkbutton(f, text='Foreground', variable=self.fg, width=20, anchor=W)
c.bind('<ButtonRelease-1>', lambda e,i=0: self.select(e,i))
c.grid(sticky=W)
c = Checkbutton(f, text='Background', variable=self.bg)
c.bind('<ButtonRelease-1>', lambda e,i=1: self.select(e,i))
c.grid(sticky=W)
c = Checkbutton(f, text='Bold', variable=self.bold)
c.bind('<ButtonRelease-1>', lambda e,i=2: self.select(e,i))
c.grid(sticky=W)
self.fgcanvas = Canvas(f, width=32, height=32, background='#000000')
self.fgcanvas.bind('<Button-1>', lambda e,i=0: self.colorselect(e, i))
self.fgcanvas.grid(column=1, row=0)
self.bgcanvas = Canvas(f, width=32, height=32, background='#000000')
self.bgcanvas.bind('<Button-1>', lambda e,i=1: self.colorselect(e, i))
self.bgcanvas.grid(column=1, row=1)
f.pack(side=TOP)
Label(opt, textvariable=self.infotext, height=6, justify=LEFT).pack(side=BOTTOM, fill=X)
opt.pack(side=TOP, fill=Y, expand=1, padx=2, pady=2)
f = Frame(r)
ok = Button(f, text='Ok', width=10, command=self.ok)
ok.pack(side=LEFT, padx=3)
Button(f, text='Cancel', width=10, command=self.cancel).pack(side=LEFT)
f.pack(side=BOTTOM, pady=2)
r.pack(side=LEFT, fill=Y)
self.select()
return ok
def select(self, e=None, n=None):
i = self.info.getkey(int(self.listbox.curselection()[0]))
s = self.tags[i.replace(' ', '')]
if n == None:
t = self.info[i].split('\n')
text = ''
if len(t) == 2:
d = ' '
text = t[0] + '\n'
else:
d = ''
text += fit(d, t[-1], 35, True)[:-1]
self.infotext.set(text)
if s['foreground'] == None:
self.fg.set(0)
self.fgcanvas['background'] = '#000000'
else:
self.fg.set(1)
self.fgcanvas['background'] = s['foreground']
if s['background'] == None:
self.bg.set(0)
self.bgcanvas['background'] = '#000000'
else:
self.bg.set(1)
self.bgcanvas['background'] = s['background']
self.bold.set(s['font'] != None)
else:
v = [self.fg,self.bg,self.bold][n].get()
if n == 2:
s['font'] = [self.parent.text.boldfont,couriernew][v]
else:
s[['foreground','background'][n]] = ['#000000',None][v]
if v:
[self.fgcanvas,self.bgcanvas][n]['background'] = '#000000'
def colorselect(self, e, i):
if [self.fg,self.bg][i].get():
v = [self.fgcanvas,self.bgcanvas][i]
g = ['foreground','background'][i]
c = tkColorChooser.askcolor(parent=self, initialcolor=v['background'], title='Select %s color' % g)
if c[1]:
v['background'] = c[1]
self.tags[self.info.getkey(int(self.listbox.curselection()[0])).replace(' ','')][g] = c[1]
self.focus_set()
def ok(self):
self.cont = self.tags
PyMSDialog.ok(self)
def cancel(self):
self.cont = False
PyMSDialog.ok(self)
class AICodeText(CodeText):
def __init__(self, parent, ai, ecallback=None, icallback=None, scallback=None, highlights=None):
self.ai = ai
self.boldfont = ('Courier New', -11, 'bold')
if highlights:
self.highlights = highlights
else:
self.highlights = {
'Block':{'foreground':'#FF00FF','background':None,'font':None},
'Keywords':{'foreground':'#0000FF','background':None,'font':self.boldfont},
'Types':{'foreground':'#0000FF','background':None,'font':self.boldfont},
'Commands':{'foreground':'#0000AA','background':None,'font':None},
'Number':{'foreground':'#FF0000','background':None,'font':None},
'TBLFormat':{'foreground':None,'background':'#E6E6E6','font':None},
'InfoComment':{'foreground':'#FF963C','background':None,'font':None},
'MultiInfoComment':{'foreground':'#FF963C','background':None,'font':None},
'Comment':{'foreground':'#008000','background':None,'font':None},
'AIID':{'foreground':'#FF00FF','background':None,'font':self.boldfont},
'HeaderString':{'foreground':'#FF0000','background':None,'font':self.boldfont},
'HeaderFlags':{'foreground':'#8000FF','background':None,'font':self.boldfont},
'Operators':{'foreground':'#0000FF','background':None,'font':self.boldfont},
'Newline':{'foreground':None,'background':None,'font':None},
'Error':{'foreground':None,'background':'#FF8C8C','font':None},
'Warning':{'foreground':None,'background':'#FFC8C8','font':None},
}
CodeText.__init__(self, parent, ecallback, icallback, scallback)
self.text.bind('<Control-q>', self.commentrange)
def setedit(self):
if self.ecallback != None:
self.ecallback()
self.edited = True
def commentrange(self, e=None):
item = self.tag_ranges('Selection')
if item:
head,tail = self.index('%s linestart' % item[0]),self.index('%s linestart' % item[1])
while self.text.compare(head, '<=', tail):
                m = re.match(r'(\s*)(#?)(.*)', self.get(head, '%s lineend' % head))
if m.group(2):
self.tk.call(self.text.orig, 'delete', '%s +%sc' % (head, len(m.group(1))))
elif m.group(3):
self.tk.call(self.text.orig, 'insert', head, '#')
head = self.index('%s +1line' % head)
self.update_range(self.index('%s linestart' % item[0]), self.index('%s lineend' % item[1]))
def setupparser(self):
infocomment = '(?P<InfoComment>\\{[^\\n]+\\})'
multiinfocomment = '^[ \\t]*(?P<MultiInfoComment>\\{[ \\t]*(?:\\n[^}]*)?\\}?)$'
comment = '(?P<Comment>#[^\\n]*$)'
header = '^(?P<AIID>[^\n\x00,():]{4})(?=\\([^#]+,[^#]+,[^#]+\\):.+$)'
header_string = '\\b(?P<HeaderString>\\d+)(?=,[^#]+,[^#]+\\):.+$)'
header_flags = '\\b(?P<HeaderFlags>[01]{3})(?=,[^#]+\\):.+$)'
block = '^[ \\t]*(?P<Block>--[^\x00:(),\\n]+--)(?=.+$)'
cmds = '\\b(?P<Commands>%s)\\b' % '|'.join(AIBIN.AIBIN.short_labels)
num = '\\b(?P<Number>\\d+)\\b'
tbl = '(?P<TBLFormat><0*(?:25[0-5]|2[0-4]\d|1?\d?\d)?>)'
operators = '(?P<Operators>[():,=])'
kw = '\\b(?P<Keywords>extdef|aiscript|bwscript)\\b'
types = '\\b(?P<Types>%s)\\b' % '|'.join(AIBIN.types)
self.basic = re.compile('|'.join((infocomment, multiinfocomment, comment, header, header_string, header_flags, block, cmds, num, tbl, operators, kw, types, '(?P<Newline>\\n)')), re.S | re.M)
self.tooptips = [CommandCodeTooltip(self.text,self.ai),TypeCodeTooltip(self.text,self.ai),StringCodeTooltip(self.text,self.ai),FlagCodeTooltip(self.text,self.ai)]
self.tags = dict(self.highlights)
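    # Illustrative note (added; the snippet below is a standalone sketch, not code from this
    # project): self.basic joins every token pattern into one alternation of named groups, so
    # colorize() can classify each match by whichever group is not None. The same idea with
    # made-up patterns:
    #
    #     import re
    #     parser = re.compile('|'.join(('(?P<Number>\\d+)', '(?P<Word>[A-Za-z_]+)')))
    #     for m in parser.finditer('wait 500'):
    #         tag = [k for k, v in m.groupdict().items() if v is not None][0]
    #         print(tag, m.group(0))  # prints: Word wait, then Number 500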
def colorize(self):
next = '1.0'
while True:
item = self.tag_nextrange("Update", next)
if not item:
break
head, tail = item
self.tag_remove('Newline', head, tail)
item = self.tag_prevrange('Newline', head)
if item:
head = item[1] + ' linestart'
else:
head = "1.0"
chars = ""
next = head
lines_to_get = 1
ok = False
while not ok:
mark = next
next = self.index(mark + '+%d lines linestart' % lines_to_get)
lines_to_get = min(lines_to_get * 2, 100)
ok = 'Newline' in self.tag_names(next + '-1c')
line = self.get(mark, next)
if not line:
return
for tag in self.tags.keys():
if tag != 'Selection':
self.tag_remove(tag, mark, next)
chars = chars + line
m = self.basic.search(chars)
while m:
for key, value in m.groupdict().items():
if value != None:
a, b = m.span(key)
self.tag_add(key, head + '+%dc' % a, head + '+%dc' % b)
m = self.basic.search(chars, m.end())
if 'Newline' in self.tag_names(next + '-1c'):
head = next
chars = ''
else:
ok = False
if not ok:
self.tag_add('Update', next)
self.update()
if not self.coloring:
return
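# Explanatory note on colorize() above: re-highlighting is incremental. Each range tagged
# 'Update' is extended back to the previous 'Newline' tag so scanning starts at a line
# boundary, old tags are stripped, and self.basic is re-applied; lines_to_get doubles per
# pass (capped at 100) so large edits are re-scanned in growing chunks.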
class CodeTooltip(Tooltip):
tag = ''
def __init__(self, widget, ai):
self.ai = ai
Tooltip.__init__(self, widget)
def setupbinds(self, press):
if self.tag:
self.widget.tag_bind(self.tag, '<Enter>', self.enter, '+')
self.widget.tag_bind(self.tag, '<Leave>', self.leave, '+')
self.widget.tag_bind(self.tag, '<Motion>', self.motion, '+')
self.widget.tag_bind(self.tag, '<Button-1>', self.leave, '+')
self.widget.tag_bind(self.tag, '<ButtonPress>', self.leave)
def showtip(self):
if self.tip:
return
t = ''
if self.tag:
pos = list(self.widget.winfo_pointerxy())
head,tail = self.widget.tag_prevrange(self.tag,self.widget.index('@%s,%s+1c' % (pos[0] - self.widget.winfo_rootx(),pos[1] - self.widget.winfo_rooty())))
t = self.widget.get(head,tail)
try:
t = self.gettext(t)
self.tip = Toplevel(self.widget, relief=SOLID, borderwidth=1)
self.tip.wm_overrideredirect(1)
frame = Frame(self.tip, background='#FFFFC8', borderwidth=0)
Label(frame, text=t, justify=LEFT, font=self.font, background='#FFFFC8', relief=FLAT).pack(padx=1, pady=1)
frame.pack()
pos = list(self.widget.winfo_pointerxy())
self.tip.wm_geometry('+%d+%d' % (pos[0],pos[1]+22))
self.tip.update_idletasks()
self.toolPlay.set_sensitive( can_play)
self.toolDownload.set_sensitive( can_download)
self.toolTransfer.set_sensitive( can_transfer)
self.toolCancel.set_sensitive( can_cancel)
if can_cancel:
self.item_cancel_download.show_all()
else:
self.item_cancel_download.hide_all()
if can_download:
self.itemDownloadSelected.show_all()
else:
self.itemDownloadSelected.hide_all()
if can_play:
self.itemPlaySelected.show_all()
self.itemDeleteSelected.show_all()
self.item_toggle_played.show_all()
self.item_toggle_lock.show_all()
self.separator9.show_all()
if is_played:
self.change_menu_item(self.item_toggle_played, gtk.STOCK_CANCEL, _('Mark as unplayed'))
else:
self.change_menu_item(self.item_toggle_played, gtk.STOCK_APPLY, _('Mark as played'))
if is_locked:
self.change_menu_item(self.item_toggle_lock, gtk.STOCK_DIALOG_AUTHENTICATION, _('Allow deletion'))
else:
self.change_menu_item(self.item_toggle_lock, gtk.STOCK_DIALOG_AUTHENTICATION, _('Prohibit deletion'))
else:
self.itemPlaySelected.hide_all()
self.itemDeleteSelected.hide_all()
self.item_toggle_played.hide_all()
self.item_toggle_lock.hide_all()
self.separator9.hide_all()
if can_play or can_download or can_cancel:
self.item_episode_details.show_all()
self.separator16.show_all()
self.no_episode_selected.hide_all()
else:
self.item_episode_details.hide_all()
self.separator16.hide_all()
self.no_episode_selected.show_all()
return ( can_play, can_download, can_transfer, can_cancel )
def download_status_updated( self):
count=services.download_status_manager.count()
if count:
self.labelDownloads.set_text( _('Downloads (%d)') % count)
else:
self.labelDownloads.set_text( _('Downloads'))
for channel in self.channels:
channel.update_model()
self.updateComboBox()
def on_cbMaxDownloads_toggled(self, widget, *args):
self.spinMaxDownloads.set_sensitive(self.cbMaxDownloads.get_active())
def on_cbLimitDownloads_toggled(self, widget, *args):
self.spinLimitDownloads.set_sensitive(self.cbLimitDownloads.get_active())
def updateComboBox(self, selected_url=None):
(model, iter)=self.treeChannels.get_selection().get_selected()
if model and iter and selected_url is None:
# Get the URL of the currently-selected podcast
selected_url=model.get_value(iter, 0)
rect=self.treeChannels.get_visible_rect()
self.treeChannels.set_model(channels_to_model(self.channels))
util.idle_add(self.treeChannels.scroll_to_point, rect.x, rect.y)
try:
selected_path=(0,)
# Find the previously-selected URL in the new
# model if we have an URL (else select first)
if selected_url is not None:
model=self.treeChannels.get_model()
pos=model.get_iter_first()
while pos is not None:
url=model.get_value(pos, 0)
if url == selected_url:
selected_path=model.get_path(pos)
break
pos=model.iter_next(pos)
self.treeChannels.get_selection().select_path(selected_path)
except:
log( 'Cannot set selection on treeChannels', sender=self)
self.on_treeChannels_cursor_changed( self.treeChannels)
def updateTreeView( self):
if self.channels and self.active_channel is not None:
self.treeAvailable.set_model(self.active_channel.tree_model)
self.treeAvailable.columns_autosize()
self.play_or_download()
else:
if self.treeAvailable.get_model():
self.treeAvailable.get_model().clear()
def drag_data_received(self, widget, context, x, y, sel, ttype, time):
result=sel.data
self.add_new_channel( result)
def add_new_channel(self, result=None, ask_download_new=True):
result=util.normalize_feed_url( result)
if result:
for old_channel in self.channels:
if old_channel.url == result:
log( 'Channel already exists: %s', result)
# Select the existing channel in combo box
for i in range( len( self.channels)):
if self.channels[i] == old_channel:
self.treeChannels.get_selection().select_path( (i,))
self.on_treeChannels_cursor_changed(self.treeChannels)
break
self.show_message( _('You have already subscribed to this podcast: %s') % ( saxutils.escape( old_channel.title), ), _('Already added'))
return
log( 'Adding new channel: %s', result)
try:
channel=podcastChannel.get_by_url( url=result, force_update=True)
except:
log('Error in podcastChannel.get_by_url(%s)', result, sender=self)
channel=None
if channel:
self.channels.append( channel)
save_channels( self.channels)
# download changed channels and select the new episode in the UI afterwards
self.update_feed_cache(force_update=False, select_url_afterwards=channel.url)
(username, password)=util.username_password_from_url( result)
if username and self.show_confirmation( _('You have supplied <b>%s</b> as username and a password for this feed. Would you like to use the same authentication data for downloading episodes?') % ( saxutils.escape( username), ), _('Password authentication')):
channel.username=username
channel.password=password
log('Saving authentication data for episode downloads..', sender=self)
channel.save_settings()
if ask_download_new:
new_episodes=channel.get_new_episodes()
if len(new_episodes):
self.new_episodes_show(new_episodes)
else:
title=_('Error adding podcast')
message=_('The podcast could not be added. Please check the spelling of the URL or try again later.')
self.show_message( message, title)
else:
if result:
title=_('URL scheme not supported')
message=_('gPodder currently only supports URLs starting with <b>http://</b>, <b>feed://</b> or <b>ftp://</b>.')
self.show_message( message, title)
else:
self.show_message(_('There has been an error adding this podcast. Please see the log output for more information.'), _('Error adding podcast'))
def update_feed_cache_callback(self, progressbar, position, count, force_update):
title=self.channels[position].title
if force_update:
progression=_('Updating %s (%d/%d)')%(title, position+1, count)
else:
progression=_('Loading %s (%d/%d)')%(title, position+1, count)
progressbar.set_text(progression)
if self.tray_icon:
self.tray_icon.set_status(self.tray_icon.STATUS_UPDATING_FEED_CACHE, progression)
if count > 0:
progressbar.set_fraction(float(position)/float(count))
def update_feed_cache_finish_callback(self, force_update=False, notify_no_new_episodes=False, select_url_afterwards=None):
self.hboxUpdateFeeds.hide_all()
self.btnUpdateFeeds.show_all()
# If we want to select a specific podcast (via its URL)
# after the update, we give it to updateComboBox here to
# select exactly this podcast after updating the view
self.updateComboBox(selected_url=select_url_afterwards)
if self.tray_icon:
self.tray_icon.set_status(None)
if self.minimized and force_update:
new_episodes=[]
# look for new episodes to notify
for channel in self.channels:
for episode in channel.get_new_episodes():
if not episode in self.already_notified_new_episodes:
new_episodes.append(episode)
self.already_notified_new_episodes.append(episode)
# notify new episodes
if len(new_episodes) == 0:
if notify_no_new_episodes and self.tray_icon is not None:
msg=_('No new episodes available for download')
self.tray_icon.send_notification(msg)
return
elif len(new_episodes) == 1:
title=_('gPodder has found %s') % (_('one new episode:'),)
else:
title=_('gPodder has found %s') % (_('%i new episodes:') % len(new_episodes))
message=self.tray_icon.format_episode_list(new_episodes)
#auto download new episodes
if gl.config.auto_download_when_minimized:
message += '\n<i>(%s...)</i>' % _('downloading')
self.download_episode_list(new_episodes)
self.tray_icon.send_notification(message, title)
return
# open the episodes selection dialog
if force_update:
self.on_itemDownloadAllNew_activate( self.gPodder)
def update_feed_cache_proc( self, force_update, callback_proc=None, callback_error=None, finish_proc=None):
is_cancelled_cb=lambda: self.feed_cache_update_cancelled
self.channels=load_channels(force_update=force_update, callback_proc=callback_proc, callback_error=callback_error, offline=not force_update, is_cancelled_cb=is_cancelled_cb, old_channels=self.channels)
if finish_proc:
finish_proc()
def on_btnCancelFeedUpdate_clicked(self, widget):
self.pbFeedUpdate.set_text(_('Cancelling...'))
self.feed_cache_update_cancelled=True
def update_feed_cache(self, force_update=True, notify_no_new_episodes=False, select_url_afterwards=None):
if self.tray_icon:
self.tray_icon.set_status(self.tray_icon.STATUS_UPDATING_FEED_CACHE)
# let's get down to business..
callback_proc=lambda pos, count: util.idle_add(self.update_feed_cache_callback, self.pbFeedUpdate, pos, count, force_update)
finish_proc=lambda: util.idle_add(self.update_feed_cache_finish_callback, force_update, notify_no_new_episodes, select_url_afterwards)
self.feed_cache_update_cancelled=False
self.btnUpdateFeeds.hide_all()
self.hboxUpdateFeeds.show_all()
args=(force_update, callback_proc, self.notification, finish_proc)
thread=Thread( target=self.update_feed_cache_proc, args=args)
thread.start()
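    # Note on the threading above: update_feed_cache_proc runs on a worker thread, while the
    # progress and finish callbacks are wrapped in util.idle_add so the actual GTK updates
    # happen on the main loop rather than on the worker thread.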
def download_podcast_by_url( self, url, want_message_dialog=True, widget=None):
if self.active_channel is None:
return
current_channel=self.active_channel
current_podcast=current_channel.find_episode( url)
filename=current_podcast.local_filename()
if widget:
if (widget.get_name() == 'itemPlaySelected' or widget.get_name() == 'toolPlay') and os.path.exists( filename):
# addDownloadedItem just to make sure the episode is marked correctly in localdb
current_channel.addDownloadedItem( current_podcast)
# open the file now
if current_podcast.file_type() != 'torrent':
self.playback_episode( current_channel, current_podcast)
return
if widget.get_name() == 'treeAvailable' or widget.get_name() == 'item_episode_details':
play_callback=lambda: self.playback_episode( current_channel, current_podcast)
download_callback=lambda: self.download_podcast_by_url( url, want_message_dialog, None)
gpe=gPodderEpisode( episode=current_podcast, channel=current_channel, download_callback=download_callback, play_callback=play_callback)
return
if not os.path.exists( filename) and not services.download_status_manager.is_download_in_progress( current_podcast.url):
download.DownloadThread( current_channel, current_podcast, self.notification).start()
else:
if want_message_dialog and os.path.exists( filename) and not current_podcast.file_type() == 'torrent':
title=_('Episode already downloaded')
message=_('You have already downloaded this episode. Click on the episode to play it.')
self.show_message( message, title)
elif want_message_dialog and not current_podcast.file_type() == 'torrent':
title=_('Download in progress')
message=_('You are currently downloading this episode. Please check the download status tab to check when the download is finished.')
self.show_message( message, title)
if os.path.exists( filename):
log( 'Episode has already been downloaded.')
current_channel.addDownloadedItem( current_podcast)
self.updateComboBox()
def on_gPodder_delete_event(self, widget, *args):
"""Called when the GUI wants to close the window
Displays a confirmation dialog (and closes/hides gPodder)
"""
downloading=services.download_status_manager.has_items()
# Only iconify if we are using the window's "X" button,
# but not when we are using "Quit" in the menu or toolbar
if not gl.config.on_quit_ask and gl.config.on_quit_systray and self.tray_icon and widget.name not in ('toolQuit', 'itemQuit'):
self.iconify_main_window()
elif gl.config.on_quit_ask or downloading:
if gpodder.interface == gpodder.MAEMO:
result=self.show_confirmation(_('Do you really want to quit gPodder now?'))
if result:
self.close_gpodder()
else:
return True
dialog=gtk.MessageDialog(self.gPodder, gtk.DIALOG_MODAL, gtk.MESSAGE_QUESTION, gtk.BUTTONS_NONE)
dialog.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
dialog.add_button(gtk.STOCK_QUIT, gtk.RESPONSE_CLOSE)
title=_('Quit gPodder')
if downloading:
message=_('You are downloading episodes. If you close gPodder now, the downloads will be aborted.')
else:
message=_('Do you really want to quit gPodder now?')
dialog.set_title(title)
dialog.set_markup('<span weight="bold" size="larger">%s</span>\n\n%s'%(title, message))
if not downloading:
cb_ask=gtk.CheckButton(_("Don't ask me again"))
dialog.vbox.pack_start(cb_ask)
cb_ask.show_all()
result=dialog.run()
dialog.destroy()
if result == gtk.RESPONSE_CLOSE:
if not downloading and cb_ask.get_active() == True:
gl.config.on_quit_ask=False
self.close_gpodder()
else:
self.close_gpodder()
return True
def close_gpodder(self):
""" clean everything and exit properly
"""
if self.channels:
if not save_channels(self.channels):
self.show_message(_('Please check your permissions and free disk space.'), _('Error saving podcast list'))
services.download_status_manager.cancel_all()
self.gtk_main_quit()
sys.exit( 0)
def get_old_episodes(self):
episodes=[]
for channel in self.channels:
for episode in channel.get_all_episodes():
if episode.is_downloaded() and episode.is_old() and not episode.is_locked() and episode.is_played():
episodes.append(episode)
return episodes
def for_each_selected_episode_url( self, callback):
( model, paths )=self.treeAvailable.get_selection().get_selected_rows()
for path in paths:
url=model.get_value( model.get_iter( path), 0)
try:
callback( url)
except:
log( 'Warning: Error in for_each_selected_episode_url for URL %s', url, sender=self)
self.active_channel.update_model()
self.updateComboBox()
def delete_episode_list( self, episodes, confirm=True):
if len(episodes) == 0:
return
if len(episodes) == 1:
message=_('Do you really want to delete this episode?')
else:
message=_('Do you really want to delete %d episodes?') % len(episodes)
if confirm and self.show_confirmation( message, _('Delete episodes')) == False:
return
for episode in episodes:
log('Deleting episode: %s', episode.title, sender=self)
episode.delete_from_disk()
self.download_status_updated()
def on_itemRemoveOldEpisodes_activate( self, widget):
columns=(
('title', _('Episode')),
('channel_prop', _('Podcast')),
('filesize_prop', _('Size')),
('pubdate_prop', _('Released')),
('played_prop', _('Status')),
('age_prop', _('Downloaded')),
)
selection_buttons={
_('Select played'): lambda episode: episode.is_played(),
_('Select older than %d days') % gl.config.episode_old_age: lambda episode: episode.is_old(),
}
instructions=_('Select the episodes you want to delete from your hard disk.')
episodes=[]
selected=[]
for channel in self.channels:
for episode in channel:
if episode.is_downloaded() and not episode.is_locked():
episodes.append( episode)
selected.append( episode.is_played())
gPodderEpisodeSelector( title=_('Remove old episodes'), instructions=instructions, \
episodes=episodes, selected=selected, columns=columns, \
stock_ok_button=gtk.STOCK_DELETE, callback=self.delete_episode_list, \
selection_buttons=selection_buttons)
def on_item_toggle_downloaded_activate( self, widget, toggle=True, new_value=False):
if toggle:
callback=lambda url: gl.history_mark_downloaded(url, not gl.history_is_downloaded(url))
else:
callback=lambda url: gl.history_mark_downloaded(url, new_value)
self.for_each_selected_episode_url( callback)
def on_item_toggle_played_activate( self, widget, toggle=True, new_value=False):
if toggle:
callback=lambda url: gl.history_mark_played(url, not gl.history_is_played(url))
else:
callback=lambda url: gl.history_mark_played(url, new_value)
self.for_each_selected_episode_url( callback)
def on_item_toggle_lock_activate(self, widget, toggle=True, new_value=False):
if toggle:
callback=lambda url: gl.history_mark_locked(url, not gl.history_is_locked(url))
else:
callback=lambda url: gl.history_mark_locked(url, new_value)
self.for_each_selected_episode_url(callback)
def on_item_email_subscriptions_activate(self, widget):
if not self.channels:
            self.show_message(_('Your subscription list is
of each specificity score for each group/class
"""
if len(np.unique(y_true)) < 3:
return specificity_score(y_true, y_pred)
else:
overall_score = 0
unique_classes = np.unique(y_true)
for pos_class in unique_classes:
overall_score += specificity_score(
y_true, y_pred, problem="multiclass", positive_class=pos_class
)
return overall_score / len(unique_classes)
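# Worked example (illustrative; assumes the usual TN / (TN + FP) definition of specificity):
# with y_true = [0, 1, 2, 1] and y_pred = [0, 2, 2, 1], average_specificity_score scores each
# class one-vs-rest (class 0 -> 1.0, class 1 -> 1.0, class 2 -> 2/3) and returns the
# unweighted mean, (1.0 + 1.0 + 2/3) / 3, roughly 0.889.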
def sensitivity_score(
y_true: Union[Sequence[int], np.ndarray, pd.Series],
y_pred: Union[Sequence[int], np.ndarray, pd.Series],
problem: str = "Binary",
positive_class: Union[str, int] = None,
) -> float:
"""This is exactly the same as recall
Parameters
----------
y_true: list or array like
The true, or the expected, values of our problem
y_pred: list or array like
The predicted values of our problem
    problem: str, ['binary', 'multiclass'], default='binary'
        Whether our problem is a binary classification or a multiclass classification problem
    positive_class: int or str, default=None
        If problem=='multiclass' then the class we are denoting as 'success' or 'positive' (i.e., the one marked as a 1).
Returns
-------
The sensitivity score
"""
problem_true, y_true, y_pred = _check_targets(y_true, y_pred)
if problem.casefold() == "binary":
tp, fp, fn, tn = get_classification_labels(y_true, y_pred)
elif problem.casefold() == "multiclass":
if positive_class:
if isinstance(positive_class, str) or isinstance(positive_class, int):
new_y_true = np.where(y_true == positive_class, 1, 0)
new_y_pred = np.where(y_pred == positive_class, 1, 0)
tp, fp, fn, tn = get_classification_labels(new_y_true, new_y_pred)
else:
raise Exception("Cannot discern positive class for multiclass problem")
else:
raise Exception("Cannot calculate sensitivity score with None")
else:
raise ValueError("Cannot determine problem type")
return tp / (tp + fn)
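# Hedged usage example (added for illustration; relies on _check_targets and
# get_classification_labels defined earlier in this module). With y_true = [1, 1, 1, 0] and
# y_pred = [1, 0, 1, 0] there are TP = 2 and FN = 1, so sensitivity (recall) is
# TP / (TP + FN) = 2 / 3.
if __name__ == "__main__":
    print(sensitivity_score([1, 1, 1, 0], [1, 0, 1, 0]))  # expected: 0.666...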
def average_sensitivity_score(
y_true: Union[Sequence[int], np.ndarray, pd.Series],
y_pred: Union[Sequence[int], np.ndarray, pd.Series],
) -> float:
"""Calculates the average sensitivity score. Used for when we have more than 2 classes and want our models' average
performance for each class
Parameters
----------
y_true: list or array like
The true, or the expected, values of our problem
y_pred: list or array like
The predicted values of our problem
Returns
-------
The average of each sensitivity score for each group/class
"""
if len(np.unique(y_true)) < 3:
return sensitivity_score(y_true, y_pred)
else:
overall_score = 0
unique_classes = np.unique(y_true)
for pos_class in unique_classes:
overall_score += sensitivity_score(
y_true, y_pred, problem="multiclass", positive_class=pos_class
)
return overall_score / len(unique_classes)
def power_score(
y_true: Union[Sequence[int], np.ndarray, pd.Series],
y_pred: Union[Sequence[int], np.ndarray, pd.Series],
problem: str = "Binary",
positive_class: Union[str, int] = None,
) -> float:
"""This is just another way of saying sensitivity
Parameters
----------
y_true: list or array like
The true, or the expected, values of our problem
y_pred: list or array like
The predicted values of our problem
    problem: str, ['binary', 'multiclass'], default='binary'
        Whether our problem is a binary classification or a multiclass classification problem
    positive_class: int or str, default=None
        If problem=='multiclass' then the class we are denoting as 'success' or 'positive' (i.e., the one marked as a 1).
Returns
-------
The sensitivity score
"""
return sensitivity_score(
y_true, y_pred, problem=problem, positive_class=positive_class
)
def average_power_score(
y_true: Union[Sequence[int], np.ndarray, pd.Series],
y_pred: Union[Sequence[int], np.ndarray, pd.Series],
) -> float:
"""This is another way of saying average_sensitivity_score
Parameters
----------
y_true: list or array like
The true, or the expected, values of our problem
y_pred: list or array like
The predicted values of our problem
Returns
-------
The average of each sensitivity score for each group/class
"""
return average_sensitivity_score(y_true, y_pred)
def negative_predictive_score(
y_true: Union[Sequence[int], np.ndarray, pd.Series],
y_pred: Union[Sequence[int], np.ndarray, pd.Series],
problem: str = "Binary",
positive_class: Union[str, int] = None,
) -> float:
"""Also known as problem II error score. Calculates the percentage of true negatives we correctly identified compared to
the number of true negative and false negatives.
Parameters
----------
y_true: list or array like
The true, or the expected, values of our problem
y_pred: list or array like
The predicted values of our problem
    problem: str, ['binary', 'multiclass'], default='binary'
        Whether our problem is a binary classification or a multiclass classification problem
    positive_class: int or str, default=None
        If problem=='multiclass' then the class we are denoting as 'success' or 'positive' (i.e., the one marked as a 1).
Returns
-------
The negative predictive score
"""
problem_true, y_true, y_pred = _check_targets(y_true, y_pred)
if problem.casefold() == "binary":
tp, fp, fn, tn = get_classification_labels(y_true, y_pred)
elif problem.casefold() == "multiclass":
if positive_class:
if isinstance(positive_class, str) or isinstance(positive_class, int):
new_y_true = np.where(y_true == positive_class, 1, 0)
new_y_pred = np.where(y_pred == positive_class, 1, 0)
tp, fp, fn, tn = get_classification_labels(new_y_true, new_y_pred)
else:
raise Exception("Cannot discern positive class for multiclass problem")
else:
raise Exception("Cannot calculate negative predictive score with None")
else:
raise ValueError("Cannot determine problem type")
return tn / (tn + fn)
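# Worked example (illustrative): negative_predictive_score returns TN / (TN + FN). With
# y_true = [0, 0, 1, 1] and y_pred = [0, 1, 0, 1] there is TN = 1 (index 0) and FN = 1
# (index 2), so the score is 1 / 2 = 0.5.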
def average_negative_predictive_score(
y_true: Union[Sequence[int], np.ndarray, pd.Series],
y_pred: Union[Sequence[int], np.ndarray, pd.Series],
) -> float:
"""Calculates the average negative predictive score. Used for when we have more than 2 classes and want our models'
average performance for each class
Parameters
----------
y_true: list or array like
The true, or the expected, values of our problem
y_pred: list or array like
The predicted values of our problem
Returns
-------
The average of each negative predictive score for each group/class
"""
if len(np.unique(y_true)) < 3:
return negative_predictive_score(y_true, y_pred)
else:
overall_score = 0
unique_classes = np.unique(y_true)
for pos_class in unique_classes:
overall_score += negative_predictive_score(
y_true, y_pred, problem="multiclass", positive_class=pos_class
)
return overall_score / len(unique_classes)
def false_negative_score(
y_true: Union[Sequence[int], np.ndarray, pd.Series],
y_pred: Union[Sequence[int], np.ndarray, pd.Series],
problem: str = "Binary",
positive_class: Union[str, int] = None,
) -> float:
"""The inverse of our false positive score, calculates the number of false negatives compared to the number of
false negatives and true positives.
Parameters
----------
y_true: list or array like
The true, or the expected, values of our problem
y_pred: list or array like
The predicted values of our problem
    problem: str, ['binary', 'multiclass'], default='binary'
        Whether our problem is a binary classification or a multiclass classification problem
    positive_class: int or str, default=None
        If problem=='multiclass' then the class we are denoting as 'success' or 'positive' (i.e., the one marked as a 1).
Returns
-------
    The false negative score
"""
problem_true, y_true, y_pred = _check_targets(y_true, y_pred)
if problem.casefold() == "binary":
tp, fp, fn, tn = get_classification_labels(y_true, y_pred)
elif problem.casefold() == "multiclass":
if positive_class:
if isinstance(positive_class, str) or isinstance(positive_class, int):
new_y_true = np.where(y_true == positive_class, 1, 0)
new_y_pred = np.where(y_pred == positive_class, 1, 0)
tp, fp, fn, tn = get_classification_labels(new_y_true, new_y_pred)
else:
raise Exception("Cannot discern positive class for multiclass problem")
else:
raise Exception("Cannot calculate false negative score with None")
else:
raise ValueError("Cannot determine problem type")
return fn / (fn + tp)
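# Worked example (illustrative): false_negative_score returns FN / (FN + TP), the complement
# of sensitivity. Reusing y_true = [1, 1, 1, 0] and y_pred = [1, 0, 1, 0] gives FN = 1 and
# TP = 2, so the score is 1 / 3, and 1/3 + 2/3 = 1 as expected.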
def average_false_negative_score(
y_true: Union[Sequence[int], np.ndarray, pd.Series],
y_pred: Union[Sequence[int], np.ndarray, pd.Series],
) -> float:
"""Calculates the average false negative score. Used for when we have more than 2 classes and want our models'
average performance for each class
Parameters
----------
y_true: list or array like
The true, or the expected, values of our problem
y_pred: list or array like
The predicted values of our problem
Returns
-------
The average of each false negative score for each group/class
"""
if len(np.unique(y_true)) < 3:
return false_negative_score(y_true, y_pred)
else:
overall_score = 0
unique_classes = np.unique(y_true)
for pos_class in unique_classes:
overall_score += false_negative_score(
y_true, y_pred, problem="multiclass", positive_class=pos_class
)
return overall_score / len(unique_classes)
def problem_two_error_score(
y_true: Union[Sequence[int], np.ndarray, pd.Series],
y_pred: Union[Sequence[int], np.ndarray, pd.Series],
problem: str = "Binary",
positive_class: Union[str, int] = None,
) -> float:
"""This is exactly the same as false negative score
Parameters
----------
y_true: list or array like
The true, or the expected, values of our problem
y_pred: list or array like
The predicted values of our problem
    problem: str, ['binary', 'multiclass'], default='binary'
        Whether our problem is a binary classification or a multiclass classification problem
    positive_class: int or str, default=None
        If problem=='multiclass' then the class we are denoting as 'success' or 'positive' (i.e., the one marked as a 1).
Returns
-------
The problem II error score
"""
return false_negative_score(
y_true, y_pred, problem=problem, positive_class=positive_class
)
def average_problem_two_error_score(
y_true: Union[Sequence[int], np.ndarray, pd.Series],
y_pred: Union[Sequence[int], np.ndarray, pd.Series],
) -> float:
"""This is exactly the same as average false negative score
Parameters
----------
y_true: list or array like
The true, or the expected, values of our problem
y_pred: list or array like
The predicted values of our problem
Returns
-------
The average of each problem II error score for each group/class
"""
return average_false_negative_score(y_true, y_pred)
def false_positive_score(
y_true: Union[Sequence[int], np.ndarray, pd.Series],
y_pred: Union[Sequence[int], np.ndarray, pd.Series],
| |
# -*- coding: utf-8 -*-
#BEGIN_HEADER
import logging
import os
from GenericsAPI.Utils.AttributeUtils import AttributesUtil
from GenericsAPI.Utils.BIOMUtil import BiomUtil
from GenericsAPI.Utils.CorrelationUtil import CorrelationUtil
from GenericsAPI.Utils.DataUtil import DataUtil
from GenericsAPI.Utils.MatrixUtil import MatrixUtil
from GenericsAPI.Utils.NetworkUtil import NetworkUtil
from GenericsAPI.Utils.PCAUtil import PCAUtil
from GenericsAPI.Utils.DataTableUtil import DataTableUtil
from GenericsAPI.Utils.TemplateUtil import TemplateUtil
from GenericsAPI.Utils.TaxonUtil import TaxonUtil
#END_HEADER
class GenericsAPI:
'''
Module Name:
GenericsAPI
Module Description:
'''
######## WARNING FOR GEVENT USERS ####### noqa
# Since asynchronous IO can lead to methods - even the same method -
# interrupting each other, you must be *very* careful when using global
# state. A method could easily clobber the state set by another while
# the latter method is running.
######################################### noqa
VERSION = "1.0.29"
GIT_URL = "<EMAIL>:Tianhao-Gu/GenericsAPI.git"
GIT_COMMIT_HASH = "21fb232fd3822ea717e39acc32cf1bab4eedfddc"
#BEGIN_CLASS_HEADER
#END_CLASS_HEADER
# config contains contents of config file in a hash or None if it couldn't
# be found
def __init__(self, config):
#BEGIN_CONSTRUCTOR
self.config = config
self.config['SDK_CALLBACK_URL'] = os.environ['SDK_CALLBACK_URL']
self.config['KB_AUTH_TOKEN'] = os.environ['KB_AUTH_TOKEN']
self.scratch = config['scratch']
self.attr_util = AttributesUtil(self.config)
self.matrix_util = MatrixUtil(self.config)
self.corr_util = CorrelationUtil(self.config)
self.data_util = DataUtil(self.config)
self.network_util = NetworkUtil(self.config)
self.biom_util = BiomUtil(self.config)
self.pca_util = PCAUtil(self.config)
self.data_table_util = DataTableUtil(self.config)
self.template_util = TemplateUtil(self.config)
self.taxon_util = TaxonUtil(self.config)
logging.basicConfig(format='%(created)s %(levelname)s: %(message)s',
level=logging.INFO)
#END_CONSTRUCTOR
pass
def fetch_data(self, ctx, params):
"""
fetch_data: fetch generics data as pandas dataframe for a generics data object
:param params: instance of type "FetchDataParams" (Input of the
fetch_data function obj_ref: generics object reference Optional
arguments: generics_module: the generics data module to be
retrieved from e.g. for an given data type like below: typedef
structure { FloatMatrix2D data; condition_set_ref
condition_set_ref; } SomeGenericsMatrix; generics_module should be
{'data': 'FloatMatrix2D', 'condition_set_ref':
'condition_set_ref'}) -> structure: parameter "obj_ref" of type
"obj_ref" (An X/Y/Z style reference), parameter "generics_module"
of mapping from String to String
:returns: instance of type "FetchDataReturn" (Ouput of the fetch_data
function data_matrix: a pandas dataframe in json format) ->
structure: parameter "data_matrix" of String
"""
# ctx is the context object
# return variables are: returnVal
#BEGIN fetch_data
returnVal = self.data_util.fetch_data(params)
#END fetch_data
# At some point might do deeper type checking...
if not isinstance(returnVal, dict):
raise ValueError('Method fetch_data return value ' +
'returnVal is not type dict as required.')
# return the results
return [returnVal]
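    # Illustrative sketch (the values below are hypothetical, following the docstring above):
    # fetch_data expects a params dict such as
    #     {'obj_ref': '1/2/3',
    #      'generics_module': {'data': 'FloatMatrix2D',
    #                          'condition_set_ref': 'condition_set_ref'}}
    # and the returned list holds one dict whose 'data_matrix' value is a pandas DataFrame
    # serialized as JSON.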
def export_matrix(self, ctx, params):
"""
:param params: instance of type "ExportParams" (Input of the
export_matrix function obj_ref: generics object reference Optional
arguments: generics_module: select the generics data to be
retrieved from e.g. for an given data type like below: typedef
structure { FloatMatrix2D data; condition_set_ref
condition_set_ref; } SomeGenericsMatrix; and only 'FloatMatrix2D'
is needed generics_module should be {'data': FloatMatrix2D'}) ->
structure: parameter "obj_ref" of type "obj_ref" (An X/Y/Z style
reference), parameter "generics_module" of mapping from String to
String
:returns: instance of type "ExportOutput" -> structure: parameter
"shock_id" of String
"""
# ctx is the context object
# return variables are: returnVal
#BEGIN export_matrix
returnVal = self.matrix_util.export_matrix(params)
#END export_matrix
# At some point might do deeper type checking...
if not isinstance(returnVal, dict):
raise ValueError('Method export_matrix return value ' +
'returnVal is not type dict as required.')
# return the results
return [returnVal]
def validate_data(self, ctx, params):
"""
validate_data: validate data
:param params: instance of type "ValidateParams" (Input of the
validate_data function obj_type: obj type e.g.:
'KBaseMatrices.ExpressionMatrix-1.1' data: data to be validated)
-> structure: parameter "obj_type" of String, parameter "data" of
mapping from String to String
:returns: instance of type "ValidateOutput" -> structure: parameter
"validated" of type "boolean" (A boolean - 0 for false, 1 for
true.), parameter "failed_constraint" of mapping from String to
String
"""
# ctx is the context object
# return variables are: returnVal
#BEGIN validate_data
returnVal = self.data_util.validate_data(params)
#END validate_data
# At some point might do deeper type checking...
if not isinstance(returnVal, dict):
raise ValueError('Method validate_data return value ' +
'returnVal is not type dict as required.')
# return the results
return [returnVal]
def import_matrix_from_excel(self, ctx, params):
"""
import_matrix_from_excel: import matrix object from excel
:param params: instance of type "ImportMatrixParams" (Input of the
import_matrix_from_excel function obj_type: a type in
KBaseMatrices input_shock_id: file shock id input_file_path:
absolute file path input_staging_file_path: staging area file path
matrix_name: matrix object name description: optional, a
description of the matrix workspace_name: workspace name matrix
object to be saved to optional: col_attributemapping_ref: column
AttributeMapping reference row_attributemapping_ref: row
AttributeMapping reference genome_ref: genome reference
diff_expr_matrix_ref: DifferentialExpressionMatrix reference
biochemistry_ref: (for ChemicalAbundanceMatrix) reads_set_ref:
list of reads_set associated with amplicon matrix sample_set_ref:
SampleSet object reference) -> structure: parameter "obj_type" of
String, parameter "input_shock_id" of String, parameter
"input_file_path" of String, parameter "input_staging_file_path"
of String, parameter "matrix_name" of String, parameter "scale" of
String, parameter "description" of String, parameter
"workspace_name" of type "workspace_name" (workspace name of the
object), parameter "genome_ref" of type "obj_ref" (An X/Y/Z style
reference), parameter "col_attributemapping_ref" of type "obj_ref"
(An X/Y/Z style reference), parameter "row_attributemapping_ref"
of type "obj_ref" (An X/Y/Z style reference), parameter
"diff_expr_matrix_ref" of type "obj_ref" (An X/Y/Z style
reference), parameter "biochemistry_ref" of type "obj_ref" (An
X/Y/Z style reference), parameter "reads_set_ref" of list of type
"obj_ref" (An X/Y/Z style reference), parameter "sample_set_ref"
of type "obj_ref" (An X/Y/Z style reference), parameter "unit" of
String, parameter "type" of String
:returns: instance of type "ImportMatrixOutput" -> structure:
parameter "report_name" of String, parameter "report_ref" of
String, parameter "matrix_obj_ref" of type "obj_ref" (An X/Y/Z
style reference)
"""
# ctx is the context object
# return variables are: returnVal
#BEGIN import_matrix_from_excel
returnVal = self.matrix_util.import_matrix_from_excel(params)
#END import_matrix_from_excel
# At some point might do deeper type checking...
if not isinstance(returnVal, dict):
raise ValueError('Method import_matrix_from_excel return value ' +
'returnVal is not type dict as required.')
# return the results
return [returnVal]
def import_matrix_from_biom(self, ctx, params):
"""
import_matrix_from_biom: import matrix object from BIOM file format
:param params: instance of type "ImportOTUParams" -> structure:
parameter "obj_type" of String, parameter
"taxonomic_abundance_tsv" of String, parameter "taxonomic_fasta"
of String, parameter "input_local_file" of String, parameter
"matrix_name" of String, parameter "scale" of String, parameter
"description" of String, parameter "workspace_id" of Long,
parameter "genome_ref" of type "obj_ref" (An X/Y/Z style
reference), parameter "col_attributemapping_ref" of type "obj_ref"
(An X/Y/Z style reference), parameter "row_attributemapping_ref"
of type "obj_ref" (An X/Y/Z style reference), parameter
"diff_expr_matrix_ref" of type "obj_ref" (An X/Y/Z style
reference), parameter "biochemistry_ref" of type "obj_ref" (An
X/Y/Z style reference), parameter "reads_set_ref" of type
"obj_ref" (An X/Y/Z style reference), parameter "sample_set_ref"
of type "obj_ref" (An X/Y/Z style reference), parameter
"amplicon_type" of String, parameter "extraction" of String,
parameter "amplification" of String, parameter "target_gene" of
String, parameter "target_subfragment" of list of String,
parameter "pcr_primers" of String, parameter "library_kit" of
String, parameter "library_layout" of String, parameter
"library_screening_strategy" of String, parameter
"sequencing_center" of String, parameter "sequencing_date" of
String, parameter "sequencing_technology" of String, parameter
"sequencing_instrument" of String, parameter
"sequencing_quality_filter_cutoff" of Long, parameter
"read_length_cutoff" of Long, parameter "read_pairing" of String,
parameter "barcode_error_rate" of Double, parameter
"chimera_detection_and_removal" of String, parameter
"metadata_keys" of list of String, parameter "taxon_calling" of
type "TaxonCalling" -> structure: parameter "taxon_calling_method"
of list of String, parameter "denoise_method" of String, parameter
"sequence_error_cutoff" of Double, parameter "clustering_method"
of String, parameter "clustering_cutoff" of Double
:returns: instance of type "ImportMatrixOutput" -> structure:
parameter "report_name" of String, parameter "report_ref" of
String, parameter "matrix_obj_ref" of type "obj_ref" (An X/Y/Z
style reference)
"""
# ctx is the context object
# return variables are: returnVal
#BEGIN import_matrix_from_biom
returnVal = self.biom_util.import_matrix_from_biom(params)
#END import_matrix_from_biom
# At some point might do deeper type checking...
if not isinstance(returnVal, dict):
raise ValueError('Method import_matrix_from_biom return value ' +
'returnVal is not type dict as required.')
# return the results
return [returnVal]
def save_object(self, ctx, params):
"""
save_object: validate data constraints and save matrix object
:param params: instance of type "SaveObjectParams" (Input of the
import_matrix_from_excel function obj_type: saving object data
type obj_name: saving object name data: data to be saved
workspace_id: workspace id matrix object to be saved to) ->
structure: parameter "obj_type" of String, parameter "obj_name" of
String, parameter "data" of mapping from String to String,
parameter "workspace_id" of Long
:returns: instance of type "SaveObjectOutput" -> structure: parameter
"obj_ref" of type "obj_ref" (An X/Y/Z style reference)
"""
# ctx is the context object
        # return variables are: returnVal
# Repository: sebtelko/pulumi-azure-native
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = [
'CacheExpirationActionParametersResponse',
'DeepCreatedOriginResponse',
'DeliveryRuleCacheExpirationActionResponse',
'DeliveryRuleResponse',
'DeliveryRuleUrlFileExtensionConditionResponse',
'DeliveryRuleUrlPathConditionResponse',
'EndpointPropertiesUpdateParametersResponseDeliveryPolicy',
'GeoFilterResponse',
'SkuResponse',
'UrlFileExtensionConditionParametersResponse',
'UrlPathConditionParametersResponse',
]
@pulumi.output_type
class CacheExpirationActionParametersResponse(dict):
"""
Defines the parameters for the cache expiration action.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "cacheBehavior":
suggest = "cache_behavior"
elif key == "cacheType":
suggest = "cache_type"
elif key == "odataType":
suggest = "odata_type"
elif key == "cacheDuration":
suggest = "cache_duration"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in CacheExpirationActionParametersResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
CacheExpirationActionParametersResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
CacheExpirationActionParametersResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
cache_behavior: str,
cache_type: str,
odata_type: str,
cache_duration: Optional[str] = None):
"""
Defines the parameters for the cache expiration action.
:param str cache_behavior: Caching behavior for the requests that include query strings.
:param str cache_type: The level at which the content needs to be cached.
:param str cache_duration: The duration for which the content needs to be cached. Allowed format is [d.]hh:mm:ss
"""
pulumi.set(__self__, "cache_behavior", cache_behavior)
pulumi.set(__self__, "cache_type", cache_type)
pulumi.set(__self__, "odata_type", odata_type)
if cache_duration is not None:
pulumi.set(__self__, "cache_duration", cache_duration)
@property
@pulumi.getter(name="cacheBehavior")
def cache_behavior(self) -> str:
"""
Caching behavior for the requests that include query strings.
"""
return pulumi.get(self, "cache_behavior")
@property
@pulumi.getter(name="cacheType")
def cache_type(self) -> str:
"""
The level at which the content needs to be cached.
"""
return pulumi.get(self, "cache_type")
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> str:
return pulumi.get(self, "odata_type")
@property
@pulumi.getter(name="cacheDuration")
def cache_duration(self) -> Optional[str]:
"""
The duration for which the content needs to be cached. Allowed format is [d.]hh:mm:ss
"""
return pulumi.get(self, "cache_duration")
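# Note (illustrative): the __key_warning/__getitem__/get overrides above let callers index
# this output like a raw dict using the wire-format camelCase key, e.g. resp["cacheBehavior"],
# while logging a suggestion to use the snake_case property resp.cache_behavior instead.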
@pulumi.output_type
class DeepCreatedOriginResponse(dict):
"""
The main origin of CDN content which is added when creating a CDN endpoint.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "hostName":
suggest = "host_name"
elif key == "httpPort":
suggest = "http_port"
elif key == "httpsPort":
suggest = "https_port"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DeepCreatedOriginResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DeepCreatedOriginResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DeepCreatedOriginResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
host_name: str,
name: str,
http_port: Optional[int] = None,
https_port: Optional[int] = None):
"""
The main origin of CDN content which is added when creating a CDN endpoint.
:param str host_name: The address of the origin. It can be a domain name, IPv4 address, or IPv6 address.
:param str name: Origin name
:param int http_port: The value of the HTTP port. Must be between 1 and 65535
:param int https_port: The value of the HTTPS port. Must be between 1 and 65535
"""
pulumi.set(__self__, "host_name", host_name)
pulumi.set(__self__, "name", name)
if http_port is not None:
pulumi.set(__self__, "http_port", http_port)
if https_port is not None:
pulumi.set(__self__, "https_port", https_port)
@property
@pulumi.getter(name="hostName")
def host_name(self) -> str:
"""
The address of the origin. It can be a domain name, IPv4 address, or IPv6 address.
"""
return pulumi.get(self, "host_name")
@property
@pulumi.getter
def name(self) -> str:
"""
Origin name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="httpPort")
def http_port(self) -> Optional[int]:
"""
The value of the HTTP port. Must be between 1 and 65535
"""
return pulumi.get(self, "http_port")
@property
@pulumi.getter(name="httpsPort")
def https_port(self) -> Optional[int]:
"""
The value of the HTTPS port. Must be between 1 and 65535
"""
return pulumi.get(self, "https_port")
@pulumi.output_type
class DeliveryRuleCacheExpirationActionResponse(dict):
"""
Defines the cache expiration action for the delivery rule.
"""
def __init__(__self__, *,
name: str,
parameters: 'outputs.CacheExpirationActionParametersResponse'):
"""
Defines the cache expiration action for the delivery rule.
:param str name: The name of the action for the delivery rule.
Expected value is 'CacheExpiration'.
:param 'CacheExpirationActionParametersResponse' parameters: Defines the parameters for the action.
"""
pulumi.set(__self__, "name", 'CacheExpiration')
pulumi.set(__self__, "parameters", parameters)
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the action for the delivery rule.
Expected value is 'CacheExpiration'.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def parameters(self) -> 'outputs.CacheExpirationActionParametersResponse':
"""
Defines the parameters for the action.
"""
return pulumi.get(self, "parameters")
@pulumi.output_type
class DeliveryRuleResponse(dict):
"""
A rule that specifies a set of actions and conditions
"""
def __init__(__self__, *,
actions: Sequence['outputs.DeliveryRuleCacheExpirationActionResponse'],
order: int,
conditions: Optional[Sequence[Any]] = None):
"""
A rule that specifies a set of actions and conditions
:param Sequence['DeliveryRuleCacheExpirationActionResponse'] actions: A list of actions that are executed when all the conditions of a rule are satisfied.
:param int order: The order in which the rules are applied for the endpoint. Possible values {0,1,2,3,………}. A rule with a lesser order will be applied before a rule with a greater order. Rule with order 0 is a special rule. It does not require any condition and actions listed in it will always be applied.
:param Sequence[Union['DeliveryRuleUrlFileExtensionConditionResponse', 'DeliveryRuleUrlPathConditionResponse']] conditions: A list of conditions that must be matched for the actions to be executed
"""
pulumi.set(__self__, "actions", actions)
pulumi.set(__self__, "order", order)
if conditions is not None:
pulumi.set(__self__, "conditions", conditions)
@property
@pulumi.getter
def actions(self) -> Sequence['outputs.DeliveryRuleCacheExpirationActionResponse']:
"""
A list of actions that are executed when all the conditions of a rule are satisfied.
"""
return pulumi.get(self, "actions")
@property
@pulumi.getter
def order(self) -> int:
"""
The order in which the rules are applied for the endpoint. Possible values {0,1,2,3,………}. A rule with a lesser order will be applied before a rule with a greater order. Rule with order 0 is a special rule. It does not require any condition and actions listed in it will always be applied.
"""
return pulumi.get(self, "order")
@property
@pulumi.getter
def conditions(self) -> Optional[Sequence[Any]]:
"""
A list of conditions that must be matched for the actions to be executed
"""
return pulumi.get(self, "conditions")
@pulumi.output_type
class DeliveryRuleUrlFileExtensionConditionResponse(dict):
"""
Defines the URL file extension condition for the delivery rule.
"""
def __init__(__self__, *,
name: str,
parameters: 'outputs.UrlFileExtensionConditionParametersResponse'):
"""
Defines the URL file extension condition for the delivery rule.
:param str name: The name of the condition for the delivery rule.
Expected value is 'UrlFileExtension'.
:param 'UrlFileExtensionConditionParametersResponse' parameters: Defines the parameters for the condition.
"""
pulumi.set(__self__, "name", 'UrlFileExtension')
pulumi.set(__self__, "parameters", parameters)
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the condition for the delivery rule.
Expected value is 'UrlFileExtension'.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def parameters(self) -> 'outputs.UrlFileExtensionConditionParametersResponse':
"""
Defines the parameters for the condition.
"""
return pulumi.get(self, "parameters")
@pulumi.output_type
class DeliveryRuleUrlPathConditionResponse(dict):
"""
Defines the URL path condition for the delivery rule.
"""
def __init__(__self__, *,
name: str,
parameters: 'outputs.UrlPathConditionParametersResponse'):
"""
Defines the URL path condition for the delivery rule.
:param str name: The name of the condition for the delivery rule.
Expected value is 'UrlPath'.
:param 'UrlPathConditionParametersResponse' parameters: Defines the parameters for the condition.
"""
pulumi.set(__self__, "name", 'UrlPath')
pulumi.set(__self__, "parameters", parameters)
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the condition for the delivery rule.
Expected value is 'UrlPath'.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def parameters(self) -> 'outputs.UrlPathConditionParametersResponse':
"""
Defines the parameters for the condition.
"""
return pulumi.get(self, "parameters")
@pulumi.output_type
class EndpointPropertiesUpdateParametersResponseDeliveryPolicy(dict):
"""
A policy that specifies the delivery rules to be used for an endpoint.
"""
def __init__(__self__, *,
rules: Sequence['outputs.DeliveryRuleResponse'],
description: Optional[str] = None):
"""
A policy that specifies the delivery rules to be used for an endpoint.
:param Sequence['DeliveryRuleResponse'] rules: A list of the delivery rules.
:param str description: User-friendly description of the policy.
"""
pulumi.set(__self__, "rules", rules)
if description is not None:
pulumi.set(__self__, "description", description)
@property
@pulumi.getter
def rules(self) -> Sequence['outputs.DeliveryRuleResponse']:
"""
A list of the delivery rules.
"""
return pulumi.get(self, "rules")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
User-friendly description of the policy.
"""
return pulumi.get(self, "description")
@pulumi.output_type
class GeoFilterResponse(dict):
"""
Rules defining user's geo access within a CDN endpoint.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "countryCodes":
suggest = "country_codes"
elif key == "relativePath":
# -*- coding: utf-8 -*-
"""
Created by e-bug on 24/03/17.
"""
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter, FormatStrFormatter
from matplotlib2tikz import save as tikz_save
import numpy as np
# colorblind palette
colorblind_palette_dict = {'black': (0,0,0),
'orange': (0.9,0.6,0),
'sky_blue': (0.35,0.7,0.9),
'bluish_green': (0,0.6,0.5),
'yellow': (0.95,0.9,0.25),
'blue': (0,0.45,0.7),
'vermillion': (0.8,0.4,0),
'reddish_purple': (0.8,0.6,0.7)}
palette_order = {0: 'vermillion', 1: 'bluish_green', 2: 'sky_blue', 3: 'orange',
4: 'black', 5: 'yellow', 6: 'blue', 7: 'reddish_purple'}
palette_order2 = {0: 'blue', 1: 'orange', 2: 'bluish_green', 3: 'yellow',
4: 'reddish_purple', 5: 'sky_blue', 6: 'vermillion', 7: 'black'}
n_colors = len(colorblind_palette_dict)
# ideal line
ideal_color = (0.5,0.5,0.5)
# markers
markers = ['o', '^', 's', 'D', '*', 'h', '.', '+']
n_markers = len(markers)
# linestyles
linestyles = [':', '-.', '--', '-']
n_linestyles = len(linestyles)
def plot_tts(n_nodes, lines, labels=None, legend_title='Problem size', xlabel='Number of nodes', xscale='log2',
ylabel='Time to solution [s]', yscale='log', cmap_name=None, filename=None, saveas='tikz',
figureheight = '\\figureheight', figurewidth = '\\figurewidth'):
"""
Plots the time to solution as a function of the number of nodes.
:param n_nodes: values in x-axis (i.e. number of nodes)
:param lines: list of lists, each with y values for each x value
:param labels: labels of the lines
:param legend_title: title of the legend
:param xlabel: label of x-axis
:param xscale: scale of x-axis: None: normal, log: base-10 logarithm, log2: base-2 logarithm
:param ylabel: label of y-axis
:param yscale: scale of y-axis: None: normal, log: base-10 logarithm, log2: base-2 logarithm
:param cmap_name: name of colormap to be used (see: http://matplotlib.org/examples/color/colormaps_reference.html).
If None, colorblind palette is used
    :param saveas: output format: 'tikz' (saved via matplotlib2tikz) or any extension supported by plt.savefig; if None, or if filename is None, the figure is shown instead of saved
"""
plt.figure(figsize=(12,8))
plt.grid()
# colormap
n_lines = len(lines)
line_colors = []
if cmap_name is not None:
cmap = plt.get_cmap(cmap_name)
line_colors = cmap(np.linspace(0.25, 0.9, n_lines))
else:
line_colors = [colorblind_palette_dict[palette_order[i%n_colors]] for i in range(n_lines)]
# plot lines
for i,tts in enumerate(lines):
plt.plot(n_nodes, tts,
color=line_colors[i], linestyle=linestyles[i%n_linestyles],
marker=markers[i%n_markers], markerfacecolor=line_colors[i], markersize=7)
# x-axis
if xscale == 'log2':
plt.xscale('log', basex=2)
elif xscale == 'log':
plt.xscale('log')
plt.xticks(n_nodes, fontsize='large')
plt.xlabel(xlabel, fontsize='x-large')
# y-axis
if yscale == 'log2':
        plt.yscale('log', basey=2)
elif yscale == 'log':
plt.yscale('log')
plt.yticks(fontsize='large')
plt.ylabel(ylabel, fontsize='x-large')
# legend
if labels is not None:
if len(labels) == n_lines:
legend = plt.legend(labels, loc='upper right', bbox_to_anchor=[1, 1],
ncol=min(n_lines,4), shadow=False, fancybox=True,
title=legend_title, fontsize='large')
plt.setp(legend.get_title(),fontsize='x-large')
else:
raise ValueError('Number of labels does not match number of lines')
# ticks formatting
ax = plt.gca()
# ax.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax.xaxis.set_major_formatter(ScalarFormatter())
# ax.yaxis.set_major_formatter(ScalarFormatter())
# save figure
if (saveas is None) or (filename is None):
plt.show()
elif saveas == 'tikz':
tikz_save(filename + '.' + saveas, figureheight = figureheight, figurewidth = figurewidth)
else:
plt.savefig(filename + '.' + saveas)
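# Example usage of plot_tts (a minimal sketch; the node counts, timings and labels below
# are made up, and saveas=None simply shows the figure instead of saving it):
#
#   nodes = [1, 2, 4, 8, 16]
#   tts_small = [120.0, 63.0, 33.0, 18.0, 10.5]
#   tts_large = [480.0, 250.0, 130.0, 70.0, 39.0]
#   plot_tts(nodes, [tts_small, tts_large], labels=['64^3', '128^3'], saveas=None)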
def plot_speedup(n_nodes, lines, labels=None, legend_title='Problem size', xlabel='Number of nodes', xscale='log2',
ylabel='Speedup', yscale='log2', plot_ideal=True, cmap_name=None, filename=None, saveas='tikz',
figureheight = '\\figureheight', figurewidth = '\\figurewidth'):
"""
Plots the speedup as a function of the number of nodes.
:param n_nodes: values in x-axis (i.e. number of nodes)
:param lines: list of lists, each with y values for each x value
:param labels: labels of the lines
:param legend_title: title of the legend
:param xlabel: label of x-axis
:param xscale: scale of x-axis: None: normal, log: base-10 logarithm, log2: base-2 logarithm
:param ylabel: label of y-axis
:param yscale: scale of y-axis: None: normal, log: base-10 logarithm, log2: base-2 logarithm
:param plot_ideal: if True, plots ideal speedup line
:param cmap_name: name of colormap to be used (see: http://matplotlib.org/examples/color/colormaps_reference.html).
If None, colorblind palette is used
    :param saveas: output format: 'tikz' (saved via matplotlib2tikz) or any extension supported by plt.savefig; if None, or if filename is None, the figure is shown instead of saved
"""
plt.figure(figsize=(12,8))
plt.grid()
# colormap
n_lines = len(lines)
line_colors = []
if cmap_name is not None:
cmap = plt.get_cmap(cmap_name)
line_colors = cmap(np.linspace(0.25, 0.9, n_lines))
else:
line_colors = [colorblind_palette_dict[palette_order[i%n_colors]] for i in range(n_lines)]
# plot lines
for i,tts in enumerate(lines):
plt.plot(n_nodes, tts,
color=line_colors[i], linestyle=linestyles[i%n_linestyles],
marker=markers[i%n_markers], markerfacecolor=line_colors[i], markersize=7)
if plot_ideal:
plt.plot(n_nodes, n_nodes, color=ideal_color)
plt.text(n_nodes[-2]+1, n_nodes[-2]+5, 'ideal', fontsize='x-large')
# x-axis
if xscale == 'log2':
plt.xscale('log', basex=2)
elif xscale == 'log':
plt.xscale('log')
plt.xticks(n_nodes, fontsize='large')
plt.xlabel(xlabel, fontsize='x-large')
# y-axis
if yscale == 'log2':
        plt.yscale('log', basey=2)
elif yscale == 'log':
plt.yscale('log')
plt.yticks(n_nodes, fontsize='large')
plt.ylabel(ylabel, fontsize='x-large')
# legend
if labels is not None:
if len(labels) == n_lines:
legend = plt.legend(labels, loc='upper left',
ncol=min(n_lines,4), shadow=False, fancybox=True,
title=legend_title, fontsize='large')
plt.setp(legend.get_title(),fontsize='x-large')
else:
raise ValueError('Number of labels does not match number of lines')
# ticks formatting
ax = plt.gca()
# ax.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax.xaxis.set_major_formatter(ScalarFormatter())
ax.yaxis.set_major_formatter(ScalarFormatter())
# save figure
if (saveas is None) or (filename is None):
plt.show()
elif saveas == 'tikz':
tikz_save(filename + '.' + saveas, figureheight = figureheight, figurewidth = figurewidth)
else:
plt.savefig(filename + '.' + saveas)
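# Example usage of plot_speedup (sketch): speedup is usually derived from the measured
# times as speedup[i] = tts[0] / tts[i]. The numbers below are made up.
#
#   nodes = [1, 2, 4, 8, 16]
#   tts = [120.0, 63.0, 33.0, 18.0, 10.5]
#   speedup = [tts[0] / t for t in tts]
#   plot_speedup(nodes, [speedup], labels=['64^3'], saveas=None)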
def plot_efficiency(n_nodes, lines, labels=None, legend_title='Problem size', xlabel='Number of nodes', xscale='log2',
ylabel='Efficiency', yscale=None, plot_ideal=True, cmap_name=None, filename=None, saveas='tikz',
figureheight = '\\figureheight', figurewidth = '\\figurewidth'):
"""
Plots the efficiency as a function of the number of nodes.
:param n_nodes: values in x-axis (i.e. number of nodes)
:param lines: list of lists, each with y values for each x value
:param labels: labels of the lines
:param legend_title: title of the legend
:param xlabel: label of x-axis
:param xscale: scale of x-axis: None: normal, log: base-10 logarithm, log2: base-2 logarithm
:param ylabel: label of y-axis
:param yscale: scale of y-axis: None: normal, log: base-10 logarithm, log2: base-2 logarithm
:param plot_ideal: if True, plots ideal speedup line
:param cmap_name: name of colormap to be used (see: http://matplotlib.org/examples/color/colormaps_reference.html).
If None, colorblind palette is used
    :param saveas: output format: 'tikz' (saved via matplotlib2tikz) or any extension supported by plt.savefig; if None, or if filename is None, the figure is shown instead of saved
"""
plt.figure(figsize=(12,8))
plt.grid()
# colormap
n_lines = len(lines)
line_colors = []
if cmap_name is not None:
cmap = plt.get_cmap(cmap_name)
line_colors = cmap(np.linspace(0.25, 0.9, n_lines))
else:
line_colors = [colorblind_palette_dict[palette_order[i%n_colors]] for i in range(n_lines)]
# plot lines
for i,tts in enumerate(lines):
plt.plot(n_nodes, tts,
color=line_colors[i], linestyle=linestyles[i%n_linestyles],
marker=markers[i%n_markers], markerfacecolor=line_colors[i], markersize=7)
if plot_ideal:
plt.plot(n_nodes, np.ones(len(n_nodes)), color=ideal_color)
plt.text(n_nodes[-1]-10, 0.96, 'ideal', fontsize='x-large')
# x-axis
if xscale == 'log2':
plt.xscale('log', basex=2)
elif xscale == 'log':
plt.xscale('log')
plt.xticks(n_nodes, fontsize='large')
plt.xlabel(xlabel, fontsize='x-large')
# y-axis
if yscale == 'log2':
        plt.yscale('log', basey=2)
elif yscale == 'log':
plt.yscale('log')
plt.yticks(fontsize='large')
plt.ylabel(ylabel, fontsize='x-large')
# legend
if labels is not None:
if len(labels) == n_lines:
legend = plt.legend(labels, loc='lower left',
ncol=min(n_lines,4), shadow=False, fancybox=True,
title=legend_title, fontsize='large')
plt.setp(legend.get_title(),fontsize='x-large')
else:
raise ValueError('Number of labels does not match number of lines')
# ticks formatting
ax = plt.gca()
# ax.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax.xaxis.set_major_formatter(ScalarFormatter())
ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
#ax.yaxis.set_major_formatter(ScalarFormatter())
# save figure
if (saveas is None) or (filename is None):
plt.show()
elif saveas == 'tikz':
tikz_save(filename + '.' + saveas, figureheight = figureheight, figurewidth = figurewidth)
else:
plt.savefig(filename + '.' + saveas)
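# Example usage of plot_efficiency (sketch): parallel efficiency is speedup divided by
# the number of nodes, so an ideal run stays at 1.0. The numbers below are made up.
#
#   nodes = [1, 2, 4, 8, 16]
#   tts = [120.0, 63.0, 33.0, 18.0, 10.5]
#   efficiency = [tts[0] / (t * n) for t, n in zip(tts, nodes)]
#   plot_efficiency(nodes, [efficiency], labels=['64^3'], saveas=None)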
def plot_tts_bar(machines, values_lists, labels=None, width=0.35, legend_title='Program', xlabel='Machine',
ylabel='Time to solution [s]', yscale=None, cmap_name=None, filename=None, saveas='tikz',
figureheight = '\\figureheight', figurewidth = '\\figurewidth'):
"""
    Plots the time to solution for each program on each machine as a grouped bar chart.
:param machines: list of strings for each machine
:param values_lists: list of lists of values corresponding to the passed machines and programs
:param labels: labels of the programs
:param width: the width of the bars
:param legend_title: title of the legend
:param xlabel: label of x-axis
:param ylabel: label of y-axis
:param yscale: scale of y-axis: None: normal, log: base-10 logarithm, log2: base-2 logarithm
:param cmap_name: name of colormap to be used (see: http://matplotlib.org/examples/color/colormaps_reference.html).
If None, colorblind palette is used
    :param saveas: output format: 'tikz' (saved via matplotlib2tikz) or any extension supported by plt.savefig; if None, or if filename is None, the figure is shown instead of saved
"""
# TODO -- fix plotted values: see *O.png
plt.figure(figsize=(12,8))
plt.grid()
# colormap
n_labels = len(values_lists[0])
bar_colors = []
if cmap_name is not None:
cmap = plt.get_cmap(cmap_name)
bar_colors = cmap(np.linspace(0.25, 0.9, n_labels))
else:
bar_colors = [colorblind_palette_dict[palette_order2[i%n_colors]] for i in range(n_labels)]
# plot bars -- label by label
n_machines = len(machines)
x_values = np.arange(1, n_machines+1)
    max_value = max(max(vl) for vl in values_lists)
for i in range(n_labels):
values = [val_list[i] for val_list in values_lists]
plt.bar(x_values+i*width, values, width, align='center', color=bar_colors[i])
        # annotate the bars just drawn with the values that were actually plotted
        for idx, v in enumerate(values):
plt.text(idx+1+i*width, v+max_value/100, str(v), fontsize='large', horizontalalignment='center')
# x-axis
plt.xticks(x_values+(n_labels-1)*width/2, machines, fontsize='large')
plt.xlabel(xlabel, fontsize='x-large')
# y-axis
if yscale == 'log2':
        plt.yscale('log', basey=2)
elif yscale == 'log':
plt.yscale('log')
plt.yticks(fontsize='large')
plt.ylabel(ylabel, fontsize='x-large')
# legend
if labels is not None:
if n_labels == len(labels):
legend = plt.legend(labels, loc='upper right', bbox_to_anchor=[1, 1],
ncol=min(n_labels,4), shadow=False, fancybox=True,
title=legend_title, fontsize='large')
plt.setp(legend.get_title(),fontsize='x-large')
else:
raise ValueError('Number of labels does not match number of lines')
# ticks formatting
ax = plt.gca()
# ax.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
# ax.xaxis.set_major_formatter(ScalarFormatter())
# ax.yaxis.set_major_formatter(ScalarFormatter())
# save figure
if (saveas is None) or (filename is None):
plt.show()
elif saveas == 'tikz':
        tikz_save(filename + '.' + saveas, figureheight = figureheight, figurewidth = figurewidth)
    else:
        plt.savefig(filename + '.' + saveas)
isinstance(self._cmap, dict):
# dictionary cmap
if self.constant_c() or self.array_c():
raise ValueError(
"Expected list-like `c` with dictionary cmap."
" Got {}".format(type(self._c))
)
elif not self.discrete:
raise ValueError("Cannot use dictionary cmap with " "continuous data.")
elif np.any([color not in self._cmap for color in np.unique(self._c)]):
missing = set(np.unique(self._c).tolist()).difference(self._cmap.keys())
raise ValueError(
"Dictionary cmap requires a color "
"for every unique entry in `c`. "
"Missing colors for [{}]".format(
", ".join([str(color) for color in missing])
)
)
elif self.list_cmap():
if self.constant_c() or self.array_c():
raise ValueError(
"Expected list-like `c` with list cmap. "
"Got {}".format(type(self._c))
)
def check_cmap_scale(self):
if self._cmap_scale is not None and self._cmap_scale != "linear":
if self.array_c():
warnings.warn(
"Cannot use non-linear `cmap_scale` with " "`c` as a color array.",
UserWarning,
)
self._cmap_scale = "linear"
elif self.constant_c():
warnings.warn(
"Cannot use non-linear `cmap_scale` with constant "
"`c={}`.".format(self._c),
UserWarning,
)
self._cmap_scale = "linear"
elif self.discrete:
warnings.warn(
"Cannot use non-linear `cmap_scale` with discrete data.",
UserWarning,
)
self._cmap_scale = "linear"
def _label(self, label, values, idx):
if label is False:
return None
elif label is not None:
return label
elif self._label_prefix is not None:
return self._label_prefix + str(idx)
elif label is not False and isinstance(values, pd.Series):
return values.name
else:
return None
@property
def xlabel(self):
return self._label(self._xlabel, self._x, "1")
@property
def ylabel(self):
return self._label(self._ylabel, self._y, "2")
@property
def zlabel(self):
if self._z is None:
return None
else:
return self._label(self._zlabel, self._z, "3")
@utils._with_pkg(pkg="matplotlib", min_version=3)
def scatter(
x,
y,
z=None,
c=None,
cmap=None,
cmap_scale="linear",
s=None,
mask=None,
discrete=None,
ax=None,
legend=None,
colorbar=None,
shuffle=True,
figsize=None,
ticks=True,
xticks=None,
yticks=None,
zticks=None,
ticklabels=True,
xticklabels=None,
yticklabels=None,
zticklabels=None,
label_prefix=None,
xlabel=None,
ylabel=None,
zlabel=None,
title=None,
fontsize=None,
legend_title=None,
legend_loc="best",
legend_anchor=None,
legend_ncol=None,
vmin=None,
vmax=None,
elev=None,
azim=None,
filename=None,
dpi=None,
**plot_kwargs,
):
"""Create a scatter plot.
Builds upon `matplotlib.pyplot.scatter` with nice defaults
and handles categorical colors / legends better. For easy access, use
`scatter2d` or `scatter3d`.
Parameters
----------
x : list-like
data for x axis
y : list-like
data for y axis
z : list-like, optional (default: None)
data for z axis
c : list-like or None, optional (default: None)
Color vector. Can be a single color value (RGB, RGBA, or named
matplotlib colors), an array of these of length n_samples, or a list of
discrete or continuous values of any data type. If `c` is not a single
or list of matplotlib colors, the values in `c` will be used to
populate the legend / colorbar with colors from `cmap`
cmap : `matplotlib` colormap, str, dict or None, optional (default: None)
matplotlib colormap. If None, uses `tab20` for discrete data and
`inferno` for continuous data. If a dictionary, expects one key
for every unique value in `c`, where values are valid matplotlib colors
        (hsv, rgb, rgba, or named colors)
cmap_scale : {'linear', 'log', 'symlog', 'sqrt'} or `matplotlib.colors.Normalize`,
optional (default: 'linear')
Colormap normalization scale. For advanced use, see
<https://matplotlib.org/users/colormapnorms.html>
s : float, optional (default: None)
Point size. If `None`, set to 200 / sqrt(n_samples)
mask : list-like, optional (default: None)
boolean mask to hide data points
discrete : bool or None, optional (default: None)
If True, the legend is categorical. If False, the legend is a colorbar.
If None, discreteness is detected automatically. Data containing
        non-numeric `c` is always discrete, and numeric data with 20 or fewer
unique values is discrete.
ax : `matplotlib.Axes` or None, optional (default: None)
axis on which to plot. If None, an axis is created
legend : bool, optional (default: None)
States whether or not to create a legend. If data is continuous,
the legend is a colorbar. If `None`, a legend is created where possible
colorbar : bool, optional (default: None)
Synonym for `legend`
shuffle : bool, optional (default: True)
        If True, shuffles the order of points on the plot.
figsize : tuple, optional (default: None)
Tuple of floats for creation of new `matplotlib` figure. Only used if
`ax` is None.
ticks : True, False, or list-like (default: True)
If True, keeps default axis ticks. If False, removes axis ticks.
If a list, sets custom axis ticks
{x,y,z}ticks : True, False, or list-like (default: None)
If set, overrides `ticks`
ticklabels : True, False, or list-like (default: True)
If True, keeps default axis tick labels. If False, removes axis tick labels.
If a list, sets custom axis tick labels
{x,y,z}ticklabels : True, False, or list-like (default: None)
If set, overrides `ticklabels`
label_prefix : str or None (default: None)
Prefix for all axis labels. Axes will be labelled `label_prefix`1,
        `label_prefix`2, etc. Can be overridden by setting `xlabel`,
`ylabel`, and `zlabel`.
{x,y,z}label : str, None or False (default : None)
Axis labels. Overrides the automatic label given by
label_prefix. If None and label_prefix is None, no label is set
unless the data is a pandas Series, in which case the series name is used.
Override this behavior with `{x,y,z}label=False`
title : str or None (default: None)
axis title. If None, no title is set.
fontsize : float or None (default: None)
Base font size.
legend_title : str (default: None)
title for the colorbar of legend
legend_loc : int or string or pair of floats, default: 'best'
Matplotlib legend location. Only used for discrete data.
See <https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html>
for details.
legend_anchor : `BboxBase`, 2-tuple, or 4-tuple
Box that is used to position the legend in conjunction with loc.
See <https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html>
for details.
    legend_ncol : `int` or `None`, optional (default: None)
Number of columns to show in the legend.
If None, defaults to a maximum of entries per column.
vmin, vmax : float, optional (default: None)
Range of values to use as the range for the colormap.
Only used if data is continuous
elev : int, optional (default: None)
Elevation angle of viewpoint from horizontal for 3D plots, in degrees
azim : int, optional (default: None)
Azimuth angle in x-y plane of viewpoint for 3D plots, in degrees
filename : str or None (default: None)
file to which the output is saved
dpi : int or None, optional (default: None)
The resolution in dots per inch. If None it will default to the value
savefig.dpi in the matplotlibrc file. If 'figure' it will set the dpi
to be the value of the figure. Only used if filename is not None.
**plot_kwargs : keyword arguments
Extra arguments passed to `matplotlib.pyplot.scatter`.
Returns
-------
ax : `matplotlib.Axes`
axis on which plot was drawn
Examples
--------
>>> import scprep
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> data = np.random.normal(0, 1, [200, 3])
>>> # Continuous color vector
>>> colors = data[:, 0]
>>> scprep.plot.scatter(x=data[:, 0], y=data[:, 1], c=colors)
>>> # Discrete color vector with custom colormap
>>> colors = np.random.choice(['a','b'], data.shape[0], replace=True)
>>> data[colors == 'a'] += 5
>>> scprep.plot.scatter(x=data[:, 0], y=data[:, 1], z=data[:, 2],
... c=colors, cmap={'a' : [1,0,0,1], 'b' : 'xkcd:sky blue'})
"""
with temp_fontsize(fontsize):
params = _ScatterParams(
x,
y,
z,
c=c,
mask=mask,
discrete=discrete,
cmap=cmap,
cmap_scale=cmap_scale,
vmin=vmin,
vmax=vmax,
s=s,
legend=legend,
colorbar=colorbar,
xlabel=xlabel,
ylabel=ylabel,
zlabel=zlabel,
label_prefix=label_prefix,
shuffle=shuffle,
)
fig, ax, show_fig = _get_figure(ax, figsize, subplot_kw=params.subplot_kw)
# plot!
sc = ax.scatter(
*(params.data),
c=params.c,
cmap=params.cmap,
norm=params.norm,
s=params.s,
vmin=params.vmin,
vmax=params.vmax,
**plot_kwargs,
)
# label axes
label_axis(
ax.xaxis,
_with_default(xticks, ticks),
_with_default(xticklabels, ticklabels),
params.xlabel,
)
label_axis(
ax.yaxis,
_with_default(yticks, ticks),
_with_default(yticklabels, ticklabels),
params.ylabel,
)
if z is not None:
label_axis(
ax.zaxis,
_with_default(zticks, ticks),
_with_default(zticklabels, ticklabels),
params.zlabel,
)
if title is not None:
ax.set_title(title, fontsize=parse_fontsize(None, "xx-large"))
# generate legend
if params.legend:
if params.discrete:
generate_legend(
{
params.labels[i]: sc.cmap(sc.norm(i))
for i in range(len(params.labels))
},
ax=ax,
loc=legend_loc,
bbox_to_anchor=legend_anchor,
title=legend_title,
ncol=legend_ncol,
)
else:
generate_colorbar(
params.cmap,
ax=ax,
vmin=params.vmin,
vmax=params.vmax,
title=legend_title,
extend=params.extend,
scale=sc.norm,
)
# set viewpoint
if z is not None:
ax.view_init(elev=elev, azim=azim)
# save and show
if show_fig:
show(fig)
if filename is not None:
fig.savefig(filename, dpi=dpi)
return ax
@utils._with_pkg(pkg="matplotlib", min_version=3)
def scatter2d(
data,
c=None,
cmap=None,
cmap_scale="linear",
s=None,
mask=None,
discrete=None,
ax=None,
legend=None,
colorbar=None,
shuffle=True,
figsize=None,
ticks=True,
xticks=None,
yticks=None,
ticklabels=True,
xticklabels=None,
yticklabels=None,
label_prefix=None,
xlabel=None,
ylabel=None,
title=None,
fontsize=None,
legend_title=None,
legend_loc="best",
legend_anchor=None,
legend_ncol=None,
filename=None,
dpi=None,
**plot_kwargs,
):
"""Create a 2D scatter plot.
Builds upon `matplotlib.pyplot.scatter` with nice defaults
    and handles categorical colors / legends better.
3 3 3 4 4 4 4 4 4 4 5 5 5 5 6\n\
Kappa -1 -1 1 -2 -1 1 -2 2 -3 -1 1 -2 2 -3 3 -4 -1 1 -2 2 -1\n\
Occup 2 2 2 4 2 2 4 4 6 2 2 4 4 6 6 3 2 2 4 1 2\n\
Valen 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n",
"Ho":
"Iz= 67 Norb= 21 Ion= 0 Config= 4f10_5d1_6s2\n\
n 1 2 2 2 3 3 3 3 3 4 4 4 4 4 4 4 5 5 5 5 6\n\
Kappa -1 -1 1 -2 -1 1 -2 2 -3 -1 1 -2 2 -3 3 -4 -1 1 -2 2 -1\n\
Occup 2 2 2 4 2 2 4 4 6 2 2 4 4 6 6 4 2 2 4 1 2\n\
Valen 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n",
"Er":
"Iz= 68 Norb= 21 Ion= 0 Config= 4f11_5d1_6s2\n\
n 1 2 2 2 3 3 3 3 3 4 4 4 4 4 4 4 5 5 5 5 6\n\
Kappa -1 -1 1 -2 -1 1 -2 2 -3 -1 1 -2 2 -3 3 -4 -1 1 -2 2 -1\n\
Occup 2 2 2 4 2 2 4 4 6 2 2 4 4 6 6 5 2 2 4 1 2\n\
Valen 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n",
"Tm":
"Iz= 69 Norb= 21 Ion= 0 Config= 4f12_5d1_6s2\n\
n 1 2 2 2 3 3 3 3 3 4 4 4 4 4 4 4 5 5 5 5 6\n\
Kappa -1 -1 1 -2 -1 1 -2 2 -3 -1 1 -2 2 -3 3 -4 -1 1 -2 2 -1\n\
Occup 2 2 2 4 2 2 4 4 6 2 2 4 4 6 6 6 2 2 4 1 2\n\
Valen 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n",
"Yb":
"Iz= 70 Norb= 21 Ion= 0 Config= 4f14_5d1_6s1\n\
n 1 2 2 2 3 3 3 3 3 4 4 4 4 4 4 4 5 5 5 5 6\n\
Kappa -1 -1 1 -2 -1 1 -2 2 -3 -1 1 -2 2 -3 3 -4 -1 1 -2 2 -1\n\
Occup 2 2 2 4 2 2 4 4 6 2 2 4 4 6 6 8 2 2 4 1 1\n\
Valen 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n",
"Lu":
"Iz= 71 Norb= 21 Ion= 0 Config= 5d1_6s2\n\
n 1 2 2 2 3 3 3 3 3 4 4 4 4 4 4 4 5 5 5 5 6\n\
Kappa -1 -1 1 -2 -1 1 -2 2 -3 -1 1 -2 2 -3 3 -4 -1 1 -2 2 -1\n\
Occup 2 2 2 4 2 2 4 4 6 2 2 4 4 6 6 8 2 2 4 1 2\n\
Valen 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n",
"Hf":
"Iz= 72 Norb= 21 Ion= 0 Config= 5d2_6s2\n\
n 1 2 2 2 3 3 3 3 3 4 4 4 4 4 4 4 5 5 5 5 6\n\
Kappa -1 -1 1 -2 -1 1 -2 2 -3 -1 1 -2 2 -3 3 -4 -1 1 -2 2 -1\n\
Occup 2 2 2 4 2 2 4 4 6 2 2 4 4 6 6 8 2 2 4 2 2\n\
Valen 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n",
"Ta":
"Iz= 73 Norb= 21 Ion= 0 Config= 5d3_6s2\n\
n 1 2 2 2 3 3 3 3 3 4 4 4 4 4 4 4 5 5 5 5 6\n\
Kappa -1 -1 1 -2 -1 1 -2 2 -3 -1 1 -2 2 -3 3 -4 -1 1 -2 2 -1\n\
Occup 2 2 2 4 2 2 4 4 6 2 2 4 4 6 6 8 2 2 4 3 2\n\
Valen 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n",
"W":
"Iz= 74 Norb= 21 Ion= 0 Config= 5d4_6s2\n\
n 1 2 2 2 3 3 3 3 3 4 4 4 4 4 4 4 5 5 5 5 6\n\
Kappa -1 -1 1 -2 -1 1 -2 2 -3 -1 1 -2 2 -3 3 -4 -1 1 -2 2 -1\n\
Occup 2 2 2 4 2 2 4 4 6 2 2 4 4 6 6 8 2 2 4 4 2\n\
Valen 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n",
"Re":
"Iz= 75 Norb= 22 Ion= 0 Config= 5d5_6s2\n\
n 1 2 2 2 3 3 3 3 3 4 4 4 4 4 4 4 5 5 5 5 5 6\n\
Kappa -1 -1 1 -2 -1 1 -2 2 -3 -1 1 -2 2 -3 3 -4 -1 1 -2 2 -3 -1\n\
Occup 2 2 2 4 2 2 4 4 6 2 2 4 4 6 6 8 2 2 4 4 1 2\n\
Valen 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1\n",
"Os":
"Iz= 76 Norb= 22 Ion= 0 Config= 5d6_6s2\n\
n 1 2 2 2 3 3 3 3 3 4 4 4 4 4 4 4 5 5 5 5 5 6\n\
Kappa -1 -1 1 -2 -1 1 -2 2 -3 -1 1 -2 2 -3 3 -4 -1 1 -2 2 -3 -1\n\
Occup 2 2 2 4 2 2 4 4 6 2 2 4 4 6 6 8 2 2 4 4 2 2\n\
Valen 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1\n",
"Ir":
"Iz= 77 Norb= 22 Ion= 0 Config= 5d7_6s2\n\
n 1 2 2 2 3 3 3 3 3 4 4 4 4 4 4 4 5 5 5 5 5 6\n\
Kappa -1 -1 1 -2 -1 1 -2 2 -3 -1 1 -2 2 -3 3 -4 -1 1 -2 2 -3 -1\n\
Occup 2 2 2 4 2 2 4 4 6 2 2 4 4 6 6 8 2 2 4 4 3 2\n\
Valen 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1\n",
"Pt":
"Iz= 78 Norb= 22 Ion= 0 Config= 5d8_6s2\n\
n 1 2 2 2 3 3 3 3 3 4 4 4 4 4 4 4 5 5 5 5 5 6\n\
Kappa -1 -1 1 -2 -1 1 -2 2 -3 -1 1 -2 2 -3 3 -4 -1 1 -2 2 -3 -1\n\
Occup 2 2 2 4 2 2 4 4 6 2 2 4 4 6 6 8 2 2 4 4 4 2\n\
Valen 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1\n",
"Au":
"Iz= 79 Norb= 22 Ion= 0 Config= 5d10_6s1\n\
n 1 2 2 2 3 3 3 3 3 4 4 4 4 4 4 4 5 5 5 5 5 6\n\
Kappa -1 -1 1 -2 -1 1 -2 2 -3 -1 1 -2 2 -3 3 -4 -1 1 -2 2 -3 -1\n\
Occup     2    2    2    4    2    2    4    4    6    2    2    4    4    6    6    8    2
# coding: utf-8
"""
flyteidl/service/admin.proto
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: version not set
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from flyteadmin.models.core_execution_error import CoreExecutionError # noqa: F401,E501
from flyteadmin.models.core_identifier import CoreIdentifier # noqa: F401,E501
from flyteadmin.models.core_node_execution_identifier import CoreNodeExecutionIdentifier # noqa: F401,E501
from flyteadmin.models.core_task_execution_phase import CoreTaskExecutionPhase # noqa: F401,E501
from flyteadmin.models.core_task_log import CoreTaskLog # noqa: F401,E501
from flyteadmin.models.event_task_execution_metadata import EventTaskExecutionMetadata # noqa: F401,E501
from flyteadmin.models.protobuf_struct import ProtobufStruct # noqa: F401,E501
class EventTaskExecutionEvent(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'task_id': 'CoreIdentifier',
'parent_node_execution_id': 'CoreNodeExecutionIdentifier',
'retry_attempt': 'int',
'phase': 'CoreTaskExecutionPhase',
'producer_id': 'str',
'logs': 'list[CoreTaskLog]',
'occurred_at': 'datetime',
'input_uri': 'str',
'output_uri': 'str',
'error': 'CoreExecutionError',
'custom_info': 'ProtobufStruct',
'phase_version': 'int',
'metadata': 'EventTaskExecutionMetadata'
}
attribute_map = {
'task_id': 'task_id',
'parent_node_execution_id': 'parent_node_execution_id',
'retry_attempt': 'retry_attempt',
'phase': 'phase',
'producer_id': 'producer_id',
'logs': 'logs',
'occurred_at': 'occurred_at',
'input_uri': 'input_uri',
'output_uri': 'output_uri',
'error': 'error',
'custom_info': 'custom_info',
'phase_version': 'phase_version',
'metadata': 'metadata'
}
def __init__(self, task_id=None, parent_node_execution_id=None, retry_attempt=None, phase=None, producer_id=None, logs=None, occurred_at=None, input_uri=None, output_uri=None, error=None, custom_info=None, phase_version=None, metadata=None): # noqa: E501
"""EventTaskExecutionEvent - a model defined in Swagger""" # noqa: E501
self._task_id = None
self._parent_node_execution_id = None
self._retry_attempt = None
self._phase = None
self._producer_id = None
self._logs = None
self._occurred_at = None
self._input_uri = None
self._output_uri = None
self._error = None
self._custom_info = None
self._phase_version = None
self._metadata = None
self.discriminator = None
if task_id is not None:
self.task_id = task_id
if parent_node_execution_id is not None:
self.parent_node_execution_id = parent_node_execution_id
if retry_attempt is not None:
self.retry_attempt = retry_attempt
if phase is not None:
self.phase = phase
if producer_id is not None:
self.producer_id = producer_id
if logs is not None:
self.logs = logs
if occurred_at is not None:
self.occurred_at = occurred_at
if input_uri is not None:
self.input_uri = input_uri
if output_uri is not None:
self.output_uri = output_uri
if error is not None:
self.error = error
if custom_info is not None:
self.custom_info = custom_info
if phase_version is not None:
self.phase_version = phase_version
if metadata is not None:
self.metadata = metadata
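    # Illustrative sketch (not part of the generated file): the model is a plain data
    # holder, so an event can be assembled field by field. The values below are
    # hypothetical; identifiers, phases and logs normally come from the other
    # flyteadmin.models classes imported above.
    #
    #   event = EventTaskExecutionEvent(retry_attempt=0,
    #                                   producer_id="propeller",
    #                                   phase_version=1,
    #                                   input_uri="s3://my-bucket/inputs.pb")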
@property
def task_id(self):
"""Gets the task_id of this EventTaskExecutionEvent. # noqa: E501
ID of the task. In combination with the retryAttempt this will indicate the task execution uniquely for a given parent node execution. # noqa: E501
:return: The task_id of this EventTaskExecutionEvent. # noqa: E501
:rtype: CoreIdentifier
"""
return self._task_id
@task_id.setter
def task_id(self, task_id):
"""Sets the task_id of this EventTaskExecutionEvent.
ID of the task. In combination with the retryAttempt this will indicate the task execution uniquely for a given parent node execution. # noqa: E501
:param task_id: The task_id of this EventTaskExecutionEvent. # noqa: E501
:type: CoreIdentifier
"""
self._task_id = task_id
@property
def parent_node_execution_id(self):
"""Gets the parent_node_execution_id of this EventTaskExecutionEvent. # noqa: E501
:return: The parent_node_execution_id of this EventTaskExecutionEvent. # noqa: E501
:rtype: CoreNodeExecutionIdentifier
"""
return self._parent_node_execution_id
@parent_node_execution_id.setter
def parent_node_execution_id(self, parent_node_execution_id):
"""Sets the parent_node_execution_id of this EventTaskExecutionEvent.
:param parent_node_execution_id: The parent_node_execution_id of this EventTaskExecutionEvent. # noqa: E501
:type: CoreNodeExecutionIdentifier
"""
self._parent_node_execution_id = parent_node_execution_id
@property
def retry_attempt(self):
"""Gets the retry_attempt of this EventTaskExecutionEvent. # noqa: E501
:return: The retry_attempt of this EventTaskExecutionEvent. # noqa: E501
:rtype: int
"""
return self._retry_attempt
@retry_attempt.setter
def retry_attempt(self, retry_attempt):
"""Sets the retry_attempt of this EventTaskExecutionEvent.
:param retry_attempt: The retry_attempt of this EventTaskExecutionEvent. # noqa: E501
:type: int
"""
self._retry_attempt = retry_attempt
@property
def phase(self):
"""Gets the phase of this EventTaskExecutionEvent. # noqa: E501
:return: The phase of this EventTaskExecutionEvent. # noqa: E501
:rtype: CoreTaskExecutionPhase
"""
return self._phase
@phase.setter
def phase(self, phase):
"""Sets the phase of this EventTaskExecutionEvent.
:param phase: The phase of this EventTaskExecutionEvent. # noqa: E501
:type: CoreTaskExecutionPhase
"""
self._phase = phase
@property
def producer_id(self):
"""Gets the producer_id of this EventTaskExecutionEvent. # noqa: E501
:return: The producer_id of this EventTaskExecutionEvent. # noqa: E501
:rtype: str
"""
return self._producer_id
@producer_id.setter
def producer_id(self, producer_id):
"""Sets the producer_id of this EventTaskExecutionEvent.
:param producer_id: The producer_id of this EventTaskExecutionEvent. # noqa: E501
:type: str
"""
self._producer_id = producer_id
@property
def logs(self):
"""Gets the logs of this EventTaskExecutionEvent. # noqa: E501
:return: The logs of this EventTaskExecutionEvent. # noqa: E501
:rtype: list[CoreTaskLog]
"""
return self._logs
@logs.setter
def logs(self, logs):
"""Sets the logs of this EventTaskExecutionEvent.
:param logs: The logs of this EventTaskExecutionEvent. # noqa: E501
:type: list[CoreTaskLog]
"""
self._logs = logs
@property
def occurred_at(self):
"""Gets the occurred_at of this EventTaskExecutionEvent. # noqa: E501
This timestamp represents when the original event occurred, it is generated by the executor of the task. # noqa: E501
:return: The occurred_at of this EventTaskExecutionEvent. # noqa: E501
:rtype: datetime
"""
return self._occurred_at
@occurred_at.setter
def occurred_at(self, occurred_at):
"""Sets the occurred_at of this EventTaskExecutionEvent.
This timestamp represents when the original event occurred, it is generated by the executor of the task. # noqa: E501
:param occurred_at: The occurred_at of this EventTaskExecutionEvent. # noqa: E501
:type: datetime
"""
self._occurred_at = occurred_at
@property
def input_uri(self):
"""Gets the input_uri of this EventTaskExecutionEvent. # noqa: E501
URI of the input file, it encodes all the information including Cloud source provider. ie., s3://... # noqa: E501
:return: The input_uri of this EventTaskExecutionEvent. # noqa: E501
:rtype: str
"""
return self._input_uri
@input_uri.setter
def input_uri(self, input_uri):
"""Sets the input_uri of this EventTaskExecutionEvent.
URI of the input file, it encodes all the information including Cloud source provider. ie., s3://... # noqa: E501
:param input_uri: The input_uri of this EventTaskExecutionEvent. # noqa: E501
:type: str
"""
self._input_uri = input_uri
@property
def output_uri(self):
"""Gets the output_uri of this EventTaskExecutionEvent. # noqa: E501
URI to the output of the execution, it will be in a format that encodes all the information including Cloud source provider. ie., s3://... # noqa: E501
:return: The output_uri of this EventTaskExecutionEvent. # noqa: E501
:rtype: str
"""
return self._output_uri
@output_uri.setter
def output_uri(self, output_uri):
"""Sets the output_uri of this EventTaskExecutionEvent.
URI to the output of the execution, it will be in a format that encodes all the information including Cloud source provider. ie., s3://... # noqa: E501
:param output_uri: The output_uri of this EventTaskExecutionEvent. # noqa: E501
:type: str
"""
self._output_uri = output_uri
@property
def error(self):
"""Gets the error of this EventTaskExecutionEvent. # noqa: E501
:return: The error of this EventTaskExecutionEvent. # noqa: E501
:rtype: CoreExecutionError
"""
return self._error
@error.setter
def error(self, error):
"""Sets the error of this EventTaskExecutionEvent.
:param error: The error of this EventTaskExecutionEvent. # noqa: E501
:type: CoreExecutionError
"""
self._error = error
@property
def custom_info(self):
"""Gets the custom_info of this EventTaskExecutionEvent. # noqa: E501
Custom data that the task plugin sends back. This is extensible to allow various plugins in the system. # noqa: E501
:return: The custom_info of this EventTaskExecutionEvent. # noqa: E501
:rtype: ProtobufStruct
"""
return self._custom_info
@custom_info.setter
def custom_info(self, custom_info):
"""Sets the custom_info of this EventTaskExecutionEvent.
Custom data that the task plugin sends back. This is extensible to allow various plugins in the system. # noqa: E501
:param custom_info: The custom_info of this EventTaskExecutionEvent. # noqa: E501
:type: ProtobufStruct
"""
self._custom_info = custom_info
@property
def phase_version(self):
"""Gets the phase_version of this EventTaskExecutionEvent. # noqa: E501
Some phases, like RUNNING, can send multiple events with changed metadata (new logs, additional custom_info, etc) that should be recorded regardless of the lack of phase change. The version field should be incremented when metadata changes across the duration of an individual phase. # noqa: E501
:return: The phase_version of this EventTaskExecutionEvent. # noqa: E501
:rtype: int
"""
return self._phase_version
@phase_version.setter
def phase_version(self, phase_version):
"""Sets the phase_version of this EventTaskExecutionEvent.
        Some phases, like RUNNING, can send multiple events with changed metadata (new logs, additional custom_info, etc) that should be recorded regardless of the lack of phase change. The version field should be incremented when metadata changes across the duration of an individual phase. # noqa: E501
#!python3
"""
Find a fractionl allocation that maximizes a social welfare function (- a monotone function of the utilities),
for agents with additive valuations.
Examples are: max-sum, max-product, max-min.
See also: [leximin.py](leximin.py)
Author: <NAME>
Since: 2021-05
"""
import cvxpy
from fairpy import ValuationMatrix, Allocation, AllocationToFamilies, convert_input_to_valuation_matrix
from fairpy.solve import maximize
from typing import Any
import logging
logger = logging.getLogger(__name__)
@convert_input_to_valuation_matrix
def max_welfare_allocation(instance:Any, welfare_function, welfare_constraint_function=None) -> Allocation:
"""
    Find a fractional allocation that maximizes a given social welfare function.
:param agents: a matrix v in which each row represents an agent, each column represents an object, and v[i][j] is the value of agent i to object j.
:param welfare_function: a monotonically-increasing function w: R -> R representing the welfare function to maximize.
    :param welfare_constraint_function: a predicate w: R -> {true,false} representing an additional constraint on the utility of each agent.
:return allocation_matrix: a matrix alloc of a similar shape in which alloc[i][j] is the fraction allocated to agent i from object j.
For usage examples, see the functions max_sum_allocation, max_product_allocation, max_minimum_allocation.
"""
v = ValuationMatrix(instance)
allocation_vars = cvxpy.Variable((v.num_of_agents, v.num_of_objects))
feasibility_constraints = [
sum([allocation_vars[i][o] for i in v.agents()])==1
for o in v.objects()
]
positivity_constraints = [
allocation_vars[i][o] >= 0 for i in v.agents()
for o in v.objects()
]
utilities = [sum([allocation_vars[i][o]*v[i][o] for o in v.objects()]) for i in v.agents()]
if welfare_constraint_function is not None:
welfare_constraints = [welfare_constraint_function(utility) for utility in utilities]
else:
welfare_constraints = []
max_welfare = maximize(welfare_function(utilities), feasibility_constraints+positivity_constraints+welfare_constraints)
logger.info("Maximum welfare is %g",max_welfare)
allocation_matrix = allocation_vars.value
return allocation_matrix
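# Example (a sketch, not part of the original module): max_welfare_allocation is generic
# in the welfare function, so utilitarian welfare can be requested directly by passing a
# cvxpy-compatible aggregator. The valuation matrix below is made up.
#
#   alloc = max_welfare_allocation(
#       [[3, 2], [1, 4]],
#       welfare_function=lambda utilities: sum(utilities),
#       welfare_constraint_function=lambda utility: utility >= 0)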
from fairpy.families import AllocationToFamilies, map_agent_to_family
def max_welfare_allocation_for_families(instance, families:list, welfare_function, welfare_constraint_function=None) -> AllocationToFamilies:
"""
    Find an allocation to families that maximizes a given social welfare function.
:param agents: a matrix v in which each row represents an agent, each column represents an object, and v[i][j] is the value of agent i to object j.
:param families: a list of lists. Each list represents a family and contains the indices of the agents in the family.
:param welfare_function: a monotonically-increasing function w: R -> R representing the welfare function to maximize.
    :param welfare_constraint_function: a predicate w: R -> {true,false} representing an additional constraint on the utility of each agent.
:return allocation_matrix: a matrix alloc of a similar shape in which alloc[i][j] is the fraction allocated to agent i from object j.
For usage examples, see the function max_minimum_allocation_for_families.
"""
v = ValuationMatrix(instance)
num_of_families = len(families)
agent_to_family = map_agent_to_family(families, v.num_of_agents)
alloc = cvxpy.Variable((num_of_families, v.num_of_objects))
feasibility_constraints = [
sum([alloc[f][o] for f in range(num_of_families)])==1
for o in v.objects()
]
positivity_constraints = [
alloc[f][o] >= 0 for f in range(num_of_families)
for o in v.objects()
]
utilities = [sum([alloc[agent_to_family[i]][o]*v[i][o] for o in v.objects()]) for i in v.agents()]
if welfare_constraint_function is not None:
welfare_constraints = [welfare_constraint_function(utility) for utility in utilities]
else:
welfare_constraints = []
max_welfare = maximize(welfare_function(utilities), feasibility_constraints+positivity_constraints+welfare_constraints)
logger.info("Maximum welfare is %g",max_welfare)
return AllocationToFamilies(v, alloc.value, families)
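# Example (sketch): the families variant takes an explicit partition of the agents into
# families. With two singleton families it reduces to the individual-agent case; the
# valuations below are made up.
#
#   a = max_welfare_allocation_for_families(
#       [[3, 2], [1, 4]], families=[[0], [1]],
#       welfare_function=lambda utilities: cvxpy.min(cvxpy.hstack(utilities)),
#       welfare_constraint_function=lambda utility: utility >= 0)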
def max_sum_allocation(instance) -> Allocation:
"""
Find the max-sum (aka Utilitarian) allocation.
:param agents: a matrix v in which each row represents an agent, each column represents an object, and v[i][j] is the value of agent i to object j.
:return allocation_matrix: a matrix alloc of a similar shape in which alloc[i][j] is the fraction allocated to agent i from object j.
    The allocation should maximize the sum of utilities.
>>> max_sum_allocation([ [3] , [5] ]).round(3).matrix # single item
[[0.]
[1.]]
>>> max_sum_allocation([ [3,3] , [1,1] ]).round(3).matrix # two identical items
[[1. 1.]
[0. 0.]]
>>> max_sum_allocation([ [3,2] , [1,4] ]).round(3).matrix # two different items
[[1. 0.]
[0. 1.]]
"""
return max_welfare_allocation(instance,
welfare_function=lambda utilities: sum(utilities),
welfare_constraint_function=lambda utility: utility >= 0)
def max_power_sum_allocation(instance, power:float) -> Allocation:
"""
Find the maximum of sum of utility to the given power.
* When power=1, it is equivalent to max-sum;
* When power -> 0, it converges to max-product;
* When power -> -infinity, it converges to leximin.
:param agents: a matrix v in which each row represents an agent, each column represents an object, and v[i][j] is the value of agent i to object j.
:return allocation_matrix: a matrix alloc of a similar shape in which alloc[i][j] is the fraction allocated to agent i from object j.
    The allocation should maximize the sum of utilities, each raised to the given power.
>>> max_power_sum_allocation([ [3] , [5] ], 1).round(3).matrix
[[0.]
[1.]]
>>> max_power_sum_allocation([ [3] , [5] ], 0.1).round(3).matrix
[[0.486]
[0.514]]
>>> max_power_sum_allocation([ [3] , [5] ], 0).round(3).matrix
[[0.5]
[0.5]]
>>> max_power_sum_allocation([ [3] , [5] ], -0.1).round(3).matrix
[[0.512]
[0.488]]
>>> max_power_sum_allocation([ [3] , [5] ], -1).round(3).matrix
[[0.564]
[0.436]]
"""
if power>0:
welfare_function=lambda utilities: sum([utility**power for utility in utilities])
elif power<0:
welfare_function=lambda utilities: -sum([utility**power for utility in utilities])
else:
welfare_function=lambda utilities: sum([cvxpy.log(utility) for utility in utilities])
return max_welfare_allocation(instance,
welfare_function=welfare_function,
welfare_constraint_function=lambda utility: utility >= 0)
def max_product_allocation(instance) -> Allocation:
"""
Find the max-product (aka Max Nash Welfare) allocation.
:param agents: a matrix v in which each row represents an agent, each column represents an object, and v[i][j] is the value of agent i to object j.
:return allocation_matrix: a matrix alloc of a similar shape in which alloc[i][j] is the fraction allocated to agent i from object j.
The allocation should maximize the product (= sum of logs) of utilities
>>> max_product_allocation([ [3] , [5] ]).round(3).matrix # single item
[[0.5]
[0.5]]
>>> max_product_allocation([ [3,3] , [1,1] ]).round(3).matrix # two identical items
[[0.5 0.5]
[0.5 0.5]]
>>> max_product_allocation([ [3,2] , [1,4] ]).round(3).matrix # two different items
[[1. 0.]
[0. 1.]]
"""
return max_welfare_allocation(instance,
welfare_function=lambda utilities: sum([cvxpy.log(utility) for utility in utilities]),
welfare_constraint_function=lambda utility: utility >= 0)
def max_minimum_allocation(instance) -> Allocation:
"""
Find the max-minimum (aka Egalitarian) allocation.
:param agents: a matrix v in which each row represents an agent, each column represents an object, and v[i][j] is the value of agent i to object j.
:return allocation_matrix: a matrix alloc of a similar shape in which alloc[i][j] is the fraction allocated to agent i from object j.
    The allocation should maximize the minimum utility (egalitarian welfare).
>>> a = max_minimum_allocation([ [3] , [5] ]) # single item
>>> a
Agent #0 gets { 62.5% of 0} with value 1.88.
Agent #1 gets { 37.5% of 0} with value 1.88.
<BLANKLINE>
>>> a.matrix
[[0.625]
[0.375]]
>>> max_minimum_allocation([ [4,2] , [1,4] ]).round(3).matrix # two different items
[[1. 0.]
[0. 1.]]
>>> alloc = max_minimum_allocation([ [3,3] , [1,1] ]).round(3).matrix # two identical items
>>> [sum(alloc[i]) for i in alloc.agents()]
[0.5, 1.5]
>>> v = [ [4,2] , [1,3] ] # two different items
>>> a = max_minimum_allocation(v).round(3)
>>> a.matrix
[[0.8 0. ]
[0.2 1. ]]
>>> print(a.utility_profile())
[3.2 3.2]
"""
return max_welfare_allocation(instance,
welfare_function=lambda utilities: cvxpy.min(cvxpy.hstack(utilities)),
welfare_constraint_function=lambda utility: utility >= 0)
def max_minimum_allocation_for_families(instance, families) -> AllocationToFamilies:
"""
Find the max-minimum (aka Egalitarian) allocation.
:param agents: a matrix v in which each row represents an agent, each column represents an object, and v[i][j] is the value of agent i to object j.
:param families: a list of lists. Each list represents a family and contains the indices of the agents in the family.
:return allocation_matrix: a matrix alloc of a similar shape in which alloc[i][j] is the fraction allocated to agent i from object j.
    The allocation should maximize the minimum utility (egalitarian welfare).
>>> families = [ [0], [1] ] # two singleton families
>>> max_minimum_allocation_for_families([ [3] , [5] ],families).round(3).matrix
[[0.625]
[0.375]]
>>> max_minimum_allocation_for_families([ [4,2] , [1,4] ], families).round(3).matrix # two different items
[[1. 0.]
[0. 1.]]
>>> alloc = max_minimum_allocation_for_families([ [3,3] , [1,1] ], families).round(3).matrix # two identical items
>>> [sum(alloc[i]) for i in alloc.agents()]
[0.5, 1.5]
>>> v = [ [4,2] , [1,3] ] # two different items
>>> a = max_minimum_allocation_for_families(v, families).round(3)
>>> a
Family #0 with members [0] gets { 80.0% of 0} with values [3.2].
Family #1 with members [1] gets { 20.0% of 0, 100.0% of 1} with values [3.2].
<BLANKLINE>
>>> a.matrix
[[0.8 0. ]
[0.2 1. ]]
>>> print(a.utility_profile())
[3.2 3.2]
    >>> families = [ [0, 1] ]
SN_NOWARN)
set_name(0x8005E690, "PM_DoNewLvl__FP12PlayerStruct", SN_NOWARN)
set_name(0x8005E698, "CheckNewPath__FP12PlayerStruct", SN_NOWARN)
set_name(0x8005EAD8, "PlrDeathModeOK__Fi", SN_NOWARN)
set_name(0x8005EB40, "ValidatePlayer__Fv", SN_NOWARN)
set_name(0x8005F028, "CheckCheatStats__FP12PlayerStruct", SN_NOWARN)
set_name(0x8005F0C4, "ProcessPlayers__Fv", SN_NOWARN)
set_name(0x8005F3F8, "ClrPlrPath__FP12PlayerStruct", SN_NOWARN)
set_name(0x8005F420, "PosOkPlayer__FP12PlayerStructii", SN_NOWARN)
set_name(0x8005F5C8, "MakePlrPath__FP12PlayerStructiiUc", SN_NOWARN)
set_name(0x8005F5D0, "CheckPlrSpell__Fv", SN_NOWARN)
set_name(0x8005F9E0, "SyncInitPlrPos__FP12PlayerStruct", SN_NOWARN)
set_name(0x8005FB08, "SyncInitPlr__FP12PlayerStruct", SN_NOWARN)
set_name(0x8005FB38, "CheckStats__Fi", SN_NOWARN)
set_name(0x8005FCD4, "ModifyPlrStr__Fii", SN_NOWARN)
set_name(0x8005FDF0, "ModifyPlrMag__Fii", SN_NOWARN)
set_name(0x8005FEDC, "ModifyPlrDex__Fii", SN_NOWARN)
set_name(0x8005FFC0, "ModifyPlrVit__Fii", SN_NOWARN)
set_name(0x8006009C, "SetPlayerHitPoints__FP12PlayerStructi", SN_NOWARN)
set_name(0x800600E0, "SetPlrStr__Fii", SN_NOWARN)
set_name(0x800601BC, "SetPlrMag__Fii", SN_NOWARN)
set_name(0x8006022C, "SetPlrDex__Fii", SN_NOWARN)
set_name(0x80060308, "SetPlrVit__Fii", SN_NOWARN)
set_name(0x80060374, "InitDungMsgs__FP12PlayerStruct", SN_NOWARN)
set_name(0x8006037C, "PlayDungMsgs__Fv", SN_NOWARN)
set_name(0x800606AC, "CreatePlrItems__FP12PlayerStruct", SN_NOWARN)
set_name(0x800606D4, "WorldToOffset__FP12PlayerStructii", SN_NOWARN)
set_name(0x80060718, "SetSpdbarGoldCurs__FP12PlayerStructi", SN_NOWARN)
set_name(0x8006074C, "GetSpellLevel__FP12PlayerStructi", SN_NOWARN)
set_name(0x80060780, "BreakObject__FP12PlayerStructi", SN_NOWARN)
set_name(0x800607B4, "CalcPlrInv__FP12PlayerStructUc", SN_NOWARN)
set_name(0x800607E8, "RemoveSpdBarItem__FP12PlayerStructi", SN_NOWARN)
set_name(0x8006081C, "M_StartKill__FiP12PlayerStruct", SN_NOWARN)
set_name(0x80060854, "SetGoldCurs__FP12PlayerStructi", SN_NOWARN)
set_name(0x80060888, "HealStart__FP12PlayerStruct", SN_NOWARN)
set_name(0x800608B0, "HealotherStart__FP12PlayerStruct", SN_NOWARN)
set_name(0x800608D8, "CalculateGold__FP12PlayerStruct", SN_NOWARN)
set_name(0x80060900, "M_StartHit__FiP12PlayerStructi", SN_NOWARN)
set_name(0x80060948, "TeleStart__FP12PlayerStruct", SN_NOWARN)
set_name(0x80060970, "PhaseStart__FP12PlayerStruct", SN_NOWARN)
set_name(0x80060998, "RemoveInvItem__FP12PlayerStructi", SN_NOWARN)
set_name(0x800609CC, "PhaseEnd__FP12PlayerStruct", SN_NOWARN)
set_name(0x800609F4, "OperateObject__FP12PlayerStructiUc", SN_NOWARN)
set_name(0x80060A38, "TryDisarm__FP12PlayerStructi", SN_NOWARN)
set_name(0x80060A6C, "TalkToTowner__FP12PlayerStructi", SN_NOWARN)
set_name(0x80060AA0, "PosOkPlayer__Fiii", SN_NOWARN)
set_name(0x80060AEC, "CalcStatDiff__Fi", SN_NOWARN)
set_name(0x80060B38, "StartNewLvl__Fiii", SN_NOWARN)
set_name(0x80060B84, "CreatePlayer__Fic", SN_NOWARN)
set_name(0x80060BD8, "StartStand__Fii", SN_NOWARN)
set_name(0x80060C24, "SetPlayerHitPoints__Fii", SN_NOWARN)
set_name(0x80060C70, "MakePlrPath__FiiiUc", SN_NOWARN)
set_name(0x80060CC0, "StartWarpLvl__Fii", SN_NOWARN)
set_name(0x80060D0C, "SyncPlrKill__Fii", SN_NOWARN)
set_name(0x80060D58, "StartPlrKill__Fii", SN_NOWARN)
set_name(0x80060DA4, "NewPlrAnim__Fiiii", SN_NOWARN)
set_name(0x80060DF0, "AddPlrExperience__Fiil", SN_NOWARN)
set_name(0x80060E3C, "StartPlrBlock__Fii", SN_NOWARN)
set_name(0x80060E88, "StartPlrHit__FiiUc", SN_NOWARN)
set_name(0x80060ED8, "StartSpell__Fiiii", SN_NOWARN)
set_name(0x80060F24, "InitPlayer__FiUc", SN_NOWARN)
set_name(0x80060F74, "PM_ChangeLightOff__Fi", SN_NOWARN)
set_name(0x80060FC0, "CheckNewPath__Fi", SN_NOWARN)
set_name(0x8006100C, "FreePlayerGFX__Fi", SN_NOWARN)
set_name(0x80061058, "InitDungMsgs__Fi", SN_NOWARN)
set_name(0x800610A4, "InitPlayerGFX__Fi", SN_NOWARN)
set_name(0x800610F0, "SyncInitPlrPos__Fi", SN_NOWARN)
set_name(0x8006113C, "SetPlrAnims__Fi", SN_NOWARN)
set_name(0x80061188, "ClrPlrPath__Fi", SN_NOWARN)
set_name(0x800611D4, "SyncInitPlr__Fi", SN_NOWARN)
set_name(0x80061220, "RestartTownLvl__Fi", SN_NOWARN)
set_name(0x8006126C, "SetPlayerOld__Fi", SN_NOWARN)
set_name(0x800612B8, "GetGoldSeed__FP12PlayerStructP10ItemStruct", SN_NOWARN)
set_name(0x800612EC, "PRIM_GetPrim__FPP8POLY_FT4_addr_800612EC", SN_NOWARN)
set_name(0x80061368, "GetPlayer__7CPlayeri_addr_80061368", SN_NOWARN)
set_name(0x800613B8, "GetLastOtPos__C7CPlayer_addr_800613B8", SN_NOWARN)
set_name(0x800613C4, "GetLastScrY__C7CPlayer", SN_NOWARN)
set_name(0x800613D0, "GetLastScrX__C7CPlayer", SN_NOWARN)
set_name(0x800613DC, "TSK_Lava2Water__FP4TASK", SN_NOWARN)
set_name(0x80061628, "CheckQuests__Fv", SN_NOWARN)
set_name(0x80061ADC, "ForceQuests__Fv", SN_NOWARN)
set_name(0x80061C80, "QuestStatus__Fi", SN_NOWARN)
set_name(0x80061D14, "CheckQuestKill__FiUc", SN_NOWARN)
set_name(0x800622F4, "SetReturnLvlPos__Fv", SN_NOWARN)
set_name(0x80062404, "GetReturnLvlPos__Fv", SN_NOWARN)
set_name(0x80062458, "ResyncMPQuests__Fv", SN_NOWARN)
set_name(0x80062594, "ResyncQuests__Fv", SN_NOWARN)
set_name(0x80062AF4, "PrintQLString__FiiUcPcc", SN_NOWARN)
set_name(0x80062D20, "DrawQuestLog__Fv", SN_NOWARN)
set_name(0x80062EE8, "DrawQuestLogTSK__FP4TASK", SN_NOWARN)
set_name(0x80062F80, "StartQuestlog__Fv", SN_NOWARN)
set_name(0x80063098, "QuestlogUp__Fv", SN_NOWARN)
set_name(0x800630EC, "QuestlogDown__Fv", SN_NOWARN)
set_name(0x80063158, "RemoveQLog__Fv", SN_NOWARN)
set_name(0x800631D0, "QuestlogEnter__Fv", SN_NOWARN)
set_name(0x80063294, "QuestlogESC__Fv", SN_NOWARN)
set_name(0x800632BC, "SetMultiQuest__FiiUci", SN_NOWARN)
set_name(0x8006333C, "_GLOBAL__D_questlog", SN_NOWARN)
set_name(0x80063364, "_GLOBAL__I_questlog", SN_NOWARN)
set_name(0x8006338C, "GetBlockTexDat__7CBlocks", SN_NOWARN)
set_name(0x80063398, "SetRGB__6DialogUcUcUc_addr_80063398", SN_NOWARN)
set_name(0x800633B8, "SetBack__6Dialogi_addr_800633B8", SN_NOWARN)
set_name(0x800633C0, "SetBorder__6Dialogi_addr_800633C0", SN_NOWARN)
set_name(0x800633C8, "___6Dialog_addr_800633C8", SN_NOWARN)
set_name(0x800633F0, "__6Dialog_addr_800633F0", SN_NOWARN)
set_name(0x8006344C, "GetPal__7TextDati_addr_8006344C", SN_NOWARN)
set_name(0x80063468, "GetFr__7TextDati_addr_80063468", SN_NOWARN)
set_name(0x80063484, "DrawView__Fii", SN_NOWARN)
set_name(0x8006364C, "DrawAndBlit__Fv", SN_NOWARN)
set_name(0x80063778, "FreeStoreMem__Fv", SN_NOWARN)
set_name(0x80063780, "DrawSTextBack__Fv", SN_NOWARN)
set_name(0x800637F0, "PrintSString__FiiUcPcci", SN_NOWARN)
set_name(0x80063BE4, "DrawSLine__Fi", SN_NOWARN)
set_name(0x80063C78, "ClearSText__Fii", SN_NOWARN)
set_name(0x80063D10, "AddSLine__Fi", SN_NOWARN)
set_name(0x80063D60, "AddSTextVal__Fii", SN_NOWARN)
set_name(0x80063D88, "AddSText__FiiUcPccUc", SN_NOWARN)
set_name(0x80063E3C, "PrintStoreItem__FPC10ItemStructic", SN_NOWARN)
set_name(0x800642C4, "StoreAutoPlace__Fv", SN_NOWARN)
set_name(0x800648E4, "S_StartSmith__Fv", SN_NOWARN)
set_name(0x80064A6C, "S_ScrollSBuy__Fi", SN_NOWARN)
set_name(0x80064C24, "S_StartSBuy__Fv", SN_NOWARN)
set_name(0x80064D54, "S_ScrollSPBuy__Fi", SN_NOWARN)
set_name(0x80064F74, "S_StartSPBuy__Fv", SN_NOWARN)
set_name(0x800650C4, "SmithSellOk__Fi", SN_NOWARN)
set_name(0x800651A8, "S_ScrollSSell__Fi", SN_NOWARN)
set_name(0x800653D0, "S_StartSSell__Fv", SN_NOWARN)
set_name(0x80065800, "SmithRepairOk__Fi", SN_NOWARN)
set_name(0x800658A4, "AddStoreHoldRepair__FP10ItemStructi", SN_NOWARN)
set_name(0x80065A84, "S_StartSRepair__Fv", SN_NOWARN)
set_name(0x80065F54, "S_StartWitch__Fv", SN_NOWARN)
set_name(0x80066094, "S_ScrollWBuy__Fi", SN_NOWARN)
set_name(0x8006626C, "S_StartWBuy__Fv", SN_NOWARN)
set_name(0x80066398, "WitchSellOk__Fi", SN_NOWARN)
set_name(0x800664BC, "S_StartWSell__Fv", SN_NOWARN)
set_name(0x80066B14, "WitchRechargeOk__Fi", SN_NOWARN)
set_name(0x80066B9C, "AddStoreHoldRecharge__FG10ItemStructi", SN_NOWARN)
set_name(0x80066D1C, "S_StartWRecharge__Fv", SN_NOWARN)
set_name(0x8006713C, "S_StartNoMoney__Fv", SN_NOWARN)
set_name(0x800671A4, "S_StartNoRoom__Fv", SN_NOWARN)
set_name(0x80067204, "S_StartConfirm__Fv", SN_NOWARN)
set_name(0x8006757C, "S_StartBoy__Fv", SN_NOWARN)
set_name(0x8006770C, "S_StartBBoy__Fv", SN_NOWARN)
set_name(0x80067894, "S_StartHealer__Fv", SN_NOWARN)
set_name(0x80067A68, "S_ScrollHBuy__Fi", SN_NOWARN)
set_name(0x80067BD4, "S_StartHBuy__Fv", SN_NOWARN)
set_name(0x80067CF4, "S_StartStory__Fv", SN_NOWARN)
set_name(0x80067DE4, "IdItemOk__FP10ItemStruct", SN_NOWARN)
set_name(0x80067E18, "AddStoreHoldId__FG10ItemStructi", SN_NOWARN)
set_name(0x80067EEC, "S_StartSIdentify__Fv", SN_NOWARN)
set_name(0x8006894C, "S_StartIdShow__Fv", SN_NOWARN)
set_name(0x80068B20, "S_StartTalk__Fv", SN_NOWARN)
set_name(0x80068D50, "S_StartTavern__Fv", SN_NOWARN)
set_name(0x80068E48, "S_StartBarMaid__Fv", SN_NOWARN)
set_name(0x80068F1C, "S_StartDrunk__Fv", SN_NOWARN)
set_name(0x80068FF0, "StartStore__Fc", SN_NOWARN)
set_name(0x800692D8, "DrawSText__Fv", SN_NOWARN)
set_name(0x80069318, "DrawSTextTSK__FP4TASK", SN_NOWARN)
set_name(0x800693E0, "DoThatDrawSText__Fv", SN_NOWARN)
set_name(0x8006958C, "STextESC__Fv", SN_NOWARN)
set_name(0x80069700, "STextUp__Fv", SN_NOWARN)
set_name(0x80069898, "STextDown__Fv", SN_NOWARN)
set_name(0x80069A48, "S_SmithEnter__Fv", SN_NOWARN)
set_name(0x80069B1C, "SetGoldCurs__Fii", SN_NOWARN)
set_name(0x80069B98, "SetSpdbarGoldCurs__Fii", SN_NOWARN)
set_name(0x80069C14, "TakePlrsMoney__Fl", SN_NOWARN)
set_name(0x8006A060, "SmithBuyItem__Fv", SN_NOWARN)
set_name(0x8006A254, "S_SBuyEnter__Fv", SN_NOWARN)
set_name(0x8006A478, "SmithBuyPItem__Fv", SN_NOWARN)
set_name(0x8006A600, "S_SPBuyEnter__Fv", SN_NOWARN)
set_name(0x8006A830, "StoreGoldFit__Fi", SN_NOWARN)
set_name(0x8006AAE8, "PlaceStoreGold__Fl", SN_NOWARN)
set_name(0x8006AD4C, "StoreSellItem__Fv", SN_NOWARN)
set_name(0x8006B040, "S_SSellEnter__Fv", SN_NOWARN)
set_name(0x8006B144, "SmithRepairItem__Fv", SN_NOWARN)
set_name(0x8006B3B4, "S_SRepairEnter__Fv", SN_NOWARN)
set_name(0x8006B510, "S_WitchEnter__Fv", SN_NOWARN)
set_name(0x8006B5C0, "WitchBuyItem__Fv", SN_NOWARN)
set_name(0x8006B7C0, "S_WBuyEnter__Fv", SN_NOWARN)
set_name(0x8006B9AC, "S_WSellEnter__Fv", SN_NOWARN)
set_name(0x8006BAB0, "WitchRechargeItem__Fv", SN_NOWARN)
set_name(0x8006BC28, "S_WRechargeEnter__Fv", SN_NOWARN)
set_name(0x8006BD84, "S_BoyEnter__Fv", SN_NOWARN)
set_name(0x8006BEBC, "BoyBuyItem__Fv", SN_NOWARN)
set_name(0x8006BF40, "HealerBuyItem__Fv", SN_NOWARN)
set_name(0x8006C1E4, "S_BBuyEnter__Fv", SN_NOWARN)
set_name(0x8006C3CC, "StoryIdItem__Fv", SN_NOWARN)
set_name(0x8006C718, "S_ConfirmEnter__Fv", SN_NOWARN)
set_name(0x8006C834, "S_HealerEnter__Fv", SN_NOWARN)
set_name(0x8006C8CC, "S_HBuyEnter__Fv", SN_NOWARN)
set_name(0x8006CAD8, "S_StoryEnter__Fv", SN_NOWARN)
set_name(0x8006CB70, "S_SIDEnter__Fv", SN_NOWARN)
set_name(0x8006CCEC, "S_TalkEnter__Fv", SN_NOWARN)
set_name(0x8006CEE4, "S_TavernEnter__Fv", SN_NOWARN)
set_name(0x8006CF54, "S_BarmaidEnter__Fv", SN_NOWARN)
set_name(0x8006CFC4, "S_DrunkEnter__Fv", SN_NOWARN)
set_name(0x8006D034, "STextEnter__Fv", SN_NOWARN)
set_name(0x8006D1F8, "CheckStoreBtn__Fv", SN_NOWARN)
set_name(0x8006D2D0, "ReleaseStoreBtn__Fv", SN_NOWARN)
set_name(0x8006D2E4, "_GLOBAL__D_pSTextBoxCels", SN_NOWARN)
set_name(0x8006D30C, "_GLOBAL__I_pSTextBoxCels", SN_NOWARN)
set_name(0x8006D334, "GetDown__C4CPad_addr_8006D334", SN_NOWARN)
set_name(0x8006D35C, "SetRGB__6DialogUcUcUc_addr_8006D35C", SN_NOWARN)
set_name(0x8006D37C, "SetBorder__6Dialogi_addr_8006D37C", SN_NOWARN)
set_name(0x8006D384, "___6Dialog_addr_8006D384", SN_NOWARN)
set_name(0x8006D3AC, "__6Dialog_addr_8006D3AC", SN_NOWARN)
set_name(0x8006D408, "T_DrawView__Fii", SN_NOWARN)
set_name(0x8006D5B8, "T_FillSector__FPUcT0iiiib", SN_NOWARN)
set_name(0x8006D7B0, "T_FillTile__FPUciii", SN_NOWARN)
set_name(0x8006D8A0, "T_Pass3__Fv", SN_NOWARN)
set_name(0x8006DC60, "CreateTown__Fi", SN_NOWARN)
set_name(0x8006DDC8, "GRL_LoadFileInMemSig__FPCcPUl", SN_NOWARN)
set_name(0x8006DEAC, "GRL_StripDir__FPcPCc", SN_NOWARN)
set_name(0x8006DF44, "InitVPTriggers__Fv", SN_NOWARN)
set_name(0x8006DF8C, "ForceTownTrig__Fv", SN_NOWARN)
set_name(0x8006E2A4, "ForceL1Trig__Fv", SN_NOWARN)
set_name(0x8006E554, "ForceL2Trig__Fv", SN_NOWARN)
set_name(0x8006E9B4, "ForceL3Trig__Fv", SN_NOWARN)
set_name(0x8006EE30, "ForceL4Trig__Fv", SN_NOWARN)
set_name(0x8006F33C, "Freeupstairs__Fv", SN_NOWARN)
set_name(0x8006F3FC, "ForceSKingTrig__Fv", SN_NOWARN)
set_name(0x8006F4F0, "ForceSChambTrig__Fv", SN_NOWARN)
set_name(0x8006F5E4, "ForcePWaterTrig__Fv", SN_NOWARN)
set_name(0x8006F6D8, "CheckTrigForce__Fv", SN_NOWARN)
set_name(0x8006F9E0, "FadeGameOut__Fv", SN_NOWARN)
set_name(0x8006FA7C, "IsTrigger__Fii", SN_NOWARN)
set_name(0x8006FAE0, "CheckTriggers__Fi", SN_NOWARN)
set_name(0x8006FFFC, "GetManaAmount__Fii", SN_NOWARN)
set_name(0x800702C4, "UseMana__Fii", SN_NOWARN)
set_name(0x80070408, "CheckSpell__FiicUc", SN_NOWARN)
set_name(0x800704A8, "CastSpell__Fiiiiiiii", SN_NOWARN)
set_name(0x80070754, "DoResurrect__Fii", SN_NOWARN)
set_name(0x80070A08, "DoHealOther__Fii", SN_NOWARN)
set_name(0x80070C6C, "snd_update__FUc", SN_NOWARN)
set_name(0x80070C74, "snd_get_volume__FPCcPl", SN_NOWARN)
set_name(0x80070CDC, "snd_stop_snd__FP4TSnd", SN_NOWARN)
set_name(0x80070CFC, "snd_play_snd__FP4TSFXll", SN_NOWARN)
set_name(0x80070D5C, "snd_play_msnd__FUsll", SN_NOWARN)
set_name(0x80070DEC, "snd_init__FUl", SN_NOWARN)
set_name(0x80070E3C, "music_stop__Fv", SN_NOWARN)
set_name(0x80070E80, "music_fade__Fv", SN_NOWARN)
set_name(0x80070EC0, "music_start__Fi", SN_NOWARN)
set_name(0x80070F44, "music_hold__Fv", SN_NOWARN)
set_name(0x80070FA4, "music_release__Fv", SN_NOWARN)
set_name(0x80070FF4, "snd_playing__Fi", SN_NOWARN)
set_name(0x80071014, "ClrCursor__Fi", SN_NOWARN)
set_name(0x80071064, "flyabout__7GamePad", SN_NOWARN)
set_name(0x80071520, "CloseInvChr__Fv", SN_NOWARN)
set_name(0x80071570, "LeftOf__Fi", SN_NOWARN)
set_name(0x80071588, "RightOf__Fi", SN_NOWARN)
set_name(0x800715A4, "WorldToOffset__Fiii", SN_NOWARN)
set_name(0x80071650, "pad_UpIsUpRight__Fic", SN_NOWARN)
set_name(0x80071714, "__7GamePadi", SN_NOWARN)
set_name(0x80071808, "SetMoveStyle__7GamePadc", SN_NOWARN)
set_name(0x80071810, "SetDownButton__7GamePadiPFi_v", SN_NOWARN)
set_name(0x80071854, "SetComboDownButton__7GamePadiPFi_v", SN_NOWARN)
set_name(0x80071898, "SetAllButtons__7GamePadP11KEY_ASSIGNS", SN_NOWARN)
set_name(0x80071AF8, "GetAllButtons__7GamePadP11KEY_ASSIGNS", SN_NOWARN)
set_name(0x80071CA8, "GetActionButton__7GamePadPFi_v", SN_NOWARN)
set_name(0x80071D04, "SetUpAction__7GamePadPFi_vT1", SN_NOWARN)
set_name(0x80071D40, "RunFunc__7GamePadi", SN_NOWARN)
set_name(0x80071E04, "ButtonDown__7GamePadi", SN_NOWARN)
set_name(0x80072210, "TestButtons__7GamePad", SN_NOWARN)
set_name(0x80072354, "CheckCentre__FP12PlayerStructi", SN_NOWARN)
set_name(0x80072448, "CheckDirs__7GamePadi", SN_NOWARN)
set_name(0x80072560, "CheckSide__7GamePadi", SN_NOWARN)
set_name(0x800725B4, "CheckBodge__7GamePadi", SN_NOWARN)
set_name(0x800729C0, "walk__7GamePadc", SN_NOWARN)
set_name(0x80072CD8, "check_around_player__7GamePad", SN_NOWARN)
set_name(0x800730B8, "show_combos__7GamePad", SN_NOWARN)
set_name(0x80073258, "Handle__7GamePad", SN_NOWARN)
set_name(0x80073930, "GamePadTask__FP4TASK", SN_NOWARN)
set_name(0x800739FC, "PostGamePad__Fiiii", SN_NOWARN)
set_name(0x80073B0C, "Init_GamePad__Fv", SN_NOWARN)
set_name(0x80073B3C, "InitGamePadVars__Fv", SN_NOWARN)
set_name(0x80073BCC, "SetWalkStyle__Fii", SN_NOWARN)
set_name(0x80073C3C, "GetPadStyle__Fi", SN_NOWARN)
set_name(0x80073C60, "_GLOBAL__I_flyflag", SN_NOWARN)
set_name(0x80073C98, "MoveToScrollTarget__7CBlocks_addr_80073C98", SN_NOWARN)
set_name(0x80073CAC, "GetDown__C4CPad_addr_80073CAC", SN_NOWARN)
set_name(0x80073CD4, "GetUp__C4CPad_addr_80073CD4", SN_NOWARN)
set_name(0x80073CFC, "GetCur__C4CPad_addr_80073CFC", SN_NOWARN)
set_name(0x80073D24, "DoGameTestStuff__Fv", SN_NOWARN)
set_name(0x80073D50, "DoInitGameStuff__Fv", SN_NOWARN)
set_name(0x80073D84, "SMemAlloc", SN_NOWARN)
set_name(0x80073DA4, "SMemFree", SN_NOWARN)
set_name(0x80073DC4, "GRL_InitGwin__Fv", SN_NOWARN)
set_name(0x80073DD0, "GRL_SetWindowProc__FPFUlUilUl_Ul", SN_NOWARN)
set_name(0x80073DE0, "GRL_CallWindowProc__FUlUilUl", SN_NOWARN)
set_name(0x80073E08, "GRL_PostMessage__FUlUilUl", SN_NOWARN)
set_name(0x80073EB4, "Msg2Txt__Fi", SN_NOWARN)
set_name(0x80073EFC, "LANG_GetLang__Fv", SN_NOWARN)
set_name(0x80073F08, "LANG_SetDb__F10LANG_DB_NO", SN_NOWARN)
set_name(0x80074074, "GetStr__Fi", SN_NOWARN)
set_name(0x800740DC, "LANG_ReloadMainTXT__Fv", SN_NOWARN)
set_name(0x80074110, "LANG_SetLang__F9LANG_TYPE", SN_NOWARN)
set_name(0x80074274, "DumpCurrentText__Fv", SN_NOWARN)
set_name(0x800742CC, "CalcNumOfStrings__FPPc", SN_NOWARN)
set_name(0x800742D8, "GetLangFileName__F9LANG_TYPEPc", SN_NOWARN)
set_name(0x800743A0, "GetLangFileNameExt__F9LANG_TYPE", SN_NOWARN)
set_name(0x80074420, "TempPrintMissile__FiiiiiiiiccUcUcUcc", SN_NOWARN)
set_name(0x80074858, "FuncTOWN__FP13MissileStructiii", SN_NOWARN)
set_name(0x800749D8, "FuncRPORTAL__FP13MissileStructiii", SN_NOWARN)
set_name(0x80074B38, "FuncFIREBOLT__FP13MissileStructiii", SN_NOWARN)
set_name(0x80074BD0, "FuncHBOLT__FP13MissileStructiii", SN_NOWARN)
set_name(0x80074C80, "FuncLIGHTNING__FP13MissileStructiii", SN_NOWARN)
set_name(0x80074CE4, "FuncGUARDIAN__FP13MissileStructiii", SN_NOWARN)
set_name(0x80074DFC, "FuncFIREWALL__FP13MissileStructiii", SN_NOWARN)
set_name(0x80074E94, "FuncFIREMOVE__FP13MissileStructiii", SN_NOWARN)
set_name(0x80074F2C, "FuncFLAME__FP13MissileStructiii", SN_NOWARN)
set_name(0x80074F94, "FuncARROW__FP13MissileStructiii", SN_NOWARN)
set_name(0x80075034, "FuncFARROW__FP13MissileStructiii", SN_NOWARN)
set_name(0x80075114, "FuncLARROW__FP13MissileStructiii", SN_NOWARN)
set_name(0x800751EC, "FuncMAGMABALL__FP13MissileStructiii", SN_NOWARN)
set_name(0x8007527C, "FuncBONESPIRIT__FP13MissileStructiii", SN_NOWARN)
set_name(0x80075398, "FuncACID__FP13MissileStructiii", SN_NOWARN)
set_name(0x80075434, "FuncACIDSPLAT__FP13MissileStructiii", SN_NOWARN)
set_name(0x8007549C, "FuncACIDPUD__FP13MissileStructiii", SN_NOWARN)
set_name(0x80075504, "FuncFLARE__FP13MissileStructiii", SN_NOWARN)
set_name(0x80075668, "FuncFLAREXP__FP13MissileStructiii", SN_NOWARN)
set_name(0x800757AC, "FuncCBOLT__FP13MissileStructiii", SN_NOWARN)
set_name(0x80075814, "FuncBOOM__FP13MissileStructiii", SN_NOWARN)
set_name(0x8007586C, "FuncELEMENT__FP13MissileStructiii", SN_NOWARN)
set_name(0x80075938, "FuncMISEXP__FP13MissileStructiii", SN_NOWARN)
set_name(0x8007599C, "FuncRHINO__FP13MissileStructiii", SN_NOWARN)
set_name(0x800759A4, "FuncFLASH__FP13MissileStructiii", SN_NOWARN)
set_name(0x80075ECC, "FuncMANASHIELD__FP13MissileStructiii", SN_NOWARN)
set_name(0x80075F74, "FuncFLASH2__FP13MissileStructiii", SN_NOWARN)
set_name(0x80075F7C, "FuncRESURRECTBEAM__FP13MissileStructiii", SN_NOWARN)
set_name(0x80075FB0, "FuncWEAPEXP__FP13MissileStructiii", SN_NOWARN)
set_name(0x80075FD4, "PRIM_GetPrim__FPP8POLY_FT4_addr_80075FD4", SN_NOWARN)
set_name(0x80076050, "GetPlayer__7CPlayeri_addr_80076050", SN_NOWARN)
set_name(0x800760A0, "GetLastOtPos__C7CPlayer_addr_800760A0", SN_NOWARN)
set_name(0x800760AC, "GetLastScrY__C7CPlayer_addr_800760AC", SN_NOWARN)
set_name(0x800760B8, "GetLastScrX__C7CPlayer_addr_800760B8", SN_NOWARN)
set_name(0x800760C4, "GetNumOfFrames__7TextDat_addr_800760C4", SN_NOWARN)
set_name(0x800760D8, "GetFr__7TextDati_addr_800760D8", SN_NOWARN)
set_name(0x800760F4, "ML_Init__Fv", SN_NOWARN)
set_name(0x8007612C, "ML_GetList__Fi", SN_NOWARN)
set_name(0x800761AC, "ML_SetRandomList__Fi", SN_NOWARN)
set_name(0x80076244, "ML_SetList__Fii", SN_NOWARN)
set_name(0x800762F4, "ML_GetPresetMonsters__FiPiUl", SN_NOWARN)
set_name(0x800764B0, "DefaultObjPrint__FP12ObjectStructiiP7TextDatiii", SN_NOWARN)
set_name(0x80076644, "LightObjPrint__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x800766FC, "DoorObjPrint__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076990, "DrawLightSpark__Fiii", SN_NOWARN)
set_name(0x80076A68, "PrintOBJ_L1LIGHT__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076AF0, "PrintOBJ_SKFIRE__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076B1C, "PrintOBJ_LEVER__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076B48, "PrintOBJ_CHEST1__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076B74, "PrintOBJ_CHEST2__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076BA0, "PrintOBJ_CHEST3__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076BCC, "PrintOBJ_CANDLE1__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076BF0, "PrintOBJ_CANDLE2__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076C14, "PrintOBJ_CANDLEO__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076C40, "PrintOBJ_BANNERL__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076C6C, "PrintOBJ_BANNERM__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076C98, "PrintOBJ_BANNERR__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076CC4, "PrintOBJ_SKPILE__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076CF0, "PrintOBJ_SKSTICK1__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076D1C, "PrintOBJ_SKSTICK2__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076D48, "PrintOBJ_SKSTICK3__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076D74, "PrintOBJ_SKSTICK4__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076DA0, "PrintOBJ_SKSTICK5__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076DCC, "PrintOBJ_CRUX1__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076DF8, "PrintOBJ_CRUX2__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076E24, "PrintOBJ_CRUX3__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076E50, "PrintOBJ_STAND__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076E7C, "PrintOBJ_ANGEL__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076EA8, "PrintOBJ_BOOK2L__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076ED4, "PrintOBJ_BCROSS__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076F00, "PrintOBJ_NUDEW2R__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076F2C, "PrintOBJ_SWITCHSKL__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076F58, "PrintOBJ_TNUDEM1__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076F84, "PrintOBJ_TNUDEM2__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076FB0, "PrintOBJ_TNUDEM3__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80076FDC, "PrintOBJ_TNUDEM4__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077008, "PrintOBJ_TNUDEW1__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077034, "PrintOBJ_TNUDEW2__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077060, "PrintOBJ_TNUDEW3__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x8007708C, "PrintOBJ_TORTURE1__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x800770B8, "PrintOBJ_TORTURE2__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x800770E4, "PrintOBJ_TORTURE3__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077110, "PrintOBJ_TORTURE4__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x8007713C, "PrintOBJ_TORTURE5__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077168, "PrintOBJ_BOOK2R__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077194, "PrintTorchStick__Fiiii", SN_NOWARN)
set_name(0x80077228, "PrintOBJ_TORCHL__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x800772B8, "PrintOBJ_TORCHR__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077348, "PrintOBJ_TORCHL2__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x800773D8, "PrintOBJ_TORCHR2__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077468, "PrintOBJ_SARC__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077494, "PrintOBJ_FLAMEHOLE__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x800774C0, "PrintOBJ_FLAMELVR__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x800774EC, "PrintOBJ_WATER__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077518, "PrintOBJ_BOOKLVR__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077544, "PrintOBJ_TRAPL__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077570, "PrintOBJ_TRAPR__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x8007759C, "PrintOBJ_BOOKSHELF__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x800775C8, "PrintOBJ_WEAPRACK__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x800775F4, "PrintOBJ_BARREL__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077620, "PrintOBJ_BARRELEX__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077778, "PrintOBJ_SHRINEL__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077844, "PrintOBJ_SHRINER__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077910, "PrintOBJ_SKELBOOK__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x8007793C, "PrintOBJ_BOOKCASEL__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077968, "PrintOBJ_BOOKCASER__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077994, "PrintOBJ_BOOKSTAND__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x800779C0, "PrintOBJ_BOOKCANDLE__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x800779E4, "PrintOBJ_BLOODFTN__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077A10, "PrintOBJ_DECAP__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077A3C, "PrintOBJ_TCHEST1__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077A68, "PrintOBJ_TCHEST2__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077A94, "PrintOBJ_TCHEST3__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077AC0, "PrintOBJ_BLINDBOOK__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077AEC, "PrintOBJ_BLOODBOOK__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077B18, "PrintOBJ_PEDISTAL__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077B44, "PrintOBJ_PURIFYINGFTN__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077B70, "PrintOBJ_ARMORSTAND__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077B9C, "PrintOBJ_ARMORSTANDN__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077BC8, "PrintOBJ_GOATSHRINE__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077BF4, "PrintOBJ_CAULDRON__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077C20, "PrintOBJ_MURKYFTN__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077C4C, "PrintOBJ_TEARFTN__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077C78, "PrintOBJ_ALTBOY__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077CA4, "PrintOBJ_MCIRCLE1__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077E38, "PrintOBJ_STORYBOOK__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077FC0, "PrintOBJ_STORYCANDLE__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80077FE4, "PrintOBJ_STEELTOME__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80078010, "PrintOBJ_WARARMOR__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x8007803C, "PrintOBJ_WARWEAP__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80078068, "PrintOBJ_TBCROSS__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80078094, "PrintOBJ_WEAPONRACK__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x800780C0, "PrintOBJ_WEAPONRACKN__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x800780EC, "PrintOBJ_MUSHPATCH__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80078118, "PrintOBJ_LAZSTAND__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80078144, "PrintOBJ_SLAINHERO__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x80078170, "PrintOBJ_SIGNCHEST__FP12ObjectStructiiP7TextDati", SN_NOWARN)
set_name(0x8007819C, "PRIM_GetCopy__FP8POLY_FT4_addr_8007819C", SN_NOWARN)
set_name(0x800781D8, "PRIM_CopyPrim__FP8POLY_FT4T0_addr_800781D8", SN_NOWARN)
set_name(0x80078200, "PRIM_GetPrim__FPP8POLY_FT4_addr_80078200", SN_NOWARN)
set_name(0x8007827C, "GetBlockTexDat__7CBlocks_addr_8007827C", SN_NOWARN)
set_name(0x80078288, "GetNumOfFrames__7TextDatii_addr_80078288", SN_NOWARN)
set_name(0x800782C0, "GetCreature__7TextDati_addr_800782C0", SN_NOWARN)
set_name(0x80078338, "GetNumOfCreatures__7TextDat_addr_80078338", SN_NOWARN)
set_name(0x8007834C, "GetFr__7TextDati_addr_8007834C", SN_NOWARN)
set_name(0x80078368, "gamemenu_on__Fv", SN_NOWARN)
set_name(0x80078370, "gamemenu_off__Fv", SN_NOWARN)
set_name(0x80078378, "LoadPalette__FPCc", SN_NOWARN)
set_name(0x80078380, "LoadRndLvlPal__Fi", SN_NOWARN)
set_name(0x80078388, "ResetPal__Fv", SN_NOWARN)
set_name(0x80078390, "SetFadeLevel__Fi", SN_NOWARN)
set_name(0x800783C0, "GetFadeState__Fv", SN_NOWARN)
set_name(0x800783CC, "SetPolyXY__FP8POLY_GT4PUc", SN_NOWARN)
set_name(0x800784E8, "SmearScreen__Fv", SN_NOWARN)
set_name(0x800784F0, "DrawFadedScreen__Fv", SN_NOWARN)
set_name(0x80078544, "BlackPalette__Fv", SN_NOWARN)
set_name(0x80078600, "PaletteFadeInTask__FP4TASK", SN_NOWARN)
set_name(0x80078690, "PaletteFadeIn__Fi", SN_NOWARN)
set_name(0x800786E8, "PaletteFadeOutTask__FP4TASK", SN_NOWARN)
set_name(0x80078798, "PaletteFadeOut__Fi", SN_NOWARN)
set_name(0x800787EC, "M_CheckEFlag__Fi", SN_NOWARN)
set_name(0x8007880C, "M_ClearSquares__Fi", SN_NOWARN)
set_name(0x80078978, "IsSkel__Fi", SN_NOWARN)
set_name(0x800789B4, "NewMonsterAnim__FiR10AnimStructii", SN_NOWARN)
set_name(0x80078A00, "M_Ranged__Fi", SN_NOWARN)
set_name(0x80078A48, "M_Talker__Fi", SN_NOWARN)
set_name(0x80078AA8, "M_Enemy__Fi", SN_NOWARN)
set_name(0x8007901C, "ClearMVars__Fi", SN_NOWARN)
set_name(0x80079090, "InitMonster__Fiiiii", SN_NOWARN)
set_name(0x800794DC, "AddMonster__FiiiiUc", SN_NOWARN)
set_name(0x8007958C, "M_StartStand__Fii", SN_NOWARN)
set_name(0x800796D0, "M_UpdateLeader__Fi", SN_NOWARN)
set_name(0x800797C8, "ActivateSpawn__Fiiii", SN_NOWARN)
set_name(0x80079870, "SpawnSkeleton__Fiii", SN_NOWARN)
set_name(0x80079A60, "M_StartSpStand__Fii", SN_NOWARN)
set_name(0x80079B40, "PosOkMonst__Fiii", SN_NOWARN)
set_name(0x80079DBC, "CanPut__Fii", SN_NOWARN)
set_name(0x8007A0C4, "GetAutomapType__FiiUc", SN_NOWARN)
set_name(0x8007A3C0, "SetAutomapView__Fii", SN_NOWARN)
set_name(0x8007A810, "lAddMissile__Fiiici", SN_NOWARN)
set_name(0x8007A9E4, "AddWarpMissile__Fiii", SN_NOWARN)
set_name(0x8007AB2C, "SyncPortals__Fv", SN_NOWARN)
set_name(0x8007AC34, "AddInTownPortal__Fi", SN_NOWARN)
set_name(0x8007AC6C, "ActivatePortal__FiiiiiUc", SN_NOWARN)
set_name(0x8007ACDC, "DeactivatePortal__Fi", SN_NOWARN)
set_name(0x8007ACFC, "PortalOnLevel__Fi", SN_NOWARN)
set_name(0x8007AD34, "DelMis__Fii", SN_NOWARN)
set_name(0x8007AD94, "RemovePortalMissile__Fi", SN_NOWARN)
set_name(0x8007AF10, "SetCurrentPortal__Fi", SN_NOWARN)
set_name(0x8007AF1C, "GetPortalLevel__Fv", SN_NOWARN)
set_name(0x8007B0C0, "GetPortalLvlPos__Fv", SN_NOWARN)
set_name(0x8007B170, "__13CompLevelMaps", SN_NOWARN)
set_name(0x8007B1D8, "___13CompLevelMaps", SN_NOWARN)
set_name(0x8007B258, "Init__13CompLevelMaps", SN_NOWARN)
set_name(0x8007B288, "InitAllMaps__13CompLevelMaps", SN_NOWARN)
set_name(0x8007B2D0, "GetMap__13CompLevelMapsi", SN_NOWARN)
set_name(0x8007B344, "ReleaseMap__13CompLevelMapsP6DLevel", SN_NOWARN)
set_name(0x8007B3E8, "Init__4AMap", SN_NOWARN)
set_name(0x8007B450, "GetMap__4AMap", SN_NOWARN)
set_name(0x8007B570, "ReleaseMap__4AMapP6DLevel", SN_NOWARN)
set_name(0x8007B600, "CheckMapNum__13CompLevelMapsi", SN_NOWARN)
set_name(0x8007B634, "___4AMap", SN_NOWARN)
set_name(0x8007B67C, "__4AMap", SN_NOWARN)
set_name(0x8007B6B0, "GO_DoGameOver__Fv", SN_NOWARN)
set_name(0x8007B6F4, "GameOverTask__FP4TASK", SN_NOWARN)
set_name(0x8007B7B0, "PrintGameOver__Fv", SN_NOWARN)
set_name(0x8007B890, "SetRGB__6DialogUcUcUc_addr_8007B890", SN_NOWARN)
set_name(0x8007B8B0, "SetBack__6Dialogi_addr_8007B8B0", SN_NOWARN)
set_name(0x8007B8B8, "SetBorder__6Dialogi_addr_8007B8B8", SN_NOWARN)
set_name(0x8007B8C0, "___6Dialog_addr_8007B8C0", SN_NOWARN)
set_name(0x8007B8E8, "__6Dialog_addr_8007B8E8", SN_NOWARN)
set_name(0x8007B944, "VER_InitVersion__Fv", SN_NOWARN)
set_name(0x8007B988, "VER_GetVerString__Fv", SN_NOWARN)
set_name(0x8007B998, "CharPair2Num__FPc", SN_NOWARN)
set_name(0x8001E6A8, "TICK_InitModule", SN_NOWARN)
set_name(0x8001E6C8, "TICK_Set", SN_NOWARN)
set_name(0x8001E6D8, "TICK_Get", SN_NOWARN)
set_name(0x8001E6E8, "TICK_Update", SN_NOWARN)
set_name(0x8001E708, "TICK_GetAge", SN_NOWARN)
set_name(0x8001E734, "TICK_GetDateString", SN_NOWARN)
set_name(0x8001E744, "TICK_GetTimeString", SN_NOWARN)
set_name(0x8001E754, "GU_InitModule", SN_NOWARN)
set_name(0x8001E780, "GU_SetRndSeed", SN_NOWARN)
set_name(0x8001E7B0, "GU_GetRnd", SN_NOWARN)
set_name(0x8001E840, "GU_GetSRnd", SN_NOWARN)
set_name(0x8001E860, "GU_GetRndRange", SN_NOWARN)
set_name(0x8001E89C, "GU_AlignVal", SN_NOWARN)
set_name(0x8001E8C0, "main", SN_NOWARN)
set_name(0x8001E910, "DBG_OpenModule", SN_NOWARN)
set_name(0x8001E918, "DBG_PollHost", SN_NOWARN)
set_name(0x8001E920, "DBG_Halt", SN_NOWARN)
set_name(0x8001E928, "DBG_SendMessage", SN_NOWARN)
set_name(0x8001E940, "DBG_SetMessageHandler", SN_NOWARN)
set_name(0x8001E950, "DBG_Error", SN_NOWARN)
set_name(0x8001E97C, "DBG_SetErrorFunc", SN_NOWARN)
set_name(0x8001E98C, "SendPsyqString", SN_NOWARN)
set_name(0x8001E994, "DBG_SetPollRoutine", SN_NOWARN)
set_name(0x8001E9A4, "GTIMSYS_GetTimer", SN_NOWARN)
set_name(0x8001E9C8, "GTIMSYS_ResetTimer", SN_NOWARN)
set_name(0x8001E9EC, "GTIMSYS_InitTimer", SN_NOWARN)
set_name(0x8001EC20, "DoEpi", SN_NOWARN)
set_name(0x8001EC70, "DoPro", SN_NOWARN)
set_name(0x8001ECC0, "TSK_OpenModule", SN_NOWARN)
set_name(0x8001ED34, "TSK_AddTask", SN_NOWARN)
set_name(0x8001EF1C, "TSK_DoTasks", SN_NOWARN)
set_name(0x8001F0DC, "TSK_Sleep", SN_NOWARN)
set_name(0x8001F1B8, "ReturnToSchedulerIfCurrentTask", SN_NOWARN)
set_name(0x8001F240, "TSK_Die", SN_NOWARN)
set_name(0x8001F26C, "TSK_Kill", SN_NOWARN)
set_name(0x8001F2BC, "TSK_GetFirstActive", SN_NOWARN)
set_name(0x8001F2CC, "TSK_IsStackCorrupted", SN_NOWARN)
set_name(0x8001F348, "TSK_JumpAndResetStack", SN_NOWARN)
set_name(0x8001F390, "TSK_RepointProc", SN_NOWARN)
set_name(0x8001F3D4, "TSK_GetCurrentTask", SN_NOWARN)
set_name(0x8001F3E4, "TSK_IsCurrentTask", SN_NOWARN)
set_name(0x8001F3FC, "TSK_Exist", SN_NOWARN)
set_name(0x8001F454, "TSK_SetExecFilter", SN_NOWARN)
set_name(0x8001F46C, "TSK_ClearExecFilter", SN_NOWARN)
set_name(0x8001F490, "TSK_KillTasks", SN_NOWARN)
set_name(0x8001F590, "TSK_IterateTasks", SN_NOWARN)
set_name(0x8001F608, "TSK_MakeTaskInactive", SN_NOWARN)
set_name(0x8001F61C, "TSK_MakeTaskActive", SN_NOWARN)
set_name(0x8001F630, "TSK_MakeTaskImmortal", SN_NOWARN)
set_name(0x8001F644, "TSK_MakeTaskMortal", SN_NOWARN)
set_name(0x8001F658, "TSK_IsTaskActive", SN_NOWARN)
set_name(0x8001F66C, "TSK_IsTaskMortal", SN_NOWARN)
set_name(0x8001F680, "DetachFromList", SN_NOWARN)
set_name(0x8001F6CC, "AddToList", SN_NOWARN)
set_name(0x8001F6EC, "LoTskKill", SN_NOWARN)
set_name(0x8001F75C, "ExecuteTask", SN_NOWARN)
set_name(0x8001F7AC, "TSK_SetDoTasksPrologue", SN_NOWARN)
set_name(0x8001F7C4, "TSK_SetDoTasksEpilogue", SN_NOWARN)
set_name(0x8001F7DC, "TSK_SetTaskPrologue", SN_NOWARN)
set_name(0x8001F7F4, "TSK_SetTaskEpilogue", SN_NOWARN)
set_name(0x8001F80C, "TSK_SetEpiProFilter", SN_NOWARN)
set_name(0x8001F824, "TSK_ClearEpiProFilter", SN_NOWARN)
set_name(0x8001F858, "TSK_SetExtraStackProtection", SN_NOWARN)
set_name(0x8001F868, "TSK_SetStackFloodCallback", SN_NOWARN)
set_name(0x8001F880, "TSK_SetExtraStackSize", SN_NOWARN)
set_name(0x8001F8A8, "ExtraMarkStack", SN_NOWARN)
set_name(0x8001F8D4, "CheckExtraStack", SN_NOWARN)
set_name(0x8001F910, "GSYS_GetWorkMemInfo", SN_NOWARN)
set_name(0x8001F920, "GSYS_SetStackAndJump", SN_NOWARN)
set_name(0x8001F95C, "GSYS_MarkStack", SN_NOWARN)
set_name(0x8001F96C, "GSYS_IsStackCorrupted", SN_NOWARN)
set_name(0x8001F984, "GSYS_InitMachine", SN_NOWARN)
set_name(0x8001F9D8, "GSYS_CheckPtr", SN_NOWARN)
set_name(0x8001FA0C, "GSYS_IsStackOutOfBounds", SN_NOWARN)
set_name(0x8001FA88, "GAL_SetErrorChecking", SN_NOWARN)
set_name(0x8001FA98, "GAL_SplitBlock", SN_NOWARN)
set_name(0x8001FBCC, "GAL_InitModule", SN_NOWARN)
set_name(0x8001FC84, "GAL_AddMemType", SN_NOWARN)
set_name(0x8001FDA4, "GAL_Alloc", SN_NOWARN)
set_name(0x8001FF3C, "GAL_Lock", SN_NOWARN)
set_name(0x8001FF9C, "GAL_Unlock", SN_NOWARN)
set_name(0x80020018, "GAL_Free", SN_NOWARN)
set_name(0x800200B8, "GAL_GetFreeMem", SN_NOWARN)
set_name(0x8002012C, "GAL_GetUsedMem", SN_NOWARN)
set_name(0x800201A0, "GAL_LargestFreeBlock", SN_NOWARN)
set_name(0x8002021C, "AttachHdrToList", SN_NOWARN)
set_name(0x8002023C, "DetachHdrFromList", SN_NOWARN)
set_name(0x80020288, "IsActiveValidHandle", SN_NOWARN)
set_name(0x800202B8, "AlignPtr", SN_NOWARN)
set_name(0x800202E8, "AlignSize", SN_NOWARN)
set_name(0x80020318, "FindClosestSizedBlock", SN_NOWARN)
set_name(0x80020370, "FindHighestMemBlock", SN_NOWARN)
set_name(0x800203D8, "FindLowestMemBlock", SN_NOWARN)
set_name(0x80020440, "GetMemInitInfoBlockFromType", SN_NOWARN)
set_name(0x8002047C, "MergeToEmptyList", SN_NOWARN)
set_name(0x80020550, "GAL_AllocAt", SN_NOWARN)
set_name(0x8002062C, "LoAlloc", SN_NOWARN)
set_name(0x800207C4, "FindBlockInTheseBounds", SN_NOWARN)
set_name(0x80020830, "GetFreeMemHdrBlock", SN_NOWARN)
set_name(0x800208B8, "ReleaseMemHdrBlock", SN_NOWARN)
set_name(0x800208F8, "GAL_IterateEmptyMem", SN_NOWARN)
set_name(0x8002097C, "GAL_IterateUsedMem", SN_NOWARN)
set_name(0x80020A18, "GAL_SetMemName", SN_NOWARN)
set_name(0x80020A80, "GAL_TotalMem", SN_NOWARN)
set_name(0x80020AD4, "GAL_MemBase", SN_NOWARN)
set_name(0x80020B28, "GAL_DefragMem", SN_NOWARN)
set_name(0x80020BAC, "GSetError", SN_NOWARN)
set_name(0x80020C08, "GAL_CheckMem", SN_NOWARN)
set_name(0x80020D04, "CheckCollisions", SN_NOWARN)
set_name(0x80020DB0, "AreBlocksColliding", SN_NOWARN)
set_name(0x80020E08, "GAL_GetErrorText", SN_NOWARN)
set_name(0x80020E38, "GAL_GetLastErrorCode", SN_NOWARN)
set_name(0x80020E48, "GAL_GetLastErrorText", SN_NOWARN)
set_name(0x80020E70, "GAL_HowManyEmptyRegions", SN_NOWARN)
set_name(0x80020ED8, "GAL_HowManyUsedRegions", SN_NOWARN)
set_name(0x80020F40, "GAL_SetTimeStamp", SN_NOWARN)
set_name(0x80020F50, "GAL_IncTimeStamp", SN_NOWARN)
set_name(0x80020F70, "GAL_GetTimeStamp", SN_NOWARN)
set_name(0x80020F80, "GAL_AlignSizeToType", SN_NOWARN)
set_name(0x80020FD0, "GAL_AllocMultiStruct", SN_NOWARN)
set_name(0x80021020, "GAL_ProcessMultiStruct", SN_NOWARN)
set_name(0x800210CC, "GAL_GetSize", SN_NOWARN)
set_name(0x80021120, "GazDefragMem", SN_NOWARN)
set_name(0x80021288, "PutBlocksInRegionIntoList", SN_NOWARN)
set_name(0x8002132C, "CollideRegions", SN_NOWARN)
set_name(0x80021360, "DeleteEmptyBlocks", SN_NOWARN)
# Repo: baojianzhou/sparse-auc, file: test_simu.py
# -*- coding: utf-8 -*-
import os
import sys
import time
import numpy as np
import pickle as pkl
import multiprocessing
from itertools import product
from sklearn.model_selection import KFold
from sklearn.metrics import roc_auc_score
try:
sys.path.append(os.getcwd())
import sparse_module
try:
from sparse_module import c_algo_solam
from sparse_module import c_algo_spam
from sparse_module import c_algo_sht_auc
from sparse_module import c_algo_opauc
from sparse_module import c_algo_sto_iht
from sparse_module import c_algo_hsg_ht
from sparse_module import c_algo_fsauc
except ImportError:
print('cannot find some function(s) in sparse_module')
pass
except ImportError:
print('cannot find the module: sparse_module')
pass
data_path = 'enter/your/directory/to/00_simu'
def _gen_dataset_00_simu(data_path, num_tr, trial_id, mu, posi_ratio, noise_mu=0.0, noise_std=1.0):
"""
number of classes: 2
number of samples: 1,000
number of features: 1,000
---
:param data_path:
:param num_tr:
:param trial_id:
:param mu:
:param posi_ratio:
:param noise_mu:
:param noise_std:
:return:
"""
posi_label, nega_label, k_fold, p = +1, -1, 5, 1000
all_data = dict()
for s in [20, 40, 60, 80]:
perm = np.random.permutation(p)
subset_nodes = perm[:s]
n = num_tr
num_posi, num_nega = int(n * posi_ratio), int(n * (1. - posi_ratio))
assert (num_posi + num_nega) == n
# generate training samples and labels
labels = [posi_label] * num_posi + [nega_label] * num_nega
y_labels = np.asarray(labels, dtype=np.float64)
x_data = np.random.normal(noise_mu, noise_std, n * p).reshape(n, p)
anomalous_data = np.random.normal(mu, noise_std, s * num_posi).reshape(num_posi, s)
x_data[:num_posi, subset_nodes] = anomalous_data
rand_indices = np.random.permutation(len(y_labels))
x_tr, y_tr = x_data[rand_indices], y_labels[rand_indices]
print(trial_id, posi_ratio, s, np.linalg.norm(x_tr), subset_nodes[:5])
# normalize data by z-score
x_mean = np.tile(np.mean(x_tr, axis=0), (len(x_tr), 1))
x_std = np.tile(np.std(x_tr, axis=0), (len(x_tr), 1))
x_tr = np.nan_to_num(np.divide(x_tr - x_mean, x_std))
# normalize samples to unit length.
for i in range(len(x_tr)):
x_tr[i] = x_tr[i] / np.linalg.norm(x_tr[i])
data = {'x_tr': x_tr,
'y_tr': y_tr,
'subset': subset_nodes,
'mu': mu,
'p': p,
'n': num_tr,
's': len(subset_nodes),
'noise_mu': noise_mu,
'noise_std': noise_std,
'trial_id': trial_id,
'num_k_fold': k_fold,
'posi_ratio': posi_ratio}
# build the k-fold train/test splits (each fold uses a fresh random permutation) for future use.
kf = KFold(n_splits=data['num_k_fold'], shuffle=False)
fake_x = np.zeros(shape=(data['n'], 1)) # just need the number of training samples
for fold_index, (train_index, test_index) in enumerate(kf.split(fake_x)):
# since original data is ordered, we need to shuffle it!
rand_perm = np.random.permutation(data['n'])
data['trial_%d_fold_%d' % (trial_id, fold_index)] = {'tr_index': rand_perm[train_index],
'te_index': rand_perm[test_index]}
all_data[s] = data
pkl.dump(all_data, open(data_path + '/data_trial_%02d_tr_%03d_mu_%.1f_p-ratio_%.2f.pkl'
% (trial_id, num_tr, mu, posi_ratio), 'wb'))
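# --- Illustrative driver (not part of the original script; the trial count, posi_ratio grid,
# --- and mu value below are assumptions chosen for the sketch, not values from the repo) ---
def _sketch_generate_all_trials(num_trials=5, num_tr=1000):
    """Minimal sketch: write one pickle per (trial, posi_ratio) setting via the generator above."""
    for trial_id in range(num_trials):
        for posi_ratio in [0.1, 0.3, 0.5]:  # assumed class-imbalance settings
            _gen_dataset_00_simu(data_path, num_tr=num_tr, trial_id=trial_id,
                                 mu=0.3, posi_ratio=posi_ratio)  # mu=0.3 is an assumed signal strength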
def node_pre_rec_fm(true_nodes, pred_nodes):
""" Return the precision, recall and f-measure.
:param true_nodes:
:param pred_nodes:
:return: precision, recall and f-measure """
true_nodes, pred_nodes = set(true_nodes), set(pred_nodes)
pre, rec, fm = 0.0, 0.0, 0.0
if len(pred_nodes) != 0:
pre = len(true_nodes & pred_nodes) / float(len(pred_nodes))
if len(true_nodes) != 0:
rec = len(true_nodes & pred_nodes) / float(len(true_nodes))
if (pre + rec) > 0.:
fm = (2. * pre * rec) / (pre + rec)
return [pre, rec, fm]
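# Example (illustrative): node_pre_rec_fm([1, 2, 3], [1, 2, 3]) returns [1.0, 1.0, 1.0],
# while node_pre_rec_fm([1, 2, 3], [2, 3, 4]) returns [2/3, 2/3, 2/3] (two of three nodes recovered).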
def cv_solam(para):
""" SOLAM algorithm. """
trial_id, k_fold, num_passes, num_tr, mu, posi_ratio, fig_i = para
# get data
f_name = data_path + 'data_trial_%02d_tr_%03d_mu_%.1f_p-ratio_%.2f.pkl'
data = pkl.load(open(f_name % (trial_id, num_tr, mu, posi_ratio), 'rb'))[fig_i]
__ = np.empty(shape=(1,), dtype=float)
# candidate parameters
list_xi = np.arange(1, 101, 9, dtype=float)
list_r = 10. ** np.arange(-1, 6, 1, dtype=float)
auc_wt, cv_wt_results = dict(), np.zeros((len(list_xi), len(list_r)))
step_len, verbose, record_aucs, stop_eps = 1e8, 0, 0, 1e-4
global_paras = np.asarray([num_passes, step_len, verbose, record_aucs, stop_eps], dtype=float)
for fold_id, (ind_xi, para_xi), (ind_r, para_r) in product(range(k_fold), enumerate(list_xi), enumerate(list_r)):
s_time = time.time()
algo_para = (para_xi, para_r, (trial_id, fold_id, fig_i, num_passes, posi_ratio, stop_eps))
tr_index = data['trial_%d_fold_%d' % (trial_id, fold_id)]['tr_index']
if (trial_id, fold_id) not in auc_wt: # cross validate based on tr_index
auc_wt[(trial_id, fold_id)] = {'auc': 0.0, 'para': algo_para, 'num_nonzeros': 0.0}
list_auc_wt = np.zeros(k_fold)
list_num_nonzeros_wt = np.zeros(k_fold)
list_epochs = np.zeros(k_fold)
kf = KFold(n_splits=k_fold, shuffle=False)
for ind, (sub_tr_ind, sub_te_ind) in enumerate(kf.split(np.zeros(shape=(len(tr_index), 1)))):
sub_x_tr = np.asarray(data['x_tr'][tr_index[sub_tr_ind]], dtype=float)
sub_y_tr = np.asarray(data['y_tr'][tr_index[sub_tr_ind]], dtype=float)
sub_x_te = data['x_tr'][tr_index[sub_te_ind]]
sub_y_te = data['y_tr'][tr_index[sub_te_ind]]
_ = c_algo_solam(sub_x_tr, __, __, __, sub_y_tr, 0, data['p'], global_paras, para_xi, para_r)
wt, aucs, rts, epochs = _
list_auc_wt[ind] = roc_auc_score(y_true=sub_y_te, y_score=np.dot(sub_x_te, wt))
list_num_nonzeros_wt[ind] = np.count_nonzero(wt)
list_epochs[ind] = epochs[0]
cv_wt_results[ind_xi, ind_r] = np.mean(list_auc_wt)
if auc_wt[(trial_id, fold_id)]['auc'] < np.mean(list_auc_wt):
auc_wt[(trial_id, fold_id)]['auc'] = float(np.mean(list_auc_wt))
auc_wt[(trial_id, fold_id)]['para'] = algo_para
auc_wt[(trial_id, fold_id)]['num_nonzeros'] = float(np.mean(list_num_nonzeros_wt))
auc_wt[(trial_id, fold_id)]['epochs'] = float(np.mean(list_epochs))
print("trial-%d fold-%d para_xi: %.1e para_r: %.1e auc: %.4f epochs: %02d run_time: %.6f" %
(trial_id, fold_id, para_xi, para_r, float(np.mean(list_auc_wt)),
float(np.mean(list_epochs)), time.time() - s_time))
sys.stdout.flush()
return para, auc_wt, cv_wt_results
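# --- Hedged dispatch sketch (not in the original file): the module imports `multiprocessing`
# --- and `product`, so model selection over a settings grid could be parallelised roughly as
# --- below; the num_passes, mu, posi_ratio, and sparsity values are assumptions for the sketch.
def _sketch_run_cv_solam(num_cpus=4, k_fold=5, num_passes=20, num_tr=1000):
    para_space = [(trial_id, k_fold, num_passes, num_tr, 0.3, 0.5, s)
                  for trial_id, s in product(range(5), [20, 40, 60, 80])]
    pool = multiprocessing.Pool(processes=num_cpus)
    ms_results = pool.map(cv_solam, para_space)
    pool.close()
    pool.join()
    return ms_results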
def test_solam(para):
trial_id, k_fold, num_passes, num_tr, mu, posi_ratio, s = para
f_name = data_path + 'data_trial_%02d_tr_%03d_mu_%.1f_p-ratio_%.2f.pkl'
data = pkl.load(open(f_name % (trial_id, num_tr, mu, posi_ratio), 'rb'))[s]
__ = np.empty(shape=(1,), dtype=float)
ms = pkl.load(open(data_path + 'ms_00_05_solam.pkl', 'rb'))
results = dict()
step_len, verbose, record_aucs, stop_eps = 1e2, 0, 1, 1e-4
global_paras = np.asarray([num_passes, step_len, verbose, record_aucs, stop_eps], dtype=float)
for fold_id in range(k_fold):
para_xi, para_r, _ = ms[para]['solam']['auc_wt'][(trial_id, fold_id)]['para']
tr_index = data['trial_%d_fold_%d' % (trial_id, fold_id)]['tr_index']
te_index = data['trial_%d_fold_%d' % (trial_id, fold_id)]['te_index']
x_tr = np.asarray(data['x_tr'][tr_index], dtype=float)
y_tr = np.asarray(data['y_tr'][tr_index], dtype=float)
_ = c_algo_solam(x_tr, __, __, __, y_tr, 0, data['p'], global_paras, para_xi, para_r)
wt, aucs, rts, epochs = _
# indices = np.argsort(np.abs(wt))[::-1]
wt = np.asarray(wt)
wt[np.where(np.abs(wt) < 1e-3)] = 0.0
indices = np.nonzero(wt)[0]
xx = set(indices).intersection(set(data['subset']))
if float(len(indices)) != 0.0:
pre = float(len(xx)) / float(len(indices))
else:
pre = 0.0
rec = float(len(xx)) / float(len(data['subset']))
item = (trial_id, fold_id, k_fold, num_passes, num_tr, mu, posi_ratio, s)
results[item] = {'algo_para': [trial_id, fold_id, s, para_xi, para_r],
'auc_wt': roc_auc_score(y_true=data['y_tr'][te_index],
y_score=np.dot(data['x_tr'][te_index], wt)),
'f1_score': 2. * pre * rec / (pre + rec) if (pre + rec) > 0 else 0.0,
'aucs': aucs, 'rts': rts, 'wt': wt, 'nonzero_wt': np.count_nonzero(wt)}
print('trial-%d fold-%d %s p-ratio:%.2f auc: %.4f para_xi:%.4f para_r:%.4f' %
(trial_id, fold_id, s, posi_ratio, results[item]['auc_wt'], para_xi, para_r))
sys.stdout.flush()
return results
def cv_spam_l1(para):
""" SPAM algorithm with l1-regularization. """
trial_id, k_fold, num_passes, num_tr, mu, posi_ratio, fig_i = para
# get data
f_name = data_path + 'data_trial_%02d_tr_%03d_mu_%.1f_p-ratio_%.2f.pkl'
data = pkl.load(open(f_name % (trial_id, num_tr, mu, posi_ratio), 'rb'))[fig_i]
__ = np.empty(shape=(1,), dtype=float)
# candidate parameters
list_c = 10. ** np.arange(-5, 3, 1, dtype=float)
list_l1 = 10. ** np.arange(-5, 3, 1, dtype=float)
auc_wt, cv_wt_results = dict(), np.zeros((len(list_c), len(list_l1)))
step_len, verbose, record_aucs, stop_eps = 1e8, 0, 0, 1e-4
global_paras = np.asarray([num_passes, step_len, verbose, record_aucs, stop_eps], dtype=float)
for fold_id, (ind_xi, para_xi), (ind_l1, para_l1) in product(range(k_fold), enumerate(list_c), enumerate(list_l1)):
s_time = time.time()
algo_para = (para_xi, para_l1, (trial_id, fold_id, fig_i, num_passes, posi_ratio, stop_eps))
tr_index = data['trial_%d_fold_%d' % (trial_id, fold_id)]['tr_index']
if (trial_id, fold_id) not in auc_wt:
auc_wt[(trial_id, fold_id)] = {'auc': 0.0, 'para': algo_para, 'num_nonzeros': 0.0}
list_auc_wt = np.zeros(k_fold)
list_num_nonzeros_wt = np.zeros(k_fold)
list_epochs = np.zeros(k_fold)
kf = KFold(n_splits=k_fold, shuffle=False)
for ind, (sub_tr_ind, sub_te_ind) in enumerate(kf.split(np.zeros(shape=(len(tr_index), 1)))):
sub_x_tr = np.asarray(data['x_tr'][tr_index[sub_tr_ind]], dtype=float)
sub_y_tr = np.asarray(data['y_tr'][tr_index[sub_tr_ind]], dtype=float)
sub_x_te = data['x_tr'][tr_index[sub_te_ind]]
sub_y_te = data['y_tr'][tr_index[sub_te_ind]]
_ = c_algo_spam(sub_x_tr, __, __, __, sub_y_tr, 0, data['p'], global_paras, para_xi, para_l1, 0.0)
wt, aucs, rts, epochs = _
list_auc_wt[ind] = roc_auc_score(y_true=sub_y_te, y_score=np.dot(sub_x_te, wt))
list_num_nonzeros_wt[ind] = np.count_nonzero(wt)
list_epochs[ind] = epochs[0]
cv_wt_results[ind_xi, ind_l1] = np.mean(list_auc_wt)
if auc_wt[(trial_id, fold_id)]['auc'] < np.mean(list_auc_wt):
auc_wt[(trial_id, fold_id)]['para'] = algo_para
auc_wt[(trial_id, fold_id)]['auc'] = float(np.mean(list_auc_wt))
auc_wt[(trial_id, fold_id)]['num_nonzeros'] = float(np.mean(list_num_nonzeros_wt))
auc_wt[(trial_id, fold_id)]['epochs'] = float(np.mean(list_epochs))
print("trial-%d fold-%d para_xi: %.1e para_l1: %.1e auc: %.4f epochs: %02d run_time: %.6f" %
(trial_id, fold_id, para_xi, para_l1, float(np.mean(list_auc_wt)),
float(np.mean(list_epochs)), time.time() - s_time))
sys.stdout.flush()
return para, auc_wt, cv_wt_results
def test_spam_l1(para):
trial_id, k_fold, num_passes, num_tr, mu, posi_ratio, s = para
f_name = data_path + 'data_trial_%02d_tr_%03d_mu_%.1f_p-ratio_%.2f.pkl'
data = pkl.load(open(f_name % (trial_id, num_tr, mu, posi_ratio), 'rb'))[s]
__ = np.empty(shape=(1,), dtype=float)
ms = pkl.load(open(data_path + 'ms_00_05_spam_l1.pkl', 'rb'))
results = dict()
step_len, verbose, record_aucs, stop_eps = 1e2, 0, 1, 1e-4
global_paras = np.asarray([num_passes, step_len, verbose, record_aucs, stop_eps], dtype=float)
for fold_id in range(k_fold):
para_xi, para_l1, _ = ms[para]['spam_l1']['auc_wt'][(trial_id, fold_id)]['para']
tr_index = data['trial_%d_fold_%d' % (trial_id, fold_id)]['tr_index']
te_index = data['trial_%d_fold_%d' % (trial_id, fold_id)]['te_index']
x_tr = np.asarray(data['x_tr'][tr_index], dtype=float)
y_tr = np.asarray(data['y_tr'][tr_index], dtype=float)
_ = c_algo_spam(x_tr, __, __, __, y_tr, 0, data['p'], global_paras, para_xi, para_l1, 0.0)
wt, aucs, rts, epochs = _
wt = np.asarray(wt)
wt[np.where(np.abs(wt) < 1e-3)] = 0.0
indices = np.nonzero(wt)[0]
xx = set(indices).intersection(set(data['subset']))
if float(len(indices)) != 0.0:
pre = float(len(xx)) / float(len(indices))
else:
pre = 0.0
rec = float(len(xx)) / float(len(data['subset']))
item = (trial_id, fold_id, k_fold, num_passes, num_tr, mu, posi_ratio, s)
results[item] = {'algo_para': [trial_id, fold_id, s, para_xi, para_l1],
'auc_wt': roc_auc_score(y_true=data['y_tr'][te_index],
y_score=np.dot(data['x_tr'][te_index], wt)),
'f1_score': 2. * pre * rec / (pre + rec) if (pre + rec) > 0 else 0.0,
'aucs': aucs, 'rts': rts, 'wt': wt, 'nonzero_wt': np.count_nonzero(wt)}
sys.stdout.flush()
return results
import argparse
import csv
import datetime
import math
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pickle
import shutil
import sys
import time
import warnings
import yaml
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as distributed
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.datasets as dset
import torchvision.utils as vutils
from multiprocessing import Process
from torch.utils.data import TensorDataset
from tqdm import tqdm
try:
from torchvision.transforms.functional import resize, InterpolationMode
interp = InterpolationMode.NEAREST
except ImportError:  # older torchvision without InterpolationMode
from torchvision.transforms.functional import resize
interp = 0
import dist_utils
import lib.utils as utils
from lib import layers
from lib.dataloader import get_dataloaders
from lib.multiscale import CNFMultiscale
from lib.regularization import get_regularization, append_regularization_to_log
from lib.regularization import append_regularization_keys_header, append_regularization_csv_dict
from lib.utils import logit_logpx_to_image_bpd, convert_base_from_10, vis_imgs_laps, convert_time_stamp_to_hrs, logpx_to_bpd
from misc import set_cnf_options, count_nfe, count_training_parameters, count_parameters
cudnn.benchmark = True
SOLVERS = ["dopri5", "bdf", "rk4", "midpoint", 'adams', 'explicit_adams', 'adaptive_heun', 'bosh3']
def get_args():
parser = argparse.ArgumentParser("Multi-Resolution Continuous Normalizing Flow")
# Mode
parser.add_argument("--mode", type=str, default="image", choices=["wavelet", "mrcnf"])
# Multi-res
parser.add_argument("--normal_resolution", type=int, default=64, help="Resolution at which z is standard normal. (def: 64)")
parser.add_argument('--std_scale', type=eval, default=True, choices=[True, False], help="Add AffineTx layer at end of CNF to scale output acc to z_std")
# Data
parser.add_argument("--data", type=str, default="mnist", choices=["mnist", "svhn", "cifar10", "lsun_church", "celebahq", "imagenet", "imagenet64_cf", "zap50k", "fashion_mnist"])
parser.add_argument("--data_path", default="./data/", help="mnist: `./data/`, cifar10: `./data/CIFAR10`, imagenet: `./data/ilsvrc2012.hdf5`")
parser.add_argument("--imagenet_classes", type=str, default="")
parser.add_argument("--nworkers", type=int, default=8)
parser.add_argument("--im_size", type=int, default=32)
parser.add_argument('--ds_idx_mod', type=int, default=None, help="In case we want to train on only subset of images, e.g. mod=10 => images [0, 1, ..., 9]")
parser.add_argument('--ds_idx_skip', type=int, default=0, help="In case we want to train on only subset of images, e.g. mod=10 and skip=10 => images [10, 11, ..., 19]")
parser.add_argument('--ds_length', type=int, default=None, help="Total length of dataset, to decide number of batches per epoch")
parser.add_argument('--test_ds_idx_mod', type=int, default=None, help="In case we want to test on only subset of images, e.g. mod=10 => images [0, 1, ..., 9]")
parser.add_argument('--test_ds_idx_skip', type=int, default=0, help="In case we want to test on only subset of images, e.g. mod=10 and skip=10 => images [10, 11, ..., 19]")
parser.add_argument('--test_ds_length', type=int, default=None, help="Total length of test dataset, to decide number of batches per epoch")
# Save
parser.add_argument("--save_path", type=str, default="experiments/cnf")
# Model
parser.add_argument("--dims", type=str, default="64,64,64")
parser.add_argument("--strides", type=str, default="1,1,1,1")
parser.add_argument("--num_blocks", type=str, default="2,2", help='Number of stacked CNFs, per scale. Should have 1 item, or max_scales number of items.')
parser.add_argument('--bn', type=eval, default=False, choices=[True, False], help="Add BN to coarse")
parser.add_argument("--layer_type", type=str, default="concat", choices=["ignore", "concat"])
parser.add_argument("--nonlinearity", type=str, default="softplus", choices=["tanh", "relu", "softplus", "elu", "swish", "square", "identity"])
parser.add_argument('--zero_last', type=eval, default=True, choices=[True, False])
# Data characteristics
parser.add_argument("--nbits", type=int, default=8)
parser.add_argument('--max_scales', type=int, default=2, help="# of scales for image pyramid")
parser.add_argument('--scale', type=int, default=0, help='freeze all parameters but this scale; start evaluating loss from this scale')
parser.add_argument("--add_noise", type=eval, default=True, choices=[True, False])
parser.add_argument("--tau", type=float, default=0.5)
parser.add_argument('--logit', type=eval, default=True, choices=[True, False])
parser.add_argument("--alpha", type=float, default=0.05, help="if logit is true, alpha is used to convert from pixel to logit (and back)")
parser.add_argument('--concat_input', type=eval, default=True, choices=[True, False], help="To concat the image input to odefunc or not.")
# ODE Solver
parser.add_argument('--solver', type=str, default='dopri5', choices=SOLVERS)
parser.add_argument('--atol', type=float, default=1e-5, help='only for adaptive solvers')
parser.add_argument('--rtol', type=float, default=1e-5, help='only for adaptive solvers')
parser.add_argument('--step_size', type=float, default=0.25, help='only for fixed step size solvers')
parser.add_argument('--first_step', type=float, default=0.166667, help='only for adaptive solvers')
# ODE Solver for test
parser.add_argument('--test_solver', type=str, default=None, choices=SOLVERS + [None])
parser.add_argument('--test_atol', type=float, default=None)
parser.add_argument('--test_rtol', type=float, default=None)
parser.add_argument('--test_step_size', type=float, default=None)
parser.add_argument('--test_first_step', type=float, default=None)
# ODE stop time
parser.add_argument('--time_length', type=float, default=1.0)
parser.add_argument('--train_T', type=eval, default=False)
parser.add_argument('--steer_b', type=float, default=0.0)
# Train
parser.add_argument('--joint', type=eval, default=False, choices=[True, False], help="Joint training of all scales (else train each scale separately)")
parser.add_argument("--num_epochs", type=int, default=100, help="# of epochs in case of JOINT training only.")
parser.add_argument("--batch_size", type=int, default=4)
parser.add_argument('--epochs_per_scale', type=str, default=None, help="# of epochs per scale in case NOT JOINT training; if not specified, will default to `num_epochs/max_scales`. Eg. `100` or `40,30,30`")
parser.add_argument("--batch_size_per_scale", type=str, default=None, help="Batch sizes to use for every scale. # mentioned can be 1, or must match max_scales. Will default to batch_size if not specified. Eg. `256` or `1024,512,256`")
parser.add_argument("--test_batch_size", type=int, default=-1)
parser.add_argument("--lr", type=float, default=0.001, help="LR of different scales")
parser.add_argument("--lr_per_scale", type=str, default=None, help="LR of different scales; if not specified, will default to `lr")
parser.add_argument("--lr_warmup_iters", type=int, default=1000)
parser.add_argument('--lr_gamma', type=float, default=0.999)
parser.add_argument('--lr_scheduler', type=str, choices=["plateau", "step", "multiplicative"], default="plateau")
parser.add_argument('--plateau_factor', type=float, default=0.1)
parser.add_argument('--plateau_patience', type=int, default=4)
parser.add_argument('--plateau_threshold', type=float, default=0.0001)
parser.add_argument('--plateau_threshold_mode', type=str, choices=["abs", "rel"], default="abs")
parser.add_argument('--lr_step', type=int, default=10, help="Not valid for plateau or multiplicative")
parser.add_argument('--min_lr', type=float, default=1.01e-8, help="Min LR")
parser.add_argument('--min_lr_max_iters', type=int, default=100, help="Max iters to run at min_lr")
parser.add_argument('--optimizer', type=str, default='adam', choices=['adam', 'sgd'])
parser.add_argument("--weight_decay", type=float, default=0.0)
parser.add_argument("--max_grad_norm", type=float, default=100.0, help="Max norm of gradients")
parser.add_argument("--grad_norm_patience", type=int, default=10, help="Max norm of gradients")
# Regularizations
parser.add_argument('--kinetic-energy', type=float, default=None, help="int_t ||f||_2^2")
parser.add_argument('--jacobian-norm2', type=float, default=None, help="int_t ||df/dx||_F^2")
parser.add_argument('--div_samples',type=int, default=1)
parser.add_argument("--divergence_fn", type=str, default="approximate", choices=["brute_force", "approximate"])
# Distributed training
parser.add_argument('--distributed', action='store_true', help='Run distributed training')
parser.add_argument('--dist-url', default='env://', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str, help='distributed backend')
parser.add_argument('--local_rank', default=0, type=int,
help='Used for multi-process training. Can either be manually set ' +
'or automatically set by using \'python -m multiproc\'.')
parser.add_argument("--resume", type=str, default=None, help='path to saved check point')
parser.add_argument("--ckpt_to_load", type=str, nargs='?', default="", help='path to saved check point to load but not resume training from.')
parser.add_argument("--val_freq", type=int, default=1)
parser.add_argument("--save_freq_within_epoch", type=int, default=0, help="(>=0) Number of ITERATIONS(!) within an epoch in which to save model, calc metrics, visualize samples")
parser.add_argument('--disable_viz', action='store_true', help="Disable viz")
parser.add_argument("--plot_freq", type=int, default=1)
parser.add_argument("--log_freq", type=int, default=10)
parser.add_argument("--vis_n_images", type=int, default=100)
parser.add_argument('--disable_cuda', action='store_true')
parser.add_argument('--inference', type=eval, default=False, choices=[True, False])
parser.add_argument('--disable_date', action='store_true')
parser.add_argument('--copy_scripts', type=eval, default=True, choices=[True, False], help="Copy this and other scripts to save directory.")
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('-f', help="DUMMY arg for Jupyter")
try:
args = parser.parse_args()
except:
args = parser.parse_args(args=[])
args.command = 'python ' + ' '.join(sys.argv)
args.conv = True
args.im_ch = 1 if args.data == 'mnist' else 3
if args.inference:
args.copy_scripts = False
assert args.steer_b < args.time_length
args.imagenet_classes = list(map(int, args.imagenet_classes.split(","))) if len(args.imagenet_classes) > 0 else []
if args.data == 'mnist':
args.alpha = 1e-6
else:
args.alpha = 0.05
if not args.disable_date:
args.save_path = os.path.join(os.path.dirname(args.save_path), f'{datetime.datetime.now():%Y%m%d_%H%M%S}_{os.path.basename(args.save_path)}')
args.num_blocks = [int(args.num_blocks)] * args.max_scales if ',' not in args.num_blocks else list(map(int, args.num_blocks.split(",")))
d, dl, st = args.dims.split(',')[0], len(args.dims.split(',')), args.strides.split(',')[0]
args.save_path = f'{args.save_path}_M{args.mode[0]}_b{args.nbits}_sc{args.max_scales}_{args.scale}_d{d}_{dl}_st{st}_bl' + (f"{args.num_blocks}" if ',' not in args.num_blocks else "_".join(args.num_blocks.split(",")))
args.save_path += f'_S{args.solver[0]+args.solver[-1]}_{args.optimizer}_ke{args.kinetic_energy}_jf{args.jacobian_norm2}_st{args.steer_b}_n{str(args.add_noise)[0]}_GN{args.max_grad_norm}'
args.save_path += f'_nres{args.normal_resolution}'
if args.std_scale:
args.save_path += f"std"
if args.joint:
args.save_path += f'_j{str(args.joint)[0]}_e{args.num_epochs}_bs{args.batch_size}_lr{args.lr}'
if args.test_batch_size == -1:
args.test_batch_size = args.batch_size
else:
# epochs
if args.epochs_per_scale is None:
args.save_path += f'_j{str(args.joint)[0]}_ep{int(args.num_epochs / args.max_scales)}'
args.epochs_per_scale = [int(args.num_epochs / args.max_scales)] * args.max_scales
else:
args.save_path += f'_j{str(args.joint)[0]}_es{"_".join(args.epochs_per_scale.split(","))}'
args.epochs_per_scale = [int(args.epochs_per_scale)] * args.max_scales if ',' not in args.epochs_per_scale else list(map(int, args.epochs_per_scale.split(",")))
assert len(args.epochs_per_scale) == args.max_scales, f"Specify 1 or max_scales # of epochs_per_scale! Given {args.epochs_per_scale}, max_scales {args.max_scales}"
args.num_epochs = sum(args.epochs_per_scale)
# batch size
if args.batch_size_per_scale is None:
args.save_path += f'_bs{args.batch_size}'
args.batch_size_per_scale = [args.batch_size] * args.max_scales
else:
args.save_path += f'_bs{"_".join(args.batch_size_per_scale.split(","))}'
args.batch_size_per_scale = [int(args.batch_size_per_scale)] * args.max_scales if ',' not in args.batch_size_per_scale else list(map(int, args.batch_size_per_scale.split(",")))
assert len(args.batch_size_per_scale) == args.max_scales, f"Specify 1 or max_scales # of batch_size_per_scale! Given {args.batch_size_per_scale}, max_scales {args.max_scales}"
if args.test_batch_size == -1:
args.test_batch_size = min(args.batch_size_per_scale)
# LR
if args.lr_per_scale is None:
args.save_path += f'_lr{args.lr}'
args.lr_per_scale = [args.lr] * args.max_scales
else:
# args.save_path += f'_lr{"_".join(args.lr_per_scale.split(","))}'
args.lr_per_scale = [float(args.lr_per_scale)] * args.max_scales if ',' not in args.lr_per_scale else list(map(float, args.lr_per_scale.split(",")))
assert len(args.lr_per_scale) == args.max_scales, f"Specify 1 or max_scales # of lr_per_scale! Given {args.lr_per_scale}, max_scales {args.max_scales}"
# ckpt_to_load
if args.ckpt_to_load != "" and args.ckpt_to_load is not None:
args.resume = None
return args
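# Illustrative invocation (the script name and all flag values below are assumptions,
# not taken from the original repo):
#   python train_mrcnf.py --data cifar10 --mode wavelet --max_scales 3 \
#       --num_blocks 2,2,2 --batch_size_per_scale 256,128,64 --lr_per_scale 1e-3,5e-4,5e-4 \
#       --solver dopri5 --kinetic-energy 0.01 --jacobian-norm2 0.01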
class MSFlow():
def __init__(self, args=None, train_im_dataset=None):
if args is None:
self.args = get_args()
else:
self.args = args
self.train_im_dataset = train_im_dataset
torch.manual_seed(self.args.seed)
# Get device
self.args.device = "cuda:%d"%torch.cuda.current_device() if torch.cuda.is_available() and not args.disable_cuda else "cpu"
self.device = torch.device(self.args.device)
self.cuda = self.device != torch.device('cpu')
self.cvt = lambda x: x.type(torch.float32).to(self.device, non_blocking=True)
# Build model
        self.model = CNFMultiscale(**vars(self.args),
                                   regs=argparse.Namespace(kinetic_energy=self.args.kinetic_energy,
                                                           jacobian_norm2=self.args.jacobian_norm2))
self.image_shapes = self.model.image_shapes
self.input_shapes = self.model.input_shapes
if self.args.mode == '1d' or self.args.mode == '2d' or 'wavelet' in self.args.mode:
self.z_stds = self.model.z_stds
self.num_scales = self.model.num_scales
for cnf in self.model.scale_models:
set_cnf_options(self.args, cnf)
# if self.args.mode == 'wavelet':
# self.wavelet_shapes = self.model.wavelet_tx.wavelet_shapes
# Distributed model
if self.args.distributed:
torch.cuda.set_device(self.args.local_rank)
distributed.init_process_group(backend=self.args.dist_backend, init_method=self.args.dist_url, world_size=dist_utils.env_world_size(), rank=dist_utils.env_rank())
assert(dist_utils.env_world_size() == distributed.get_world_size())
self.model = self.model.cuda()
self.model = dist_utils.DDP(self.model,
device_ids=[self.args.local_rank],
output_device=self.args.local_rank)
# Model to device, set to scale
else:
self.model = self.model.to(self.device)
# Load (possibly partial) ckpt
if self.args.ckpt_to_load:
print(f"Loading weights from | |
2.328746E+04, 2.370391E+04, 2.412431E+04, 2.454868E+04,
2.497703E+04, 2.540938E+04, 2.584574E+04, 2.628611E+04, 2.673051E+04, 2.717896E+04,
2.763147E+04, 2.808804E+04, 2.854870E+04, 2.901346E+04, 2.948232E+04, 2.995531E+04,
3.043243E+04, 3.091370E+04, 3.139913E+04, 3.188873E+04, 3.238252E+04, 3.288051E+04,
3.338272E+04, 3.388915E+04, 3.439982E+04, 3.491474E+04, 3.543392E+04, 3.595739E+04,
3.648514E+04, 3.701720E+04, 3.755358E+04, 3.809428E+04, 3.863933E+04, 3.918873E+04,
3.974250E+04, 4.030065E+04, 4.086319E+04, 4.143014E+04, 4.200151E+04, 4.257730E+04,
4.315754E+04, 4.374223E+04, 4.433139E+04, 4.492502E+04, 4.552315E+04, 4.612578E+04,
4.673292E+04, 4.734459E+04, 4.796079E+04, 4.858154E+04, 4.920686E+04, 4.983674E+04,
5.047121E+04, 5.111027E+04, 5.175393E+04, 5.240221E+04, 5.305511E+04, 5.371265E+04,
5.437484E+04, 5.504169E+04, 5.571320E+04, 5.638939E+04, 5.707027E+04, 5.775584E+04,
5.844613E+04, 5.914113E+04, 5.984085E+04, 6.054532E+04, 6.125452E+04, 6.196849E+04,
6.268721E+04, 6.341071E+04, 6.413899E+04, 6.487206E+04, 6.560993E+04, 6.635261E+04,
6.710010E+04, 6.785242E+04, 6.860957E+04, 6.937155E+04, 7.013839E+04, 7.091008E+04,
7.168663E+04, 7.246805E+04, 7.325435E+04, 7.404554E+04, 7.484161E+04, 7.564259E+04,
7.644847E+04, 7.725926E+04, 7.807497E+04, 7.889561E+04, 7.972118E+04, 8.055169E+04,
8.138714E+04, 8.222754E+04, 8.307289E+04, 8.392321E+04, 8.477849E+04, 8.563874E+04,
8.650398E+04, 8.737419E+04, 8.824940E+04, 8.912959E+04, 9.001479E+04, 9.090498E+04,
9.180018E+04, 9.270040E+04, 9.360563E+04, 9.451588E+04, 9.543115E+04, 9.635146E+04,
9.727679E+04, 9.820716E+04, 9.914257E+04, 1.000830E+05, 1.010285E+05, 1.019791E+05,
1.029347E+05, 1.038953E+05, 1.048611E+05, 1.058318E+05, 1.068077E+05, 1.077886E+05,
1.087746E+05, 1.097656E+05, 1.107617E+05, 1.117630E+05, 1.127692E+05, 1.137806E+05,
1.147971E+05, 1.158186E+05, 1.168452E+05, 1.178769E+05, 1.189137E+05, 1.199556E+05,
1.210026E+05, 1.220546E+05, 1.231118E+05, 1.241741E+05, 1.252414E+05, 1.263139E+05,
1.273914E+05, 1.284741E+05, 1.295618E+05, 1.306547E+05, 1.317526E+05, 1.328556E+05,
1.339638E+05, 1.350770E+05, 1.361953E+05, 1.373188E+05, 1.384473E+05, 1.395809E+05,
1.407197E+05, 1.418635E+05, 1.430124E+05, 1.441664E+05, 1.453255E+05, 1.464897E+05,
1.476590E+05, 1.488334E+05, 1.500129E+05, 1.511974E+05, 1.523871E+05, 1.535818E+05,
1.547816E+05, 1.559865E+05, 1.571964E+05, 1.584115E+05, 1.596316E+05, 1.608568E+05,
1.620871E+05, 1.633224E+05, 1.645628E+05, 1.658083E+05, 1.670588E+05, 1.683144E+05,
1.695750E+05, 1.708407E+05, 1.721114E+05, 1.733872E+05, 1.746680E+05, 1.759539E+05,
1.772448E+05, 1.785407E+05, 1.798417E+05, 1.811477E+05, 1.824587E+05, 1.837748E+05,
1.850958E+05, 1.864219E+05, 1.877530E+05, 1.890890E+05, 1.904301E+05, 1.917762E+05,
1.931273E+05, 1.944833E+05, 1.958443E+05, 1.972104E+05, 1.985814E+05, 1.999573E+05,
2.013382E+05, 2.027241E+05, 2.041150E+05, 2.055107E+05, 2.069115E+05, 2.083171E+05,
2.097278E+05, 2.111433E+05, 2.125638E+05, 2.139891E+05, 2.154194E+05, 2.168546E+05,
2.182947E+05, 2.197397E+05, 2.211896E+05, 2.226444E+05, 2.241041E+05, 2.255686E+05,
2.270380E+05, 2.285122E+05, 2.299914E+05, 2.314753E+05, 2.329641E+05, 2.344578E+05,
2.359562E+05, 2.374595E+05, 2.389676E+05, 2.404806E+05, 2.419983E+05, 2.435208E+05,
2.450481E+05, 2.465802E+05, 2.481171E+05, 2.496587E+05, 2.512051E+05, 2.527563E+05,
2.543122E+05, 2.558729E+05, 2.574382E+05, 2.590083E+05, 2.605832E+05, 2.621627E+05,
2.637469E+05, 2.653359E+05, 2.669295E+05, 2.685278E+05, 2.701308E+05, 2.717384E+05,
2.733507E+05, 2.749677E+05, 2.765893E+05, 2.782155E+05, 2.798463E+05, 2.814818E+05,
2.831219E+05, 2.847665E+05, 2.864158E+05, 2.880697E+05, 2.897281E+05, 2.913911E+05,
2.930586E+05, 2.947307E+05, 2.964074E+05, 2.980885E+05, 2.997742E+05, 3.014644E+05,
3.031591E+05, 3.048584E+05, 3.065620E+05, 3.082702E+05, 3.099829E+05, 3.117000E+05,
3.134215E+05, 3.151475E+05, 3.168780E+05, 3.186128E+05, 3.203521E+05, 3.220958E+05,
3.238439E+05, 3.255963E+05, 3.273532E+05, 3.291144E+05, 3.308799E+05, 3.326499E+05,
3.344241E+05, 3.362027E+05, 3.379856E+05, 3.397728E+05,
])
# ---------------------- M = 7, I = 5 ---------------------------
M = 7
I = 5
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[5]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
2.157469E+01, 1.910172E+02, 3.804688E+02, 5.704593E+02, 7.606097E+02, 9.508401E+02,
1.141123E+03, 1.331447E+03, 1.521808E+03, 1.712208E+03, 1.902659E+03, 2.093190E+03,
2.283847E+03, 2.474702E+03, 2.665851E+03, 2.857410E+03, 3.049515E+03, 3.242316E+03,
3.435973E+03, 3.630652E+03, 3.826519E+03, 4.023741E+03, 4.222477E+03, 4.422884E+03,
4.625110E+03, 4.829294E+03, 5.035569E+03, 5.244058E+03, 5.454873E+03, 5.668122E+03,
5.883902E+03, 6.102304E+03, 6.323410E+03, 6.547298E+03, 6.774037E+03, 7.003694E+03,
7.236326E+03, 7.471990E+03, 7.710735E+03, 7.952609E+03, 8.197654E+03, 8.445909E+03,
8.697412E+03, 8.952197E+03, 9.210295E+03, 9.471735E+03, 9.736545E+03, 1.000475E+04,
1.027637E+04, 1.055144E+04, 1.082997E+04, 1.111198E+04, 1.139749E+04, 1.168652E+04,
1.197908E+04, 1.227520E+04, 1.257488E+04, 1.287815E+04, 1.318501E+04, 1.349548E+04,
1.380958E+04, 1.412731E+04, 1.444869E+04, 1.477374E+04, 1.510246E+04, 1.543486E+04,
1.577096E+04, 1.611078E+04, 1.645431E+04, 1.680158E+04, 1.715259E+04, 1.750736E+04,
1.786589E+04, 1.822821E+04, 1.859431E+04, 1.896422E+04, 1.933794E+04, 1.971548E+04,
2.009687E+04, 2.048210E+04, 2.087119E+04, 2.126415E+04, 2.166100E+04, 2.206174E+04,
2.246639E+04, 2.287496E+04, 2.328746E+04, 2.370391E+04, 2.412431E+04, 2.454868E+04,
2.497703E+04, 2.540938E+04, 2.584574E+04, 2.628611E+04, 2.673051E+04, 2.717896E+04,
2.763147E+04, 2.808804E+04, 2.854870E+04, 2.901346E+04, 2.948232E+04, 2.995531E+04,
3.043243E+04, 3.091370E+04, 3.139913E+04, 3.188873E+04, 3.238252E+04, 3.288051E+04,
3.338272E+04, 3.388915E+04, 3.439982E+04, 3.491474E+04, 3.543392E+04, 3.595739E+04,
3.648514E+04, 3.701720E+04, 3.755358E+04, 3.809428E+04, 3.863933E+04, 3.918873E+04,
3.974250E+04, 4.030065E+04, 4.086319E+04, 4.143014E+04, 4.200151E+04, 4.257730E+04,
4.315754E+04, 4.374223E+04, 4.433139E+04, 4.492502E+04, 4.552315E+04, 4.612578E+04,
4.673292E+04, 4.734459E+04, 4.796079E+04, 4.858154E+04, 4.920686E+04, 4.983674E+04,
5.047121E+04, 5.111027E+04, 5.175393E+04, 5.240221E+04, 5.305511E+04, 5.371265E+04,
5.437484E+04, 5.504169E+04, 5.571320E+04, 5.638939E+04, 5.707027E+04, 5.775584E+04,
5.844613E+04, 5.914113E+04, 5.984085E+04, 6.054532E+04, 6.125452E+04, 6.196849E+04,
6.268721E+04, 6.341071E+04, 6.413899E+04, 6.487206E+04, 6.560993E+04, 6.635261E+04,
6.710010E+04, 6.785242E+04, 6.860957E+04, 6.937155E+04, 7.013839E+04, 7.091008E+04,
7.168663E+04, 7.246805E+04, 7.325435E+04, 7.404554E+04, 7.484161E+04, 7.564259E+04,
7.644847E+04, 7.725926E+04, 7.807497E+04, 7.889561E+04, 7.972118E+04, 8.055169E+04,
8.138714E+04, 8.222754E+04, 8.307289E+04, 8.392321E+04, 8.477849E+04, 8.563874E+04,
8.650398E+04, 8.737419E+04, 8.824940E+04, 8.912959E+04, 9.001479E+04, 9.090498E+04,
9.180018E+04, 9.270040E+04, 9.360563E+04, 9.451588E+04, 9.543115E+04, 9.635146E+04,
9.727679E+04, 9.820716E+04, 9.914257E+04, 1.000830E+05, 1.010285E+05, 1.019791E+05,
1.029347E+05, 1.038953E+05, 1.048611E+05, 1.058318E+05, 1.068077E+05, 1.077886E+05,
1.087746E+05, 1.097656E+05, 1.107617E+05, 1.117630E+05, 1.127692E+05, 1.137806E+05,
1.147971E+05, 1.158186E+05, 1.168452E+05, 1.178769E+05, 1.189137E+05, 1.199556E+05,
1.210026E+05, 1.220546E+05, 1.231118E+05, 1.241741E+05, 1.252414E+05, 1.263139E+05,
1.273914E+05, 1.284741E+05, 1.295618E+05, 1.306547E+05, 1.317526E+05, 1.328556E+05,
1.339638E+05, 1.350770E+05, 1.361953E+05, 1.373188E+05, 1.384473E+05, 1.395809E+05,
1.407197E+05, 1.418635E+05, 1.430124E+05, 1.441664E+05, 1.453255E+05, 1.464897E+05,
1.476590E+05, 1.488334E+05, 1.500129E+05, 1.511974E+05, 1.523871E+05, 1.535818E+05,
1.547816E+05, 1.559865E+05, 1.571964E+05, 1.584115E+05, 1.596316E+05, 1.608568E+05,
1.620871E+05, 1.633224E+05, 1.645628E+05, 1.658083E+05, 1.670588E+05, 1.683144E+05,
1.695750E+05, 1.708407E+05, 1.721114E+05, 1.733872E+05, 1.746680E+05, 1.759539E+05,
1.772448E+05, 1.785407E+05, 1.798417E+05, 1.811477E+05, 1.824587E+05, 1.837748E+05,
1.850958E+05, 1.864219E+05, 1.877530E+05, 1.890890E+05, 1.904301E+05, 1.917762E+05,
1.931273E+05, 1.944833E+05, 1.958443E+05, 1.972104E+05, 1.985814E+05, 1.999573E+05,
2.013382E+05, 2.027241E+05, 2.041150E+05, 2.055107E+05, 2.069115E+05, 2.083171E+05,
2.097278E+05, 2.111433E+05, 2.125638E+05, 2.139891E+05, 2.154194E+05, 2.168546E+05,
2.182947E+05, 2.197397E+05, 2.211896E+05, 2.226444E+05, 2.241041E+05, 2.255686E+05,
2.270380E+05, 2.285122E+05, 2.299914E+05, 2.314753E+05, 2.329641E+05, 2.344578E+05,
2.359562E+05, 2.374595E+05, 2.389676E+05, 2.404806E+05, 2.419983E+05, 2.435208E+05,
2.450481E+05, 2.465802E+05, 2.481171E+05, 2.496587E+05, 2.512051E+05, 2.527563E+05,
2.543122E+05, 2.558729E+05, 2.574382E+05, 2.590083E+05, 2.605832E+05, 2.621627E+05,
2.637469E+05, 2.653359E+05, 2.669295E+05, 2.685278E+05, 2.701308E+05, 2.717384E+05,
2.733507E+05, 2.749677E+05, 2.765893E+05, 2.782155E+05, 2.798463E+05, 2.814818E+05,
2.831219E+05, 2.847665E+05, 2.864158E+05, 2.880697E+05, 2.897281E+05, 2.913911E+05,
2.930586E+05, 2.947307E+05, 2.964074E+05, 2.980885E+05, 2.997742E+05, 3.014644E+05,
3.031591E+05, 3.048584E+05, 3.065620E+05, 3.082702E+05, 3.099829E+05, 3.117000E+05,
3.134215E+05, 3.151475E+05, 3.168780E+05, 3.186128E+05, 3.203521E+05, 3.220958E+05,
3.238439E+05, 3.255963E+05, 3.273532E+05, 3.291144E+05, 3.308799E+05, 3.326499E+05,
3.344241E+05, 3.362027E+05, 3.379856E+05, 3.397728E+05,
])
# ---------------------- M = 7, I = 6 ---------------------------
M = 7
I = 6
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[5]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
7.197400E+01, 5.586187E+02, 1.111229E+03, 1.665410E+03, 2.220057E+03, 2.774938E+03,
3.329973E+03, 3.885127E+03, 4.440388E+03, 4.995759E+03, 5.551272E+03, 6.106999E+03,
6.663066E+03, 7.219666E+03, 7.777057E+03, 8.335560E+03, 8.895550E+03, 9.457446E+03,
1.002170E+04, 1.058877E+04, 1.115914E+04, 1.173327E+04, 1.231163E+04, 1.289467E+04,
1.348281E+04, 1.407645E+04, 1.467598E+04, 1.528176E+04, 1.589412E+04, 1.651337E+04,
1.713979E+04, 1.777365E+04, 1.841520E+04, 1.906465E+04, 1.972223E+04, 2.038811E+04,
2.106247E+04, 2.174549E+04, 2.243730E+04, 2.313805E+04, 2.384786E+04, 2.456684E+04,
2.529512E+04, 2.603279E+04, 2.677994E+04, 2.753666E+04, 2.830303E+04, 2.907913E+04,
2.986503E+04, 3.066080E+04, 3.146649E+04, 3.228216E+04, 3.310788E+04, 3.394370E+04,
3.478965E+04, 3.564581E+04, 3.651220E+04, 3.738887E+04, 3.827587E+04, 3.917323E+04,
4.008100E+04, 4.099922E+04, 4.192792E+04, 4.286713E+04, 4.381690E+04, 4.477727E+04,
4.574825E+04, 4.672990E+04, 4.772225E+04, 4.872532E+04, 4.973916E+04, 5.076379E+04,
5.179925E+04, 5.284558E+04, 5.390280E+04, 5.497096E+04, 5.605008E+04, 5.714021E+04,
5.824136E+04, 5.935359E+04, 6.047692E+04, 6.161139E+04, 6.275703E+04, 6.391387E+04,
6.508197E+04, 6.626134E+04, 6.745202E+04, 6.865406E+04, 6.986749E+04, 7.109234E+04,
7.232865E+04, 7.357646E+04, 7.483581E+04, 7.610672E+04, 7.738925E+04, 7.868343E+04,
7.998930E+04, 8.130689E+04, 8.263624E+04, 8.397740E+04, 8.533040E+04, 8.669528E+04,
8.807207E+04, 8.946082E+04, 9.086157E+04, 9.227436E+04, 9.369922E+04, 9.513619E+04,
9.658532E+04, 9.804664E+04, 9.952019E+04, 1.010060E+05, 1.025041E+05, 1.040146E+05,
1.055375E+05, 1.070728E+05, 1.086206E+05, 1.101808E+05, 1.117536E+05, 1.133390E+05,
1.149371E+05, 1.165477E+05, 1.181711E+05, 1.198072E+05, 1.214561E+05, 1.231178E+05,
1.247924E+05, 1.264798E+05, 1.281802E+05, 1.298935E+05, 1.316198E+05, 1.333592E+05,
1.351116E+05, 1.368771E+05, 1.386558E+05, 1.404477E+05, 1.422528E+05, 1.440711E+05,
1.459027E+05, 1.477476E+05, 1.496059E+05, 1.514775E+05, 1.533626E+05, 1.552611E+05,
1.571732E+05, 1.590987E+05, 1.610378E+05, 1.629905E+05, 1.649567E+05, 1.669367E+05,
1.689303E+05, 1.709376E+05, 1.729586E+05, 1.749935E+05, 1.770421E+05, 1.791045E+05,
1.811808E+05, 1.832710E+05, 1.853751E+05, 1.874931E+05, 1.896251E+05, 1.917711E+05,
1.939311E+05, 1.961052E+05, 1.982934E+05, 2.004956E+05, 2.027120E+05, 2.049425E+05,
2.071872E+05, 2.094461E+05, 2.117192E+05, 2.140066E+05, 2.163082E+05, 2.186241E+05,
2.209544E+05, 2.232989E+05, 2.256579E+05, 2.280312E+05, 2.304189E+05, 2.328211E+05,
2.352376E+05, 2.376687E+05, 2.401142E+05, 2.425742E+05, 2.450488E+05, 2.475379E+05,
2.500415E+05, 2.525597E+05, 2.550925E+05, 2.576400E+05, 2.602020E+05, 2.627787E+05,
2.653700E+05, 2.679761E+05, 2.705968E+05, 2.732322E+05, 2.758823E+05, 2.785471E+05,
2.812268E+05, 2.839211E+05, 2.866302E+05, 2.893542E+05, 2.920929E+05, 2.948464E+05,
2.976148E+05, 3.003980E+05, 3.031960E+05, 3.060089E+05, 3.088366E+05, 3.116792E+05,
3.145368E+05, 3.174092E+05, 3.202965E+05, 3.231987E+05, 3.261158E+05, 3.290479E+05,
3.319949E+05, 3.349569E+05, 3.379338E+05, 3.409257E+05, 3.439325E+05, 3.469543E+05,
3.499911E+05, 3.530429E+05, 3.561096E+05, 3.591914E+05, 3.622881E+05, 3.653999E+05,
3.685267E+05, 3.716684E+05, 3.748252E+05, 3.779970E+05, 3.811839E+05, 3.843857E+05,
3.876026E+05, 3.908345E+05, 3.940814E+05, 3.973434E+05, 4.006204E+05, 4.039124E+05,
4.072195E+05, 4.105416E+05, 4.138787E+05, 4.172308E+05, 4.205980E+05, 4.239803E+05,
4.273775E+05, 4.307898E+05, 4.342171E+05, 4.376594E+05, 4.411168E+05, 4.445892E+05,
4.480765E+05, 4.515790E+05, 4.550964E+05, 4.586288E+05, 4.621762E+05, 4.657386E+05,
4.693160E+05, 4.729085E+05, 4.765158E+05, 4.801382E+05, 4.837756E+05, 4.874279E+05,
4.910951E+05, 4.947774E+05, 4.984745E+05, 5.021866E+05, 5.059137E+05, 5.096557E+05,
5.134126E+05, 5.171844E+05, 5.209711E+05, 5.247727E+05, 5.285892E+05, 5.324206E+05,
5.362668E+05, 5.401279E+05, 5.440038E+05, 5.478946E+05, 5.518002E+05, 5.557207E+05,
5.596559E+05, 5.636060E+05, 5.675708E+05, 5.715504E+05, 5.755448E+05, 5.795539E+05,
5.835777E+05, 5.876163E+05, 5.916696E+05, 5.957376E+05, 5.998203E+05, 6.039177E+05,
6.080298E+05, 6.121564E+05, 6.162978E+05, 6.204537E+05, 6.246243E+05, 6.288094E+05,
6.330091E+05, 6.372234E+05, 6.414523E+05, 6.456957E+05, 6.499535E+05, 6.542259E+05,
6.585128E+05, 6.628142E+05, 6.671300E+05, 6.714602E+05, 6.758049E+05, 6.801639E+05,
6.845374E+05, 6.889252E+05, 6.933274E+05, 6.977439E+05, 7.021747E+05, 7.066198E+05,
7.110792E+05, 7.155528E+05, 7.200407E+05, 7.245429E+05, 7.290592E+05, 7.335897E+05,
7.381343E+05, 7.426932E+05, 7.472661E+05, 7.518531E+05, 7.564543E+05, 7.610694E+05,
7.656987E+05, 7.703419E+05, 7.749992E+05, 7.796704E+05, 7.843556E+05, 7.890547E+05,
7.937678E+05, 7.984947E+05, 8.032355E+05, 8.079901E+05, 8.127586E+05, 8.175409E+05,
8.223370E+05, 8.271468E+05, 8.319703E+05, 8.368076E+05, 8.416586E+05, 8.465232E+05,
8.514015E+05, 8.562933E+05, 8.611988E+05, 8.661178E+05, 8.710504E+05, 8.759965E+05,
8.809561E+05, 8.859292E+05, 8.909157E+05, 8.959156E+05, 9.009290E+05, 9.059557E+05,
9.109957E+05, 9.160491E+05, 9.211157E+05, 9.261956E+05, 9.312888E+05, 9.363952E+05,
9.415147E+05, 9.466474E+05, 9.517933E+05, 9.569522E+05, 9.621243E+05, 9.673094E+05,
9.725075E+05, 9.777186E+05, 9.829427E+05, 9.881797E+05,
])
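# Lookup sketch (illustrative): the partition-function tables are keyed by the
# (molecule M, isotopologue I) pair, with the shared temperature grid stored
# separately, so Q at an arbitrary temperature is obtained by interpolating
# over the pair of grids:
#   T_grid = TIPS_2017_ISOT_HASH[(7, 6)]
#   Q_grid = TIPS_2017_ISOQ_HASH[(7, 6)]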
# ---------------------- M = 8, I = 1 ---------------------------
M = 8
I = 1
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
1.187882E+01, 5.438944E+01, 1.046755E+02, 1.605493E+02, 2.237848E+02, 2.936074E+02,
3.693790E+02, 4.493987E+02, 5.325091E+02, 6.180282E+02, 7.054511E+02, 7.943992E+02,
8.845954E+02, 9.758218E+02, 1.067921E+03, 1.160750E+03, 1.254256E+03, 1.348352E+03,
1.443007E+03, 1.538179E+03, 1.633904E+03, 1.730134E+03, 1.826913E+03, 1.924263E+03,
2.022194E+03, 2.120743E+03, 2.219926E+03, 2.319814E+03, 2.420412E+03, 2.521749E+03,
2.623906E+03, 2.726868E+03, 2.830696E+03, 2.935398E+03, 3.041038E+03, 3.147623E+03,
3.255221E+03, 3.363806E+03, 3.473413E+03, 3.584110E+03, 3.695900E+03, 3.808781E+03,
3.922787E+03, 4.037949E+03, 4.154301E+03, 4.271797E+03, 4.390503E+03, 4.510451E+03,
4.631630E+03, 4.754023E+03, 4.877700E+03, 5.002642E+03, 5.128876E+03, 5.256379E+03,
5.385224E+03, 5.515334E+03, 5.646783E+03, 5.779540E+03, 5.913678E+03, 6.049113E+03,
6.185916E+03, 6.324108E+03, 6.463595E+03, 6.604508E+03, 6.746748E+03, 6.890451E+03,
7.035454E+03, 7.181892E+03, 7.329656E+03, 7.478888E+03, 7.629473E+03, 7.781490E+03,
7.934955E+03, 8.089742E+03, 8.246004E+03, 8.403681E+03, 8.562787E+03, 8.723261E+03,
8.885184E+03, 9.048571E+03, 9.213358E+03, 9.379626E+03, 9.547237E+03, 9.716351E+03,
9.886903E+03, 1.005890E+04, 1.023227E+04, 1.040718E+04, 1.058348E+04, 1.076127E+04,
1.094045E+04, 1.112113E+04, 1.130323E+04, 1.148684E+04, 1.167188E+04, 1.185836E+04,
1.204629E+04, 1.223566E+04, 1.242649E+04, 1.261887E+04, 1.281263E+04, 1.300787E+04,
1.320458E+04, 1.340278E+04, 1.360238E+04, 1.380347E+04, 1.400606E+04, 1.421006E+04,
1.441557E+04, 1.462260E+04, 1.483105E+04, 1.504093E+04, 1.525234E+04, 1.546529E+04,
1.567956E+04, 1.589539E+04, 1.611265E+04, 1.633148E+04, 1.655176E+04, 1.677349E+04,
1.699667E+04, 1.722133E+04, 1.744744E+04, 1.767514E+04, 1.790419E+04, 1.813483E+04,
1.836696E+04, 1.860044E+04, 1.883553E+04, 1.907199E+04, 1.931006E+04, 1.954950E+04,
1.979055E+04, 2.003299E+04, 2.027692E+04, 2.052237E+04, 2.076931E+04, 2.101764E+04,
2.126762E+04, 2.151898E+04, 2.177186E+04, 2.202612E+04, 2.228205E+04, 2.253936E+04,
2.279821E+04, 2.305845E+04, 2.332023E+04, 2.358354E+04, 2.384840E+04, 2.411465E+04,
2.438245E+04, 2.465165E+04, 2.492240E+04, 2.519456E+04, 2.546842E+04, 2.574354E+04,
2.602037E+04, 2.629846E+04, 2.657826E+04, 2.685948E+04, 2.714211E+04, 2.742631E+04,
2.771194E+04, 2.799913E+04, 2.828791E+04, 2.857810E+04, 2.886972E+04, 2.916292E+04,
2.945755E+04, 2.975376E+04, 3.005141E+04, 3.035064E+04, 3.065130E+04, 3.095340E+04,
3.125708E+04, 3.156221E+04,
])
# ---------------------- M = 8, I = 2 ---------------------------
M = 8
I = 2
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] | |
= 'DELETE'
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
if 'name' in params:
path_params['name'] = params['name']
if 'path2' in params:
path_params['path'] = params['path2']
query_params = {}
if 'path' in params:
query_params['path'] = params['path']
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['*/*'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def connect_options_namespaced_pod_proxy_6(self, namespace, name, path2, **kwargs):
"""
connect OPTIONS requests to proxy of Pod
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.connect_options_namespaced_pod_proxy_6(namespace, name, path2, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str name: name of the Pod (required)
:param str path2: path to the resource (required)
:param str path: Path is the URL path to use for the current proxy request to pod.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'name', 'path2', 'path']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method connect_options_namespaced_pod_proxy_6" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `connect_options_namespaced_pod_proxy_6`")
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `connect_options_namespaced_pod_proxy_6`")
# verify the required parameter 'path2' is set
if ('path2' not in params) or (params['path2'] is None):
raise ValueError("Missing the required parameter `path2` when calling `connect_options_namespaced_pod_proxy_6`")
resource_path = '/api/v1/namespaces/{namespace}/pods/{name}/proxy/{path}'.replace('{format}', 'json')
method = 'OPTIONS'
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
if 'name' in params:
path_params['name'] = params['name']
if 'path2' in params:
path_params['path'] = params['path2']
query_params = {}
if 'path' in params:
query_params['path'] = params['path']
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['*/*'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
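    # Usage sketch (values are illustrative; assumes `api` is an instance of this
    # API class built from a configured api_client): send an OPTIONS request through
    # the apiserver proxy to a path served by the pod.
    #   result = api.connect_options_namespaced_pod_proxy_6("default", "my-pod", "healthz")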
def replace_namespaced_pod_status(self, body, namespace, name, **kwargs):
"""
replace status of the specified Pod
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.replace_namespaced_pod_status(body, namespace, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param V1Pod body: (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str name: name of the Pod (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Pod
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'namespace', 'name', 'pretty']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_pod_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_pod_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_pod_status`")
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_pod_status`")
resource_path = '/api/v1/namespaces/{namespace}/pods/{name}/status'.replace('{format}', 'json')
method = 'PUT'
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
if 'name' in params:
path_params['name'] = params['name']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
header_params = {}
form_params = {}
files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='V1Pod',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
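    # Usage sketch (names are illustrative; assumes `api` is an instance of this API
    # class and `pod` is a V1Pod whose status fields were modified locally):
    #   updated = api.replace_namespaced_pod_status(body=pod, namespace="default",
    #                                               name="my-pod")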
def list_namespaced_pod_template(self, namespace, **kwargs):
"""
list or watch objects of kind PodTemplate
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_namespaced_pod_template(namespace, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
:param int timeout_seconds: Timeout for the list/watch call.
:return: V1PodTemplateList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_pod_template" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_pod_template`")
resource_path = '/api/v1/namespaces/{namespace}/podtemplates'.replace('{format}', 'json')
method = 'GET'
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
if 'label_selector' in params:
query_params['labelSelector'] = params['label_selector']
if 'field_selector' in params:
query_params['fieldSelector'] = params['field_selector']
if 'watch' in params:
query_params['watch'] = params['watch']
if 'resource_version' in params:
query_params['resourceVersion'] = params['resource_version']
if 'timeout_seconds' in params:
query_params['timeoutSeconds'] = params['timeout_seconds']
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='V1PodTemplateList',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
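    # Usage sketch (selector value is illustrative): list the templates of one
    # namespace, optionally filtered by label, using the query parameters built above.
    #   templates = api.list_namespaced_pod_template("default", label_selector="app=web")
    #   for item in templates.items:
    #       print(item.metadata.name)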
def create_namespaced_pod_template(self, body, namespace, **kwargs):
"""
create a PodTemplate
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_namespaced_pod_template(body, namespace, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param V1PodTemplate body: (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1PodTemplate
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'namespace', 'pretty']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_pod_template" % key
)
params[key] = val
        del params['kwargs']
from utils import *
import json
import numpy as np
import os
import matplotlib.pyplot as plt
import collections
import seaborn as sns
import pandas as pd
def histo_flows(instance, out_dir, files_ID, year, month, week_day_list):
link_min_dict = zload(out_dir + "link_min_dict" + files_ID + ".pkz")
link_id = zload(out_dir + "link_edge_dict" + files_ID + ".pkz")
capacity = zload(out_dir + "capacity_link" + files_ID + ".pkz")
for i in link_id.keys():
flow = []
for day in week_day_list:
a = link_min_dict["link_" + str(link_id[i]) + "_" + str(year) + "_" + str(month) + "_" + str(day)]
a = a["flow_" + instance]
flow.extend(a)
capac = capacity[str(link_id[i]) + "_" + instance]
plt.figure()
plt.title('link' + str(link_id[i]) + " :" + instance )
plt.axvline(capac, color='k', linestyle='dashed', linewidth=2)
plt.hist(flow, color='c', edgecolor='k', alpha=0.65)
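# Usage sketch (argument values are illustrative): one histogram per link of the
# observed flows for a time instance over the given weekdays, with the link capacity
# drawn as a dashed vertical line.
#   histo_flows('AM', out_dir, files_ID, 2015, 1, [5, 6, 7, 8, 9])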
def plot_POA(instance, out_dir, month_w):
#for instance in time_instances['id']:
#with open(out_dir + "PoA_dict_noAdj_" + month_w + '_' + instance + '.json', 'r') as json_file:
# PoA_dict_noAdj = json.load(json_file)
#PoA_dict_noAdj = sorted(PoA_dict_noAdj.items())
#x, y = zip(*PoA_dict_noAdj)
PoA_dict = {}
#for i in range(len(x)):
# PoA_dict[int(x[i])] = y[i]
with open(out_dir + "PoA_dict_" + month_w + '_' + instance + '.json', 'r') as json_file:
PoA_dict_ = json.load(json_file)
PoA_dict_ = sorted(PoA_dict_.items())
x2, y2 = zip(*PoA_dict_)
PoA_dict2 = {}
for i in range(len(x2)):
PoA_dict2[int(x2[i])] = y2[i]
plt.figure()
PoA_dictP = plt.plot(PoA_dict.keys(), PoA_dict.values(), "bo-")
PoA_dict_noAdj = plt.plot(PoA_dict2.keys(), PoA_dict2.values(), "rs-")
#plt.legend([PoA_dict, PoA_dict_noAdj], ["PoA", "PoA demand adj"], loc=0)
plt.xlabel('Days of ' + month_w)
plt.ylabel('PoA')
#pylab.xlim(1, 30)
#pylab.ylim(0.9, 2.0)
grid("on")
savefig(out_dir + 'PoA'+ '_' + instance + '_' + month_w +'.pdf')
def plot_cong(instance, out_dir, month_w):
#for instance in time_instances['id']:
'''
with open(out_dir + "cong_dict_noAdj_" + month_w + '_' + instance + '.json', 'r') as json_file:
cong_dict_noAdj = json.load(json_file)
cong_dict_noAdj = sorted(cong_dict_noAdj.items())
x, y = zip(*cong_dict_noAdj)
'''
with open(out_dir + "cong_" + month_w + '_' + instance + '.json', 'r') as json_file:
cong_dict_ = json.load(json_file)
cong_dict_ = sorted(cong_dict_.items())
x2, y2 = zip(*cong_dict_)
cong_dict = {}
for i in range(len(cong_dict_)):
cong_dict[int(x2[i])] = y2[i]
plt.figure()
# PoA_dict = plt.plot(x, y, "bo-")
PoA_dict_noAdj = plt.plot(cong_dict.keys(),cong_dict.values() , "rs-")
#plt.legend([PoA_dict, PoA_dict_noAdj], ["PoA", "PoA demand adj"], loc=0)
plt.xlabel('Days of ' + month_w)
plt.ylabel('cong')
#pylab.xlim(-0.1, 1.6)
#pylab.ylim(0.9, 2.0)
grid("on")
savefig(out_dir + 'cong'+ '_' + instance + '_' + month_w +'.pdf')
#plt.show()
def plt_cong_vs_poa(instance, out_dir, month_w):
# Load congestion
with open(out_dir + "cong_" + month_w + '_' + instance + '.json', 'r') as json_file:
cong_dict_ = json.load(json_file)
cong_dict_ = sorted(cong_dict_.items())
x2, y2 = zip(*cong_dict_)
cong_dict = {}
for i in range(len(cong_dict_)):
cong_dict[int(x2[i])] = y2[i]
#Load PoA
# with open(out_dir + "PoA_dict_noAdj_" + month_w + '_' + instance + '.json', 'r') as json_file:
#PoA_dict_noAdj = json.load(json_file)
#PoA_dict_noAdj = sorted(PoA_dict_noAdj.items())
#x, y = zip(*PoA_dict_noAdj)
PoA_dict = {}
#for i in range(len(x)):
# PoA_dict[int(x[i])] = y[i]
with open(out_dir + "PoA_dict_" + month_w + '_' + instance + '.json', 'r') as json_file:
PoA_dict_ = json.load(json_file)
PoA_dict_ = sorted(PoA_dict_.items())
x2, y2 = zip(*PoA_dict_)
PoA_dict2 = {}
for i in range(len(x2)):
PoA_dict2[int(x2[i])] = y2[i]
#Dict relating cong and Poa
poa_cong_dict = {}
for key in PoA_dict2.keys():
poa_cong_dict[PoA_dict2[key]] = cong_dict[key]
plt.figure()
# PoA_dict = plt.plot(x, y, "bo-")
PoA_dict_noAdj = plt.scatter(poa_cong_dict.values(),poa_cong_dict.keys())
#plt.legend([PoA_dict, PoA_dict_noAdj], ["PoA", "PoA demand adj"], loc=0)
plt.xlabel('Cong ' + month_w)
plt.ylabel('PoA')
#pylab.xlim(-0.1, 1.6)
#pylab.ylim(0.9, 2.0)
grid("on")
savefig(out_dir + 'Cong_vs_PoA_'+ instance + '_' + month_w +'.pdf')
#plt.show()
def plt_cong_vs_all(time_instances, out_dir, month_w):
# Load congestion
poa_cong_dict = {}
plt.figure()
for instance in time_instances['id']:
with open(out_dir + "cong_" + month_w + '_' + instance + '.json', 'r') as json_file:
cong_dict_ = json.load(json_file)
cong_dict_ = sorted(cong_dict_.items())
x2, y2 = zip(*cong_dict_)
cong_dict = {}
for i in range(len(cong_dict_)):
cong_dict[int(x2[i])] = y2[i]
#Load PoA
# with open(out_dir + "PoA_dict_noAdj_" + month_w + '_' + instance + '.json', 'r') as json_file:
# PoA_dict_noAdj = json.load(json_file)
#PoA_dict_noAdj = sorted(PoA_dict_noAdj.items())
#x, y = zip(*PoA_dict_noAdj)
PoA_dict = {}
for i in range(len(x2)):
PoA_dict[int(x2[i])] = y2[i]
with open(out_dir + "PoA_dict_" + month_w + '_' + instance + '.json', 'r') as json_file:
PoA_dict_ = json.load(json_file)
PoA_dict_ = sorted(PoA_dict_.items())
x2, y2 = zip(*PoA_dict_)
PoA_dict2 = {}
for i in range(len(x2)):
PoA_dict2[int(x2[i])] = y2[i]
#Dict relating cong and Poa
poa_cong_dict={}
for key in PoA_dict.keys():
poa_cong_dict[PoA_dict2[key]] = cong_dict[key]
# PoA_dict = plt.plot(x, y, "bo-")
PoA_dict_noAdj = plt.scatter(poa_cong_dict.values(),poa_cong_dict.keys(), alpha = 0.7, label= instance)
plt.legend(loc=0)
#plt.legend(PoA_dict_noAdj, instance, loc=0)
plt.xlabel('Cong ' + month_w)
plt.ylabel('PoA')
#pylab.xlim(-0.1, 1.6)
#pylab.ylim(0.9, 2.0)
grid("on")
savefig(out_dir + 'Cong_vs_PoA_all'+ '_' + month_w +'.pdf')
#plt.show()
def plt_obj_vs_all(time_instances, out_dir, month_w):
# Load congestion
poa_cong_dict = {}
plt.figure()
for instance in time_instances['id']:
with open(out_dir + "obj_dict" + month_w + '_' + instance + '.json', 'r') as json_file:
obj_dict = json.load(json_file)
obj_dict = sorted(obj_dict.items())
x2, y2 = zip(*obj_dict)
#print len(x2)
obj_dict = {}
for i in range(len(x2)):
obj_dict[int(x2[i])] = y2[i]
#print obj_dict
#Load PoA
#with open(out_dir + "PoA_dict_noAdj_" + month_w + '_' + instance + '.json', 'r') as json_file:
# PoA_dict_noAdj = json.load(json_file)
# PoA_dict_noAdj = sorted(PoA_dict_noAdj.items())
# x, y = zip(*PoA_dict_noAdj)
PoA_dict = {}
for i in range(len(x2)):
PoA_dict[int(x2[i])] = y2[i]
with open(out_dir + "PoA_dict_" + month_w + '_' + instance + '.json', 'r') as json_file:
PoA_dict_ = json.load(json_file)
PoA_dict_ = sorted(PoA_dict_.items())
x2, y2 = zip(*PoA_dict_)
PoA_dict2 = {}
for i in range(len(x2)):
PoA_dict2[int(x2[i])] = y2[i]
with open(out_dir + "cong_" + month_w + '_' + instance + '.json', 'r') as json_file:
cong_dict_ = json.load(json_file)
cong_dict_ = sorted(cong_dict_.items())
x2, y2 = zip(*cong_dict_)
cong_dict = {}
max_cong = max(y2)
cong_dict_marker = {}
for i in range(len(cong_dict_)):
cong_dict[int(x2[i])] = y2[i]
cong_dict_marker[int(x2[i])] = y2[i]/max_cong
#Dict relating cong and Poa
poa_cong_dict={}
for key in PoA_dict2.keys():
#print key
poa_cong_dict[PoA_dict2[key]] = obj_dict[key]
#print(poa_cong_dict)
#poa_cong_dict = sorted(poa_cong_dict.items())
#cong_dict = sorted(cong_dict.items())
# PoA_dict = plt.plot(x, y, "bo-")
PoA_dict_noAdj = plt.scatter(poa_cong_dict.values(),poa_cong_dict.keys(), alpha = 0.7, label= instance)
plt.legend(loc=0)
#plt.legend(PoA_dict_noAdj, instance, loc=0)
plt.xlabel('Obj diff ' + month_w)
plt.ylabel('PoA')
#pylab.xlim(-0.1, 1.6)
#pylab.ylim(0.9, 2.0)
grid("on")
savefig(out_dir + 'Obj_Diff_vs_PoA_all'+ '_' + month_w +'.pdf')
#plt.show()
def plt_obj_vs_cong_all(time_instances, out_dir, month_w):
# Load congestion
poa_cong_dict = {}
plt.figure()
for instance in time_instances['id']:
with open(out_dir + "obj_dict" + month_w + '_' + instance + '.json', 'r') as json_file:
obj_dict = json.load(json_file)
obj_dict = sorted(obj_dict.items())
x2, y2 = zip(*obj_dict)
#print len(x2)
obj_dict = {}
for i in range(len(x2)):
obj_dict[int(x2[i])] = y2[i]
#print obj_dict
#Load PoA
# with open(out_dir + "PoA_dict_noAdj_" + month_w + '_' + instance + '.json', 'r') as json_file:
# PoA_dict_noAdj = json.load(json_file)
#PoA_dict_noAdj = sorted(PoA_dict_noAdj.items())
#x, y = zip(*PoA_dict_noAdj)
PoA_dict = {}
for i in range(len(x2)):
PoA_dict[int(x2[i])] = y2[i]
with open(out_dir + "PoA_dict_" + month_w + '_' + instance + '.json', 'r') as json_file:
PoA_dict_ = json.load(json_file)
PoA_dict_ = sorted(PoA_dict_.items())
x2, y2 = zip(*PoA_dict_)
PoA_dict2 = {}
for i in range(len(x2)):
PoA_dict2[int(x2[i])] = y2[i]
with open(out_dir + "cong_" + month_w + '_' + instance + '.json', 'r') as json_file:
cong_dict_ = json.load(json_file)
cong_dict_ = sorted(cong_dict_.items())
x2, y2 = zip(*cong_dict_)
cong_dict = {}
max_cong = max(y2)
cong_dict_marker = {}
for i in range(len(cong_dict_)):
cong_dict[int(x2[i])] = y2[i]
#Dict relating cong and Poa
poa_cong_dict={}
for key in cong_dict.keys():
#print key
poa_cong_dict[cong_dict[key]] = obj_dict[key]
#print(poa_cong_dict)
#poa_cong_dict = sorted(poa_cong_dict.items())
#cong_dict = sorted(cong_dict.items())
# PoA_dict = plt.plot(x, y, "bo-")
PoA_dict_noAdj = plt.scatter(poa_cong_dict.values(),poa_cong_dict.keys(), alpha = 0.7, label= instance)
plt.legend(loc=0)
#plt.legend(PoA_dict_noAdj, instance, loc=0)
plt.xlabel('Obj diff ' + month_w)
plt.ylabel('Cong')
#pylab.xlim(-0.1, 1.6)
#pylab.ylim(0.9, 2.0)
grid("on")
savefig(out_dir + 'Obj_Diff_vs_Cong_all'+ '_' + month_w +'.pdf')
#plt.show()
def heatmap_ODdemand(out_dir, files_ID, month_id, instance, week_day_list):
cnt = 0
for day in week_day_list:
cnt = cnt + 1
OD_demand_dict = {}
with open(out_dir + 'OD_demands/OD_demand_matrix_' + month_id + '_full_weekday_'+ instance + files_ID + '.txt', 'r') as the_file:
num_lines = sum(1 for line in the_file)
if cnt ==1:
x = np.zeros(num_lines)
np.transpose
with open(out_dir + 'OD_demands/OD_demand_matrix_'+ month_id + "_" + str(day) + '_weekday_'+ instance + files_ID + '.txt', 'r') as the_file:
for line in the_file:
sep = line.split(",")
origin = sep[0]
dest = sep[1]
demand = sep[2]
OD_demand_dict[str(origin) + '->' + str(dest)] = float(demand)
OD_demand_dict = collections.OrderedDict(sorted(OD_demand_dict.items()))
a = np.array(list(OD_demand_dict.values()))
#print(OD_demand_dict)
x = np.c_[x,a]
x = np.delete(x,0,1)
x = np.asmatrix(x)
x = pd.DataFrame(x)
x.columns = week_day_list
x.index = OD_demand_dict.keys()
#sns.set()
sns_plot = sns.heatmap(x, cmap="YlGnBu", linewidths=.1, xticklabels = True)
fig = sns_plot.get_figure()
    fig.savefig(out_dir + 'OD_demand' + '_' + instance + '_' + month_id + '.pdf')
fig.clf()
def heatmap_ODdemand_adj(out_dir, files_ID, month_w, instance, week_day_list):
cnt = 0
for day in week_day_list:
cnt = cnt + 1
OD_demand_dict = {}
with open(out_dir + "demandsDict/demandsDictFixed" + str(day) + "_" + month_w + "_" + instance + ".json", 'r') as json_file:
OD_demand_dict = json.load(json_file)
num_lines = len(OD_demand_dict)
if cnt ==1:
x = np.zeros(num_lines)
np.transpose
        OD_demand_dict = collections.OrderedDict(sorted(OD_demand_dict.items()))
        a = np.array(list(OD_demand_dict.values()))
        #print(OD_demand_dict)
        x = np.c_[x,a]
x = np.delete(x,0,1)
x = np.asmatrix(x)
x = pd.DataFrame(x)
x.columns = week_day_list
x.index = OD_demand_dict.keys()
#sns.set()
sns_plot = sns.heatmap(x, cmap="YlGnBu", linewidths=.1, xticklabels = True)
fig = sns_plot.get_figure()
fig.savefig(out_dir + 'OD_demandFixed'+ '_' + instance + '_' + month_w +'.pdf')
fig.clf()
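# Layout sketch of the heatmaps produced above: one row per OD pair (index labels such
# as "12->34"), one column per weekday in week_day_list, and cell values equal to the
# demand, so e.g. x.loc["12->34", 5] would be the demand of that OD pair on day 5
# (labels are illustrative).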
def plot_poa_gls(out_dir, files_ID, month_w, time_instances, week_day_list):
for instance in time_instances['id']:
with open (out_dir + 'OD_demands/gls_cost_vec_'+ month_w + '_weekday_'+ instance + files_ID + '.json', 'r' ) as json_file:
gls_cost_vec = json.load(json_file)
#Load PoA
#with open(out_dir + "PoA_dict_noAdj_" + month_w + '_' + instance + '.json', 'r') as json_file:
# PoA_dict_noAdj = json.load(json_file)
# PoA_dict_noAdj = sorted(PoA_dict_noAdj.items())
#x, y = zip(*PoA_dict_noAdj)
        PoA_dict = {}
        #for i in range(len(x)):
        #    PoA_dict[int(x[i])] = y[i]
with open(out_dir + "PoA_dict_" + month_w + '_' + instance + '.json', 'r') as json_file:
PoA_dict_ = json.load(json_file)
PoA_dict_ = sorted(PoA_dict_.items())
x2, y2 = zip(*PoA_dict_)
PoA_dict2 = {}
        for i in range(len(x2)):
PoA_dict2[int(x2[i])] = y2[i]
#print(gls_cost_vec)
#Dict relating gls and Poa
poa_gls_dict={}
        for key in PoA_dict2.keys():
# print(key)
poa_gls_dict[PoA_dict2[key]] = gls_cost_vec[str(key)]
#PoA_dict_noAdj = []
# PoA_dict = plt.plot(x, y, "bo-")
plt_ = plt.scatter(poa_gls_dict.values(),poa_gls_dict.keys(), alpha = 0.7, label= instance)
plt.legend(loc=0)
#plt.legend(PoA_dict_noAdj, instance, loc=0)
plt.xlabel('GLS cost ' + month_w)
plt.ylabel('PoA')
#pylab.xlim(-0.1, 1.6)
#pylab.ylim(0.9, 2.0)
plt.grid("on")
fig_ = plt_.get_figure()
fig_.savefig(out_dir + 'GLS_vs_PoA_all'+ '_' + month_w +'.pdf')
fig_.clf()
def polyEval(coeff, pt):
    # Evaluate the polynomial sum_i coeff[i] * pt**i at the point pt
    return sum([coeff[i] * pt**i for i in range(len(coeff))])
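# Example: polyEval([1.0, 0.5, 0.25], 2.0) returns 1.0 + 0.5*2.0 + 0.25*4.0 = 3.0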
def plot_cost_funct(out_dir, files_ID, link, month_w, key, time_instances):
for instance in time_instances['id']:
with open(out_dir + "coeffs_dict_"+ month_w + "_" + instance + ".json", 'r') as json_file:
coeff = json.load(json_file)
coeff = coeff[key]
def plot_fcoeff(out_dir, month_w, instance):
with open(out_dir + "fcoeffs_" + month_w + '_' + instance + '.json', 'r') as json_file:
fcoeff_dict = json.load(json_file)
x = np.linspace(0,1.3,100)
for day in fcoeff_dict.keys():
f = []
#print(fcoeff_dict[day].values()[0][1])
#print(range(len(fcoeff_dict[day])))
for i in x:
f.append(sum([fcoeff_dict[day].values()[0][a]*i**(a) for a in range(len(fcoeff_dict[day].values()[0]))]))
#print(f)
plt_ = plt.scatter(x,f, alpha = 0.7, label= str(day))
plt.legend(loc=0)
#plt.legend(loc=0)
#plt.legend(PoA_dict_noAdj, instance, loc=0)
#plt.xlabel('GLS cost ' + month_w)
#plt.ylabel('PoA')
#pylab.xlim(-0.1, | |
import os
import html
import nekos
import requests
from time import sleep
from PIL import Image
from telegram import ParseMode
from KilluaRobot import dispatcher, updater
import KilluaRobot.modules.sql.nsfw_sql as sql
from KilluaRobot.modules.log_channel import gloggable
from telegram import Message, Chat, Update, Bot, MessageEntity
from telegram.error import BadRequest, RetryAfter, Unauthorized
from telegram.ext import CommandHandler, run_async, CallbackContext
from KilluaRobot.modules.helper_funcs.filters import CustomFilters
from KilluaRobot.modules.helper_funcs.chat_status import user_admin
from telegram.utils.helpers import mention_html, mention_markdown, escape_markdown
@user_admin
@gloggable
def add_nsfw(update: Update, context: CallbackContext):
chat = update.effective_chat
msg = update.effective_message
user = update.effective_user # Remodified by @EverythingSuckz
is_nsfw = sql.is_nsfw(chat.id)
if not is_nsfw:
sql.set_nsfw(chat.id)
msg.reply_text("Activated NSFW Mode!")
message = (
f"<b>{html.escape(chat.title)} :</b>\n"
f"ACTIVATED_NSFW\n"
f"<b>Admin :</b> {mention_html(user.id, html.escape(user.first_name))}\n"
)
return message
else:
msg.reply_text("NSFW Mode is already Activated for this chat!")
return ""
@user_admin
@gloggable
def rem_nsfw(update: Update, context: CallbackContext):
msg = update.effective_message
chat = update.effective_chat
user = update.effective_user
is_nsfw = sql.is_nsfw(chat.id)
if not is_nsfw:
msg.reply_text("NSFW Mode is already Deactivated")
return ""
else:
sql.rem_nsfw(chat.id)
msg.reply_text("Rolled Back to SFW Mode!")
message = (
f"<b>{html.escape(chat.title)} :</b>\n"
f"DEACTIVATED_NSFW\n"
f"<b>Admin :</b> {mention_html(user.id, html.escape(user.first_name))}\n"
)
return message
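# Per-chat state sketch: the sql helpers used above maintain a simple NSFW whitelist.
#   sql.set_nsfw(chat.id)    # activate NSFW mode for a chat
#   sql.is_nsfw(chat.id)     # -> truthy if the chat is whitelisted
#   sql.rem_nsfw(chat.id)    # deactivate NSFW mode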
def list_nsfw_chats(update: Update, context: CallbackContext):
chats = sql.get_all_nsfw_chats()
text = "<b>NSFW Activated Chats</b>\n"
for chat in chats:
try:
x = context.bot.get_chat(int(*chat))
name = x.title if x.title else x.first_name
text += f"• <code>{name}</code>\n"
except BadRequest:
sql.rem_nsfw(*chat)
except Unauthorized:
sql.rem_nsfw(*chat)
except RetryAfter as e:
sleep(e.retry_after)
update.effective_message.reply_text(text, parse_mode="HTML")
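# Registration sketch (command names are illustrative; the actual handler wiring is
# defined elsewhere in this module):
#   dispatcher.add_handler(CommandHandler("nsfwon", add_nsfw))
#   dispatcher.add_handler(CommandHandler("nsfwoff", rem_nsfw))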
def neko(update, context):
msg = update.effective_message
target = "neko"
msg.reply_photo(nekos.img(target))
def feet(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "feet"
msg.reply_photo(nekos.img(target))
def yuri(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "yuri"
msg.reply_photo(nekos.img(target))
def trap(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "trap"
msg.reply_photo(nekos.img(target))
def futanari(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "futanari"
msg.reply_photo(nekos.img(target))
def hololewd(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "hololewd"
msg.reply_photo(nekos.img(target))
def lewdkemo(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "lewdkemo"
msg.reply_photo(nekos.img(target))
def sologif(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "solog"
msg.reply_video(nekos.img(target))
def feetgif(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "feetg"
msg.reply_video(nekos.img(target))
def cumgif(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "cum"
msg.reply_video(nekos.img(target))
def erokemo(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "erokemo"
msg.reply_photo(nekos.img(target))
def lesbian(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "les"
msg.reply_video(nekos.img(target))
def wallpaper(update, context):
msg = update.effective_message
target = "wallpaper"
msg.reply_photo(nekos.img(target))
def lewdk(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "lewdk"
msg.reply_photo(nekos.img(target))
def ngif(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "ngif"
msg.reply_video(nekos.img(target))
def tickle(update, context):
msg = update.effective_message
target = "tickle"
msg.reply_video(nekos.img(target))
def lewd(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "lewd"
msg.reply_photo(nekos.img(target))
def feed(update, context):
msg = update.effective_message
target = "feed"
msg.reply_video(nekos.img(target))
def eroyuri(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "eroyuri"
msg.reply_photo(nekos.img(target))
def eron(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "eron"
msg.reply_photo(nekos.img(target))
def cum(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "cum_jpg"
msg.reply_photo(nekos.img(target))
def bjgif(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "bj"
msg.reply_video(nekos.img(target))
def bj(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "blowjob"
msg.reply_photo(nekos.img(target))
def nekonsfw(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "nsfw_neko_gif"
msg.reply_video(nekos.img(target))
def solo(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "solo"
msg.reply_photo(nekos.img(target))
def kemonomimi(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "kemonomimi"
msg.reply_photo(nekos.img(target))
def avatarlewd(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "nsfw_avatar"
with open("temp.png", "wb") as f:
f.write(requests.get(nekos.img(target)).content)
img = Image.open("temp.png")
img.save("temp.webp", "webp")
msg.reply_document(open("temp.webp", "rb"))
os.remove("temp.webp")
def gasm(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "gasm"
with open("temp.png", "wb") as f:
f.write(requests.get(nekos.img(target)).content)
img = Image.open("temp.png")
img.save("temp.webp", "webp")
msg.reply_document(open("temp.webp", "rb"))
os.remove("temp.webp")
def poke(update, context):
msg = update.effective_message
target = "poke"
msg.reply_video(nekos.img(target))
def anal(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "anal"
msg.reply_video(nekos.img(target))
def hentai(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "hentai"
msg.reply_photo(nekos.img(target))
def avatar(update, context):
msg = update.effective_message
target = "nsfw_avatar"
with open("temp.png", "wb") as f:
f.write(requests.get(nekos.img(target)).content)
img = Image.open("temp.png")
img.save("temp.webp", "webp")
msg.reply_document(open("temp.webp", "rb"))
os.remove("temp.webp")
def erofeet(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "erofeet"
msg.reply_photo(nekos.img(target))
def holo(update, context):
msg = update.effective_message
target = "holo"
msg.reply_photo(nekos.img(target))
def keta(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "keta"
if not target:
msg.reply_text("No URL was received from the API!")
return
msg.reply_photo(nekos.img(target))
def pussygif(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "pussy"
msg.reply_video(nekos.img(target))
def tits(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "tits"
msg.reply_photo(nekos.img(target))
def holoero(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "holoero"
msg.reply_photo(nekos.img(target))
def pussy(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "pussy_jpg"
msg.reply_photo(nekos.img(target))
def hentaigif(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "random_hentai_gif"
msg.reply_video(nekos.img(target))
def classic(update, context):
msg = update.effective_message
target = "classic"
msg.reply_video(nekos.img(target))
def kuni(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "kuni"
msg.reply_video(nekos.img(target))
def waifu(update, context):
msg = update.effective_message
target = "waifu"
with open("temp.png", "wb") as f:
f.write(requests.get(nekos.img(target)).content)
img = Image.open("temp.png")
img.save("temp.webp", "webp")
msg.reply_document(open("temp.webp", "rb"))
os.remove("temp.webp")
def kiss(update, context):
msg = update.effective_message
target = "kiss"
msg.reply_video(nekos.img(target))
def femdom(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "femdom"
msg.reply_photo(nekos.img(target))
def hug(update, context):
msg = update.effective_message
target = "cuddle"
msg.reply_video(nekos.img(target))
def erok(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "erok"
msg.reply_photo(nekos.img(target))
def foxgirl(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "fox_girl"
msg.reply_photo(nekos.img(target))
def titsgif(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
msg = update.effective_message
target = "boobs"
msg.reply_video(nekos.img(target))
def ero(update, context):
chat_id = update.effective_chat.id
if not update.effective_message.chat.type == "private":
is_nsfw = sql.is_nsfw(chat_id)
if not is_nsfw:
return
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Tue Oct 10 00:42:26 2017 by generateDS.py version 2.28b.
# Python 2.7.12 (default, Nov 19 2016, 06:48:10) [GCC 5.4.0 20160609]
#
# Command line options:
# ('--no-process-includes', '')
# ('-o', 'esociallib/v2_04/evtTabEstab.py')
#
# Command line arguments:
# schemas/v2_04/evtTabEstab.xsd
#
# Command line:
# /usr/local/bin/generateDS --no-process-includes -o "esociallib/v2_04/evtTabEstab.py" schemas/v2_04/evtTabEstab.xsd
#
# Current working directory (os.getcwd()):
# esociallib
#
import sys
import re as re_
import base64
import datetime as datetime_
import warnings as warnings_
try:
from lxml import etree as etree_
except ImportError:
from xml.etree import ElementTree as etree_
Validate_simpletypes_ = True
if sys.version_info.major == 2:
BaseStrType_ = basestring
else:
BaseStrType_ = str
def parsexml_(infile, parser=None, **kwargs):
if parser is None:
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
try:
parser = etree_.ETCompatXMLParser()
except AttributeError:
# fallback to xml.etree
parser = etree_.XMLParser()
doc = etree_.parse(infile, parser=parser, **kwargs)
return doc
#
# Namespace prefix definition table (and other attributes, too)
#
# The module generatedsnamespaces, if it is importable, must contain
# a dictionary named GeneratedsNamespaceDefs. This Python dictionary
# should map element type names (strings) to XML schema namespace prefix
# definitions. The export method for any class for which there is
# a namespace prefix definition, will export that definition in the
# XML representation of that element. See the export method of
# any generated element type class for an example of the use of this
# table.
# A sample table is:
#
# # File: generatedsnamespaces.py
#
# GenerateDSNamespaceDefs = {
# "ElementtypeA": "http://www.xxx.com/namespaceA",
# "ElementtypeB": "http://www.xxx.com/namespaceB",
# }
#
try:
from generatedsnamespaces import GenerateDSNamespaceDefs as GenerateDSNamespaceDefs_
except ImportError:
GenerateDSNamespaceDefs_ = {}
#
# The root super-class for element type classes
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
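# For illustration only: a user-supplied generatedssuper.py must define a
# GeneratedsSuper class providing the same gds_* methods as the fallback
# class below. A hypothetical sketch overriding just one formatter:
#
#   # File: generatedssuper.py
#   class GeneratedsSuper(object):
#       def gds_format_string(self, input_data, input_name=''):
#           # e.g. normalise whitespace before exporting string content
#           return ' '.join(input_data.split())
#       # ... plus the remaining gds_* helpers used by the generated classes.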
try:
from generatedssuper import GeneratedsSuper
except ImportError as exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(datetime_.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime_.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node=None, input_name=''):
if not input_data:
return ''
else:
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node=None, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node=None, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_integer_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
int(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of integers')
return values
def gds_format_float(self, input_data, input_name=''):
return ('%.15f' % input_data).rstrip('0')
def gds_validate_float(self, input_data, node=None, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_float_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of floats')
return values
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node=None, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_double_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of doubles')
return values
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node=None, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_boolean_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(
node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return values
def gds_validate_datetime(self, input_data, node=None, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_datetime(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
time_parts = input_data.split('.')
if len(time_parts) > 1:
micro_seconds = int(float('0.' + time_parts[1]) * 1000000)
input_data = '%s.%s' % (time_parts[0], micro_seconds, )
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
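# Illustrative round trip (assuming a zero-offset timestamp):
#   dt = GeneratedsSuper.gds_parse_datetime('2017-10-10T00:42:26Z')
#   GeneratedsSuper().gds_format_datetime(dt)  ->  '2017-10-10T00:42:26Z'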
def gds_validate_date(self, input_data, node=None, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = '%04d-%02d-%02d' % (
input_data.year,
input_data.month,
input_data.day,
)
try:
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(
hours, minutes)
except AttributeError:
pass
return _svalue
@classmethod
def gds_parse_date(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
dt = dt.replace(tzinfo=tz)
return dt.date()
def gds_validate_time(self, input_data, node=None, input_name=''):
return input_data
def gds_format_time(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_validate_simple_patterns(self, patterns, target):
# pat is a list of lists of strings/patterns. We should:
# - AND the outer elements
# - OR the inner elements
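# Illustrative example: patterns = [['^A', '^B'], [r'\d$']] accepts a
# target that starts with 'A' or 'B' AND ends with a digit.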
found1 = True
for patterns1 in patterns:
found2 = False
for patterns2 in patterns1:
if re_.search(patterns2, target) is not None:
found2 = True
break
if not found2:
found1 = False
break
return found1
@classmethod
def gds_parse_time(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt.time()
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
@classmethod
def gds_reverse_node_mapping(cls, mapping):
return dict(((v, k) for k, v in mapping.items()))
@staticmethod
def gds_encode(instring):
if sys.version_info.major == 2:
return instring.encode(ExternalEncoding)
else:
return instring
@staticmethod
def convert_unicode(instring):
if isinstance(instring, str):
result = quote_xml(instring)
elif sys.version_info.major == 2 and isinstance(instring, unicode):
result = quote_xml(instring).encode('utf8')
else:
result = GeneratedsSuper.gds_encode(str(instring))
return result
def __eq__(self, other):
if type(self) != type(other):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def getSubclassFromModule_(module, class_):
'''Get the subclass of a class from a specified module.'''
name = class_.__name__ + 'Sub'
if hasattr(module, name):
return getattr(module, name)
else:
return None
#!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: ESRIJson driver test suite.
# Author: <NAME> <even dot rouault at spatialys.com>
#
###############################################################################
# Copyright (c) 2009-2019, <NAME> <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import contextlib
from osgeo import ogr
from osgeo import gdal
import gdaltest
import ogrtest
import pytest
pytestmark = pytest.mark.require_driver('ESRIJson')
###############################################################################
# Test utilities
def validate_layer(lyr, name, features, typ, fields, box):
if name is not None and name != lyr.GetName():
print('Wrong layer name')
return False
if features != lyr.GetFeatureCount():
print('Wrong number of features')
return False
lyrDefn = lyr.GetLayerDefn()
if lyrDefn is None:
print('Layer definition is none')
return False
if typ != lyrDefn.GetGeomType():
print('Wrong geometry type')
print(lyrDefn.GetGeomType())
return False
if fields != lyrDefn.GetFieldCount():
print('Wrong number of fields')
return False
extent = lyr.GetExtent()
minx = abs(extent[0] - box[0])
maxx = abs(extent[1] - box[1])
miny = abs(extent[2] - box[2])
maxy = abs(extent[3] - box[3])
if max(minx, maxx, miny, maxy) > 0.0001:
print('Wrong spatial extent of layer')
print(extent)
return False
return True
###############################################################################
# Test reading ESRI point file
def test_ogr_esrijson_read_point():
ds = ogr.Open('data/esrijson/esripoint.json')
assert ds is not None, 'Failed to open datasource'
assert ds.GetLayerCount() == 1, 'Wrong number of layers'
lyr = ds.GetLayerByName('esripoint')
assert lyr is not None, 'Missing layer called esripoint'
extent = (2, 2, 49, 49)
rc = validate_layer(lyr, 'esripoint', 1, ogr.wkbPoint, 4, extent)
assert rc
ref = lyr.GetSpatialRef()
gcs = int(ref.GetAuthorityCode('GEOGCS'))
assert gcs == 4326, "Spatial reference was not valid"
feature = lyr.GetNextFeature()
ref_geom = ogr.CreateGeometryFromWkt('POINT(2 49)')
if ogrtest.check_feature_geometry(feature, ref_geom) != 0:
feature.DumpReadable()
pytest.fail()
if feature.GetFID() != 1:
feature.DumpReadable()
pytest.fail()
if feature.GetFieldAsInteger('fooInt') != 2:
feature.DumpReadable()
pytest.fail()
if feature.GetFieldAsDouble('fooDouble') != 3.4:
feature.DumpReadable()
pytest.fail()
if feature.GetFieldAsString('fooString') != '56':
feature.DumpReadable()
pytest.fail()
lyr = None
ds = None
###############################################################################
# Test reading ESRI linestring file
def test_ogr_esrijson_read_linestring():
ds = ogr.Open('data/esrijson/esrilinestring.json')
assert ds is not None, 'Failed to open datasource'
assert ds.GetLayerCount() == 1, 'Wrong number of layers'
lyr = ds.GetLayer(0)
extent = (2, 3, 49, 50)
rc = validate_layer(lyr, None, 1, ogr.wkbLineString, 0, extent)
assert rc
feature = lyr.GetNextFeature()
ref_geom = ogr.CreateGeometryFromWkt('LINESTRING (2 49,3 50)')
if ogrtest.check_feature_geometry(feature, ref_geom) != 0:
feature.DumpReadable()
pytest.fail()
lyr = None
ds = None
# MultiLineString
ds = ogr.Open("""{
"geometryType": "esriGeometryPolyline",
"fields": [],
"features": [
{
"geometry": {
"paths" : [
[ [2,49],[2.1,49.1] ],
[ [3,50],[3.1,50.1] ]
]
}
}
]
}""")
lyr = ds.GetLayer(0)
feature = lyr.GetNextFeature()
ref_geom = ogr.CreateGeometryFromWkt('MULTILINESTRING ((2 49,2.1 49.1),(3 50,3.1 50.1))')
if ogrtest.check_feature_geometry(feature, ref_geom) != 0:
feature.DumpReadable()
pytest.fail()
###############################################################################
# Test reading ESRI polygon file
def test_ogr_esrijson_read_polygon():
ds = ogr.Open('data/esrijson/esripolygon.json')
assert ds is not None, 'Failed to open datasource'
assert ds.GetLayerCount() == 1, 'Wrong number of layers'
lyr = ds.GetLayer(0)
extent = (-3, 3, 49, 50)
rc = validate_layer(lyr, None, 1, ogr.wkbPolygon, 0, extent)
assert rc
feature = lyr.GetNextFeature()
ref_geom = ogr.CreateGeometryFromWkt('MULTIPOLYGON (((2 49,2 50,3 50,3 49,2 49),(2.1 49.1,2.1 49.9,2.9 49.9,2.9 49.1,2.1 49.1)),((-2 49,-2 50,-3 50,-3 49,-2 49)))')
if ogrtest.check_feature_geometry(feature, ref_geom) != 0:
feature.DumpReadable()
pytest.fail()
lyr = None
ds = None
ds = ogr.Open('data/esrijson/esripolygonempty.json')
assert ds is not None, 'Failed to open datasource'
lyr = ds.GetLayer(0)
feature = lyr.GetNextFeature()
if feature.GetGeometryRef().ExportToWkt() != 'POLYGON EMPTY':
feature.DumpReadable()
pytest.fail()
lyr = None
ds = None
###############################################################################
# Test reading ESRI multipoint file
def test_ogr_esrijson_read_multipoint():
ds = ogr.Open('data/esrijson/esrimultipoint.json')
assert ds is not None, 'Failed to open datasource'
assert ds.GetLayerCount() == 1, 'Wrong number of layers'
lyr = ds.GetLayer(0)
extent = (2, 3, 49, 50)
rc = validate_layer(lyr, None, 1, ogr.wkbMultiPoint, 4, extent)
assert rc
feature = lyr.GetNextFeature()
ref_geom = ogr.CreateGeometryFromWkt('MULTIPOINT (2 49,3 50)')
if ogrtest.check_feature_geometry(feature, ref_geom) != 0:
feature.DumpReadable()
pytest.fail()
lyr = None
ds = None
###############################################################################
# Test reading ESRI point file with z value
def test_ogr_esrijson_read_pointz():
ds = ogr.Open('data/esrijson/esrizpoint.json')
assert ds is not None, 'Failed to open datasource'
assert ds.GetLayerCount() == 1, 'Wrong number of layers'
lyr = ds.GetLayer(0)
# validate layer doesn't check z, but put it in
extent = (2, 2, 49, 49, 1, 1)
rc = validate_layer(lyr, None, 1, ogr.wkbPoint, 4, extent)
assert rc
ref = lyr.GetSpatialRef()
gcs = int(ref.GetAuthorityCode('GEOGCS'))
assert gcs == 4326, "Spatial reference was not valid"
feature = lyr.GetNextFeature()
ref_geom = ogr.CreateGeometryFromWkt('POINT(2 49 1)')
if ogrtest.check_feature_geometry(feature, ref_geom) != 0:
feature.DumpReadable()
pytest.fail()
if feature.GetFID() != 1:
feature.DumpReadable()
pytest.fail()
if feature.GetFieldAsInteger('fooInt') != 2:
feature.DumpReadable()
pytest.fail()
if feature.GetFieldAsDouble('fooDouble') != 3.4:
feature.DumpReadable()
pytest.fail()
if feature.GetFieldAsString('fooString') != '56':
feature.DumpReadable()
pytest.fail()
lyr = None
ds = None
###############################################################################
# Test reading ESRI linestring file with z
def test_ogr_esrijson_read_linestringz():
ds = ogr.Open('data/esrijson/esrizlinestring.json')
assert ds is not None, 'Failed to open datasource'
assert ds.GetLayerCount() == 1, 'Wrong number of layers'
lyr = ds.GetLayer(0)
# validate layer doesn't check z, but put it in
extent = (2, 3, 49, 50, 1, 2)
rc = validate_layer(lyr, None, 1, ogr.wkbLineString, 0, extent)
assert rc
feature = lyr.GetNextFeature()
ref_geom = ogr.CreateGeometryFromWkt('LINESTRING (2 49 1,3 50 2)')
if ogrtest.check_feature_geometry(feature, ref_geom) != 0:
feature.DumpReadable()
pytest.fail()
lyr = None
ds = None
###############################################################################
# Test reading ESRI multipoint file with z
def test_ogr_esrijson_read_multipointz():
ds = ogr.Open('data/esrijson/esrizmultipoint.json')
assert ds is not None, 'Failed to open datasource'
assert ds.GetLayerCount() == 1, 'Wrong number of layers'
lyr = ds.GetLayer(0)
# validate layer doesn't check z, but put it in
extent = (2, 3, 49, 50, 1, 2)
rc = validate_layer(lyr, None, 1, ogr.wkbMultiPoint, 4, extent)
assert rc
feature = lyr.GetNextFeature()
ref_geom = ogr.CreateGeometryFromWkt('MULTIPOINT (2 49 1,3 50 2)')
if ogrtest.check_feature_geometry(feature, ref_geom) != 0:
feature.DumpReadable()
pytest.fail()
lyr = None
ds = None
###############################################################################
# Test reading ESRI polygon file with z
def test_ogr_esrijson_read_polygonz():
ds = ogr.Open('data/esrijson/esrizpolygon.json')
assert ds is not None, 'Failed to open datasource'
assert ds.GetLayerCount() == 1, 'Wrong number of layers'
lyr = ds.GetLayer(0)
# validate layer doesn't check z, but put it in
extent = (2, 3, 49, 50, 1, 4)
rc = validate_layer(lyr, None, 1, ogr.wkbPolygon, 0, extent)
assert rc
feature = lyr.GetNextFeature()
ref_geom = ogr.CreateGeometryFromWkt('POLYGON ((2 49 1,2 50 2,3 50 3,3 49 4,2 49 1))')
if ogrtest.check_feature_geometry(feature, ref_geom) != 0:
feature.DumpReadable()
pytest.fail()
lyr = None
ds = None
###############################################################################
# Test reading ESRI multipoint file with m, but no z (hasM=true, hasZ omitted)
def test_ogr_esrijson_read_multipointm():
ds = ogr.Open('data/esrijson/esrihasmnozmultipoint.json')
assert ds is not None, 'Failed to open datasource'
assert ds.GetLayerCount() == 1, 'Wrong number of layers'
lyr = ds.GetLayer(0)
extent = (2, 3, 49, 50)
rc = validate_layer(lyr, None, 1, ogr.wkbMultiPoint, 4, extent)
assert rc
feature = lyr.GetNextFeature()
ref_geom = ogr.CreateGeometryFromWkt('MULTIPOINT M ((2 49 1),(3 50 2))')
if ogrtest.check_feature_geometry(feature, ref_geom) != 0:
feature.DumpReadable()
pytest.fail()
lyr = None
ds = None
###############################################################################
# Test reading ESRI multipoint file with hasZ=true, but only 2 components.
def test_ogr_esrijson_read_pointz_without_z():
ds = ogr.Open('data/esrijson/esriinvalidhaszmultipoint.json')
assert ds is not None, 'Failed to open datasource'
assert ds.GetLayerCount() == 1, 'Wrong number of layers'
lyr = ds.GetLayer(0)
extent = (2, 3, 49, 50)
rc = validate_layer(lyr, None, 1, ogr.wkbMultiPoint, 4, extent)
assert rc
feature = lyr.GetNextFeature()
ref_geom = ogr.CreateGeometryFromWkt('MULTIPOINT (2 49,3 50)')
if ogrtest.check_feature_geometry(feature, ref_geom) != 0:
feature.DumpReadable()
pytest.fail()
lyr = None
ds = None
###############################################################################
# Test reading ESRI multipoint file with z and m
def test_ogr_esrijson_read_multipointzm():
ds = ogr.Open('data/esrijson/esrizmmultipoint.json')
assert ds is not None, 'Failed to open datasource'
assert ds.GetLayerCount() == 1, 'Wrong number of layers'
lyr = ds.GetLayer(0)
extent = (2, 3, 49, 50, 1, 2)
elif value is NULL:
if (property_.types is not None) and (Null not in property_.types):
error_messages.append(
"Null values are not allowed in `{}.{}`, "
"permitted types include: {}.".format(
qualified_name(type(self)),
property_name_,
", ".join(
"`{}`".format(
qualified_name(type_)
if isinstance(type_, type)
else qualified_name(type(type_))
)
for type_ in getattr(property_, "types")
),
)
)
else:
error_message: str
for error_message in validate(
value, property_.types, raise_errors=False
):
yield (
"Error encountered while attempting to validate "
"`{}.{}`:\n\n{}".format(
qualified_name(type(self)),
property_name_,
error_message,
)
)
def _validate(self, raise_errors: bool = True) -> List[str]:
"""
This method verifies that all required properties are present, and
that all property values are of the correct type.
"""
validation_error_messages: List[str] = []
validated_object: abc.Object = self
instance_hooks: Optional[abc.ObjectHooks] = hooks.object_read(self)
if instance_hooks and instance_hooks.before_validate:
validated_model: abc.Model = instance_hooks.before_validate(self)
if TYPE_CHECKING:
assert isinstance(validated_model, abc.Object)
validated_object = validated_model
instance_meta: Optional[abc.ObjectMeta] = meta.object_read(
validated_object
)
if instance_meta and instance_meta.properties:
property_name_: str
property_: abc.Property
error_message: str
for (
property_name_,
property_,
) in instance_meta.properties.items():
for error_message in (
validated_object
)._get_property_validation_error_messages(
property_name_,
property_,
getattr(validated_object, property_name_),
):
validation_error_messages.append(error_message)
if instance_hooks and instance_hooks.after_validate:
instance_hooks.after_validate(validated_object)
if raise_errors and validation_error_messages:
raise errors.ValidationError(
"\n".join(validation_error_messages)
)
return validation_error_messages
# region marshal
def _marshal_collection(
data: Union[
Mapping[str, abc.MarshallableTypes],
Collection[abc.MarshallableTypes],
abc.Dictionary,
],
value_types: Optional[Iterable[Union[type, abc.Property]]] = None,
item_types: Union[
Iterable[Union[type, abc.Property]], abc.Types, None
] = None,
) -> Union[Mapping[str, abc.MarshallableTypes], List[abc.MarshallableTypes]]:
if isinstance(data, (Mapping, abc.Dictionary)):
return _marshal_mapping(data, value_types)
else:
value: abc.MarshallableTypes
marshalled_data: List[abc.MarshallableTypes] = []
for value in data:
marshalled_data.append(marshal(value, types=item_types))
return marshalled_data
def _marshal_mapping(
data: Union[Mapping[str, abc.MarshallableTypes], abc.Dictionary],
value_types: Union[
Iterable[Union[type, abc.Property]], abc.Types, None
] = None,
) -> "abc.OrderedDict[str, abc.MarshallableTypes]":
key: str
value: abc.MarshallableTypes
marshalled_data: "abc.OrderedDict[str, abc.MarshallableTypes]" = (
collections.OrderedDict()
)
items: Iterable[Tuple[str, abc.MarshallableTypes]]
if isinstance(data, (abc.Dictionary, collections.OrderedDict)) or (
isinstance(data, Reversible) and isinstance(data, Mapping)
):
items = data.items()
else:
assert isinstance(data, Mapping)
# This gives consistent sorting for non-ordered mappings
items = sorted(data.items(), key=lambda item: item[0])
for key, value in items:
marshalled_data[key] = marshal(value, types=value_types)
return marshalled_data
def _marshal_typed(
data: abc.MarshallableTypes,
types: Union[Iterable[Union[type, abc.Property]], abc.Types],
) -> Any:
"""
This attempts to initialize the provided type(s) with `data`, and accepts
the first which does not raise an error
"""
# For each potential type, attempt to marshal the data, and accept the
# first result which does not throw an error
marshalled_data: Any = UNDEFINED
for type_ in types:
if isinstance(type_, abc.Property):
try:
marshalled_data = _marshal_property_value(type_, data)
break
except TypeError:
pass
elif isinstance(type_, type) and isinstance(data, type_):
marshalled_data = data
break
# If no matches are found, raise a `TypeError` with sufficient
# information about the data and `types` to debug
if marshalled_data is UNDEFINED:
raise TypeError(
f"{repr(data)} cannot be interpreted as any of the designated "
f"types: {repr(types)}"
)
return marshalled_data
def marshal(
data: abc.MarshallableTypes,
types: Union[Iterable[Union[type, abc.Property]], abc.Types, None] = None,
value_types: Union[
Iterable[Union[type, abc.Property]], abc.Types, None
] = None,
item_types: Union[
Iterable[Union[type, abc.Property]], abc.Types, None
] = None,
) -> abc.JSONTypes:
"""
Recursively converts data which is not serializable using the `json` module
into formats which *can* be represented as JSON.
"""
marshalled_data: abc.JSONTypes
if isinstance(data, Decimal):
# Instances of `decimal.Decimal` can't be serialized as JSON, so we
# convert them to `float`
marshalled_data = float(data)
elif (data is None) or isinstance(data, (str, int, float)):
# Don't do anything with `None`--this just means an attribute is not
# used for this instance (an explicit `null` would be passed as
# `sob.properties.types.NULL`).
marshalled_data = data
elif data is NULL:
marshalled_data = None
elif isinstance(data, abc.Model):
marshalled_data = getattr(data, "_marshal")()
elif types is not None:
marshalled_data = _marshal_typed(data, types)
elif isinstance(data, (date, datetime)):
marshalled_data = data.isoformat()
elif isinstance(data, (bytes, bytearray)):
# Convert `bytes` to base-64 encoded strings
marshalled_data = str(b64encode(data), "ascii")
elif isinstance(data, Collection):
marshalled_data = _marshal_collection(
data, value_types=value_types, item_types=item_types
)
elif isinstance(data, SupportsBytes):
# Convert objects which can be *cast* as `bytes` to
# base-64 encoded strings
marshalled_data = str(b64encode(bytes(data)), "ascii")
else:
raise ValueError(f"Cannot marshal: {repr(data)}")
return marshalled_data
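# Illustrative behaviour of `marshal` (values shown are what the branches
# above produce; the calls themselves are hypothetical):
#   marshal(datetime(2020, 1, 1))   ->  '2020-01-01T00:00:00'
#   marshal(b'abc')                 ->  'YWJj'   (base-64 text)
#   marshal(Decimal('1.5'))         ->  1.5      (a float)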
# endregion
# region unmarshal
def _is_non_string_iterable(value: abc.MarshallableTypes) -> bool:
return (
(not isinstance(value, (str, bytes)))
and (not isinstance(value, Mapping))
and isinstance(value, Iterable)
)
def _is_non_string_sequence_or_set_subclass(type_: type) -> bool:
return (
issubclass(type_, (collections.abc.Set, collections.abc.Sequence))
) and (not issubclass(type_, (str, bytes)))
class _Unmarshal:
"""
This class should be used exclusively by wrapper function `unmarshal`.
"""
def __init__(
self,
data: abc.MarshallableTypes,
types: Union[
Iterable[Union[type, abc.Property]],
abc.Types,
None,
type,
abc.Property,
] = None,
value_types: Union[
Iterable[Union[type, abc.Property]],
abc.Types,
None,
type,
abc.Property,
] = None,
item_types: Union[
Iterable[Union[type, abc.Property]],
abc.Types,
None,
type,
abc.Property,
] = None,
) -> None:
# If only one type was passed for any of the following parameters--we
# convert it to a tuple
if types is not None:
if isinstance(types, (type, abc.Property)):
types = (types,)
if value_types is not None:
if isinstance(value_types, (type, abc.Property)):
value_types = (value_types,)
if item_types is not None:
if isinstance(item_types, (type, abc.Property)):
item_types = (item_types,)
# Instance Attributes
self.data: abc.MarshallableTypes = data
self.types: Union[
Iterable[Union[type, abc.Property]],
abc.Types,
None,
] = types
self.value_types: Union[
Iterable[Union[type, abc.Property]],
abc.Types,
None,
] = value_types
self.item_types: Union[
Iterable[Union[type, abc.Property]],
abc.Types,
None,
] = item_types
self.meta: Optional[abc.Meta] = None
def __call__(self) -> abc.MarshallableTypes:
"""
Return `self.data` unmarshalled
"""
try:
unmarshalled_data: abc.MarshallableTypes = self.data
if self.data is not NULL:
# If the data is a sob `Model`, get it's metadata
if isinstance(self.data, abc.Model):
self.meta = meta.read(self.data)
# Only un-marshall models if they have no metadata yet (are
# generic)
if self.meta is None:
# If the data provided is a `Generator`, make it static by
# casting the data into a tuple
if isinstance(self.data, GeneratorType):
self.data = tuple(self.data)
if self.types is None:
# If no types are provided, we unmarshal the data into
# one of sob's generic container types
unmarshalled_data = self.as_container_or_simple_type
else:
unmarshalled_data = self.as_typed
except Exception as error:
append_exception_text(
error,
(
"An error was encountered during execution of:\n"
f"{self.represent_function_call()}"
),
)
raise error
return unmarshalled_data
@property # type: ignore
def as_container_or_simple_type(self) -> Any:
"""
This function unmarshals and returns the data into one of sob's
container types, or if the data is of a simple data type--it returns
that data unmodified
"""
type_: type
unmarshalled_data = self.data
if unmarshalled_data is None:
unmarshalled_data = NULL
elif isinstance(self.data, abc.Dictionary):
type_ = type(self.data)
if self.value_types is not None:
unmarshalled_data = type_(
self.data, value_types=self.value_types
)
elif isinstance(self.data, abc.Array):
type_ = type(self.data)
if self.item_types is not None:
unmarshalled_data = type_(
self.data, item_types=self.item_types
)
elif isinstance(self.data, (Mapping, abc.Dictionary)):
unmarshalled_data = Dictionary(
self.data, value_types=self.value_types
)
elif isinstance(self.data, Iterable) and not isinstance(
self.data, (str, bytes, bytearray)
):
# `None` is interpreted as `NULL` during un-marshalling
items: List[abc.MarshallableTypes] = [
(NULL if item is None else item) for item in self.data
]
unmarshalled_data = Array(items, item_types=self.item_types)
elif not isinstance(self.data, abc.MARSHALLABLE_TYPES):
raise errors.UnmarshalValueError(
f"{repr(self.data)} cannot be un-marshalled"
)
return unmarshalled_data
@property # type: ignore
def as_typed(self) -> abc.MarshallableTypes:
unmarshalled_data: Union[abc.MarshallableTypes, Undefined] = UNDEFINED
first_error: Optional[Exception] = None
error_messages: List[str] = []
# Attempt to un-marshal the data as each type, in the order
# provided
assert self.types is not None
for type_ in self.types:
try:
unmarshalled_data = self.as_type(type_)
# If the data is un-marshalled successfully, we do
# not need to try any further types
break
except (AttributeError, KeyError, TypeError, ValueError) as error:
if first_error is None:
first_error = error
error_messages.append(errors.get_exception_text())
if isinstance(unmarshalled_data, Undefined):
if (first_error is None) or isinstance(first_error, TypeError):
raise errors.UnmarshalTypeError(
"\n".join(error_messages),
data=self.data,
types=self.types,
value_types=self.value_types,
item_types=self.item_types,
)
elif isinstance(first_error, ValueError):
raise errors.UnmarshalValueError(
"\n".join(error_messages),
data=self.data,
types=self.types,
value_types=self.value_types,
item_types=self.item_types,
)
else:
raise first_error
return unmarshalled_data
def get_dictionary_type(self, type_: type) -> Optional[type]:
"""
Get the dictionary type to use
"""
dictionary_type: Optional[type]
if type_ is abc.Dictionary:
dictionary_type = Dictionary
elif issubclass(type_, abc.Object):
dictionary_type = None
elif issubclass(type_, abc.Dictionary):
dictionary_type = type_
elif issubclass(type_, Mapping):
dictionary_type = Dictionary
else:
raise TypeError(self.data)
return dictionary_type
def before_hook(self, type_: type) -> abc.MarshallableTypes:
data = self.data
hooks_ = hooks.read(type_)
if hooks_:
before_unmarshal_hook = hooks_.before_unmarshal
if before_unmarshal_hook:
data = before_unmarshal_hook(deepcopy(data))
return data
@staticmethod
def after_hook(type_: type, data: abc.Model) -> abc.Model:
hooks_ = hooks.read(type_)
if hooks_:
after_unmarshal_hook = | |
__init__(self):
r"""
:param DomainName: Playback domain name.
:type DomainName: str
:param CertId: Certificate ID.
:type CertId: int
:param Status: Status. 0: disabled, 1: enabled.
:type Status: int
"""
self.DomainName = None
self.CertId = None
self.Status = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.CertId = params.get("CertId")
self.Status = params.get("Status")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifyLiveDomainCertResponse(AbstractModel):
"""ModifyLiveDomainCert response structure.
"""
def __init__(self):
r"""
:param RequestId: The unique request ID, returned for every request. The RequestId is needed to troubleshoot issues with a given request.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyLiveDomainRefererRequest(AbstractModel):
"""ModifyLiveDomainReferer request structure.
"""
def __init__(self):
r"""
:param DomainName: Playback domain name.
:type DomainName: str
:param Enable: Whether to enable referer allowlist/blocklist authentication for the current domain name.
:type Enable: int
:param Type: List type. 0: blocklist, 1: allowlist.
:type Type: int
:param AllowEmpty: Whether an empty referer is allowed. 0: not allowed, 1: allowed.
:type AllowEmpty: int
:param Rules: Referer list, entries separated by semicolons (;).
:type Rules: str
"""
self.DomainName = None
self.Enable = None
self.Type = None
self.AllowEmpty = None
self.Rules = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.Enable = params.get("Enable")
self.Type = params.get("Type")
self.AllowEmpty = params.get("AllowEmpty")
self.Rules = params.get("Rules")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifyLiveDomainRefererResponse(AbstractModel):
"""ModifyLiveDomainReferer response structure.
"""
def __init__(self):
r"""
:param RequestId: The unique request ID, returned for every request. The RequestId is needed to troubleshoot issues with a given request.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyLivePlayAuthKeyRequest(AbstractModel):
"""ModifyLivePlayAuthKey request structure.
"""
def __init__(self):
r"""
:param DomainName: Playback domain name.
:type DomainName: str
:param Enable: Whether to enable. 0: disabled, 1: enabled.
If not passed, the current value is left unchanged.
:type Enable: int
:param AuthKey: Authentication key.
If not passed, the current value is left unchanged.
:type AuthKey: str
:param AuthDelta: Validity period in seconds.
If not passed, the current value is left unchanged.
:type AuthDelta: int
:param AuthBackKey: Backup authentication key.
If not passed, the current value is left unchanged.
:type AuthBackKey: str
"""
self.DomainName = None
self.Enable = None
self.AuthKey = None
self.AuthDelta = None
self.AuthBackKey = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.Enable = params.get("Enable")
self.AuthKey = params.get("AuthKey")
self.AuthDelta = params.get("AuthDelta")
self.AuthBackKey = params.get("AuthBackKey")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifyLivePlayAuthKeyResponse(AbstractModel):
"""ModifyLivePlayAuthKey response structure.
"""
def __init__(self):
r"""
:param RequestId: The unique request ID, returned for every request. The RequestId is needed to troubleshoot issues with a given request.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyLivePlayDomainRequest(AbstractModel):
"""ModifyLivePlayDomain request structure.
"""
def __init__(self):
r"""
:param DomainName: Playback domain name.
:type DomainName: str
:param PlayType: Playback domain type. 1 - Mainland China; 2 - global; 3 - outside Mainland China.
:type PlayType: int
"""
self.DomainName = None
self.PlayType = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.PlayType = params.get("PlayType")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifyLivePlayDomainResponse(AbstractModel):
"""ModifyLivePlayDomain response structure.
"""
def __init__(self):
r"""
:param RequestId: The unique request ID, returned for every request. The RequestId is needed to troubleshoot issues with a given request.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyLivePullStreamTaskRequest(AbstractModel):
"""ModifyLivePullStreamTask request structure.
"""
def __init__(self):
r"""
:param TaskId: Task ID.
:type TaskId: str
:param Operator: Name of the operator.
:type Operator: str
:param SourceUrls: List of source URLs to pull from.
If SourceType is live streaming (PullLivePushLive), only one URL may be given;
if SourceType is VOD (PullVodPushLive), multiple URLs (up to 30) may be given.
:type SourceUrls: list of str
:param StartTime: Start time.
In UTC format,
for example: 2019-01-08T10:00:00Z.
Note: Beijing time is the UTC time + 8 hours; the format follows the ISO 8601 standard, see the [ISO date format notes](https://cloud.tencent.com/document/product/266/11732#I).
:type StartTime: str
:param EndTime: End time. Note:
1. The end time must be later than the start time;
2. Both the end time and the start time must be later than the current time;
3. The interval between the end time and the start time must be less than seven days.
In UTC format,
for example: 2019-01-08T10:00:00Z.
Note: Beijing time is the UTC time + 8 hours; the format follows the ISO 8601 standard, see the [ISO date format notes](https://cloud.tencent.com/document/product/266/11732#I).
:type EndTime: str
:param VodLoopTimes: Number of loops for VOD pull-and-relay.
-1: loop indefinitely until the task ends.
0: no looping.
>0: the exact number of loops; whichever of the loop count and the end time is reached first applies.
Note: only takes effect when the source is VOD.
:type VodLoopTimes: int
:param VodRefreshType: Playback behavior after SourceUrls is updated for a VOD source:
ImmediateNewSource: start playing from the updated source immediately;
ContinueBreakPoint: resume from the breakpoint of the interrupted source URL, then switch to the new source afterwards.
Note: only takes effect when the source is VOD.
:type VodRefreshType: str
:param Status: Task status:
enable - enabled,
pause - paused.
:type Status: str
:param CallbackEvents: Events to be called back (all events are called back if left empty):
TaskStart: task start callback,
TaskExit: task stop callback,
VodSourceFileStart: callback when pulling from a VOD source file starts,
VodSourceFileFinish: callback when pulling from a VOD source file finishes,
ResetTaskConfig: task update callback.
:type CallbackEvents: list of str
:param CallbackUrl: Custom callback URL.
The events above are sent to this address.
:type CallbackUrl: str
:param FileIndex: Index of the file to play.
Note: starts from 1 and must not exceed the number of files in SourceUrls.
:type FileIndex: int
:param OffsetTime: Playback offset within the specified file.
Note:
1. In seconds, used together with FileIndex.
:type OffsetTime: int
:param Comment: Task remarks.
:type Comment: str
"""
self.TaskId = None
self.Operator = None
self.SourceUrls = None
self.StartTime = None
self.EndTime = None
self.VodLoopTimes = None
self.VodRefreshType = None
self.Status = None
self.CallbackEvents = None
self.CallbackUrl = None
self.FileIndex = None
self.OffsetTime = None
self.Comment = None
def _deserialize(self, params):
self.TaskId = params.get("TaskId")
self.Operator = params.get("Operator")
self.SourceUrls = params.get("SourceUrls")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.VodLoopTimes = params.get("VodLoopTimes")
self.VodRefreshType = params.get("VodRefreshType")
self.Status = params.get("Status")
self.CallbackEvents = params.get("CallbackEvents")
self.CallbackUrl = params.get("CallbackUrl")
self.FileIndex = params.get("FileIndex")
self.OffsetTime = params.get("OffsetTime")
self.Comment = params.get("Comment")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
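# Illustrative construction of the request above (field values are made up):
#   req = ModifyLivePullStreamTaskRequest()
#   req._deserialize({"TaskId": "task-123", "Status": "pause"})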
class ModifyLivePullStreamTaskResponse(AbstractModel):
"""ModifyLivePullStreamTask response structure.
"""
def __init__(self):
r"""
:param RequestId: The unique request ID, returned for every request. The RequestId is needed to troubleshoot issues with a given request.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyLivePushAuthKeyRequest(AbstractModel):
"""ModifyLivePushAuthKey request structure.
"""
def __init__(self):
r"""
:param DomainName: Push domain name.
:type DomainName: str
:param Enable: Whether to enable. 0: disabled, 1: enabled.
If not passed, the current value is left unchanged.
:type Enable: int
:param MasterAuthKey: Primary authentication key.
If not passed, the current value is left unchanged.
:type MasterAuthKey: str
:param BackupAuthKey: Backup authentication key.
If not passed, the current value is left unchanged.
:type BackupAuthKey: str
:param AuthDelta: Validity period in seconds.
:type AuthDelta: int
"""
self.DomainName = None
self.Enable = None
self.MasterAuthKey = None
self.BackupAuthKey = None
self.AuthDelta = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.Enable = params.get("Enable")
self.MasterAuthKey = params.get("MasterAuthKey")
self.BackupAuthKey = params.get("BackupAuthKey")
self.AuthDelta = params.get("AuthDelta")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifyLivePushAuthKeyResponse(AbstractModel):
"""ModifyLivePushAuthKey response structure.
"""
def __init__(self):
r"""
:param RequestId: The unique request ID, returned for every request. The RequestId is needed to troubleshoot issues with a given request.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyLiveRecordTemplateRequest(AbstractModel):
"""ModifyLiveRecordTemplate request structure.
"""
def __init__(self):
r"""
:param TemplateId: Template ID obtained from the DescribeRecordTemplates API.
:type TemplateId: int
:param TemplateName: Template name.
:type TemplateName: str
:param Description: Description.
:type Description: str
:param FlvParam: FLV recording parameters; set when FLV recording is enabled.
:type FlvParam: :class:`tencentcloud.live.v20180801.models.RecordParam`
:param HlsParam: HLS recording parameters; set when HLS recording is enabled.
:type HlsParam: :class:`tencentcloud.live.v20180801.models.RecordParam`
:param Mp4Param: MP4 recording parameters; set when MP4 recording is enabled.
:type Mp4Param: :class:`tencentcloud.live.v20180801.models.RecordParam`
:param AacParam: AAC recording parameters; set when AAC recording is enabled.
:type AacParam: :class:`tencentcloud.live.v20180801.models.RecordParam`
:param HlsSpecialParam: Custom HLS recording parameters.
:type HlsSpecialParam: :class:`tencentcloud.live.v20180801.models.HlsSpecialParam`
:param Mp3Param: MP3 recording parameters; set when MP3 recording is enabled.
:type Mp3Param: :class:`tencentcloud.live.v20180801.models.RecordParam`
"""
self.TemplateId = None
self.TemplateName = None
self.Description = None
self.FlvParam = None
self.HlsParam = None
self.Mp4Param = None
self.AacParam = None
self.HlsSpecialParam = None
self.Mp3Param = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
self.TemplateName = params.get("TemplateName")
self.Description = params.get("Description")
if params.get("FlvParam") is not None:
self.FlvParam = RecordParam()
self.FlvParam._deserialize(params.get("FlvParam"))
if params.get("HlsParam") is not None:
self.HlsParam = RecordParam()
self.HlsParam._deserialize(params.get("HlsParam"))
if params.get("Mp4Param") is not None:
self.Mp4Param = RecordParam()
self.Mp4Param._deserialize(params.get("Mp4Param"))
if params.get("AacParam") is not None:
self.AacParam = RecordParam()
self.AacParam._deserialize(params.get("AacParam"))
if params.get("HlsSpecialParam") is not None:
self.HlsSpecialParam = HlsSpecialParam()
self.HlsSpecialParam._deserialize(params.get("HlsSpecialParam"))
if params.get("Mp3Param") is not None:
self.Mp3Param = RecordParam()
self.Mp3Param._deserialize(params.get("Mp3Param"))
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifyLiveRecordTemplateResponse(AbstractModel):
"""ModifyLiveRecordTemplate response structure.
"""
def __init__(self):
r"""
:param RequestId: The unique request ID, returned for every request. The RequestId is needed to troubleshoot issues with a given request.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyLiveSnapshotTemplateRequest(AbstractModel):
"""ModifyLiveSnapshotTemplate request structure.
"""
def __init__(self):
r"""
:param TemplateId: Template ID.
:type TemplateId: int
:param TemplateName: Template name.
Maximum length: 255 bytes.
:type TemplateName: str
:param Description: Description.
Maximum length: 1024 bytes.
:type Description: str
:param SnapshotInterval: Screenshot interval in seconds, 10s by default.
Range: 5s to 300s.
:type SnapshotInterval: int
:param Width: Screenshot width. Default: 0 (original width).
:type Width: int
:param Height: Screenshot height. Default: 0 (original height).
:type Height: int
:param PornFlag: Whether to enable porn detection, 0 by default.
0: disabled.
1: enabled.
:type PornFlag: int
:param CosAppId: COS application ID.
:type CosAppId: int
:param CosBucket: COS bucket name.
Note: the CosBucket value must not contain the -[appid] part.
:type CosBucket: str
:param CosRegion: COS region.
:type CosRegion: str
:param CosPrefix: COS bucket folder prefix.
:type CosPrefix: str
:param CosFileName: COS file name.
:type CosFileName: str
"""
self.TemplateId = None
self.TemplateName = None
self.Description = None
self.SnapshotInterval = None
self.Width = None
self.Height = None
self.PornFlag = None
self.CosAppId = None
self.CosBucket = None
self.CosRegion = None
self.CosPrefix = None
self.CosFileName = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
self.TemplateName = params.get("TemplateName")
self.Description = params.get("Description")
self.SnapshotInterval = params.get("SnapshotInterval")
self.Width = params.get("Width")
self.Height = params.get("Height")
self.PornFlag = params.get("PornFlag")
self.CosAppId = params.get("CosAppId")
self.CosBucket = params.get("CosBucket")
self.CosRegion = params.get("CosRegion")
self.CosPrefix = params.get("CosPrefix")
self.CosFileName = params.get("CosFileName")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifyLiveSnapshotTemplateResponse(AbstractModel):
"""ModifyLiveSnapshotTemplate response structure.
"""
def __init__(self):
r"""
:param RequestId: The unique request ID, returned for every request. The RequestId is needed to troubleshoot issues with a given request.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyLiveTranscodeTemplateRequest(AbstractModel):
"""ModifyLiveTranscodeTemplate request structure.
"""
def __init__(self):
r"""
:param TemplateId: Template ID.
:type TemplateId: int
:param Vcodec: Video codec: h264/h265/origin, origin by default.
origin: keep the original encoding format.
:type Vcodec: str
:param Acodec: Audio codec: aac, aac by default.
Note: this parameter currently has no effect and will be supported later.
:type Acodec: str
:param AudioBitrate: Audio bitrate, 0 by default.
Range: 0-500.
:type AudioBitrate: int
:param Description: Template description.
:type Description: str
:param VideoBitrate: Video bitrate. Range: 0 kbps - 8000 kbps.
0 keeps the original bitrate.
Note: transcoding templates require unique bitrates, so the saved bitrate may differ from the input value.
:type VideoBitrate: int
:param Width: Width, 0-3000.
The value must be a multiple of 2; 0 means the original width.
:type Width: int
:param NeedVideo: Whether to keep the video. 0: no, 1: yes. Default 1.
:type NeedVideo: int
:param NeedAudio: Whether to keep the audio. 0: no, 1: yes. Default 1.
:type NeedAudio: int
"""
"""
Utilities for analyzing and plotting length distributions for line data.
"""
import logging
from dataclasses import dataclass
from enum import Enum, unique
from functools import lru_cache
from itertools import chain, cycle
from textwrap import wrap
from typing import Callable, Dict, List, Optional, Tuple
import numpy as np
import pandas as pd
import powerlaw
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from sklearn.linear_model import LinearRegression
ALPHA = "alpha"
EXPONENT = "exponent"
CUT_OFF = "cut-off"
KOLM_DIST = "Kolmogorov-Smirnov distance D"
SIGMA = "sigma"
MU = "mu"
LAMBDA = "lambda"
LOGLIKELIHOOD = "loglikelihood"
@dataclass
class LengthDistribution:
"""
Dataclass for length distributions.
"""
name: str
lengths: np.ndarray
area_value: float
@unique
class Dist(Enum):
"""
Enums of powerlaw model types.
"""
POWERLAW = "power_law"
LOGNORMAL = "lognormal"
EXPONENTIAL = "exponential"
TRUNCATED_POWERLAW = "truncated_power_law"
def numpy_polyfit(log_lengths: np.ndarray, log_ccm: np.ndarray) -> Tuple[float, float]:
"""
Fit numpy polyfit to data.
"""
vals = np.polyfit(log_lengths, log_ccm, 1)
assert len(vals) == 2
return vals
def scikit_linear_regression(
log_lengths: np.ndarray, log_ccm: np.ndarray
) -> Tuple[float, float]:
"""
Fit using scikit LinearRegression.
"""
model = LinearRegression().fit(log_lengths.reshape((-1, 1)), log_ccm)
coefs = model.coef_
intercept = model.intercept_
assert len(coefs) == 1
assert isinstance(intercept, float)
m_value = coefs[0]
assert isinstance(m_value, float)
return m_value, intercept
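# Both fitters above take data already in log10 space and return a
# (slope, intercept) pair, e.g. (illustrative):
#   m_value, constant = numpy_polyfit(np.log10(lengths), np.log10(ccm))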
@dataclass
class MultiLengthDistribution:
"""
Multi-scale length distribution.
"""
distributions: List[LengthDistribution]
cut_distributions: bool
using_branches: bool
fitter: Callable[[np.ndarray, np.ndarray], Tuple[float, float]] = numpy_polyfit
def __hash__(self) -> int:
"""
Implement hashing for MultiLengthDistribution.
"""
all_lengths = tuple(chain(*[ld.lengths for ld in self.distributions]))
all_lengths_str = tuple(map(str, all_lengths))
all_area_values_str = tuple(map(str, [ld.name for ld in self.distributions]))
return hash(
(
all_lengths_str,
self.cut_distributions,
self.using_branches,
all_area_values_str,
)
)
@lru_cache(maxsize=None)
def create_normalized_distributions(
self,
) -> Tuple[List[np.ndarray], List[np.ndarray]]:
"""
Create normalized ccm of distributions.
"""
return create_normalized_distributions(
distributions=self.distributions, cut_distributions=self.cut_distributions
)
@property
def truncated_length_array_all(self) -> List[np.ndarray]:
"""
Get truncated length array by cut-off.
"""
return self.create_normalized_distributions()[0]
@property
def ccm_array_normed_all(self) -> List[np.ndarray]:
"""
Get truncated ccm array by cut-off.
"""
return self.create_normalized_distributions()[1]
@property
def concatted_lengths(self) -> np.ndarray:
"""
Concat lengths into single array.
"""
return np.concatenate(self.truncated_length_array_all)
@property
def concatted_ccm(self) -> np.ndarray:
"""
Concat ccm into single array.
"""
return np.concatenate(self.ccm_array_normed_all)
@lru_cache(maxsize=None)
def fit_to_multi_scale_lengths(self) -> Tuple[np.ndarray, float, float]:
"""
Fit np.polyfit to multi-scale lengths.
"""
return fit_to_multi_scale_lengths(
lengths=self.concatted_lengths, ccm=self.concatted_ccm, fitter=self.fitter
)
@property
def fitted_y_values(self) -> np.ndarray:
"""
Get fitted y values.
"""
return self.fit_to_multi_scale_lengths()[0]
@property
def m_value(self) -> float:
"""
Get fitted m value.
"""
return self.fit_to_multi_scale_lengths()[1]
@property
def constant(self) -> float:
"""
Get fitted constant value.
"""
return self.fit_to_multi_scale_lengths()[2]
@property
def names(self) -> List[str]:
"""
Get length distribution names.
"""
return [ld.name for ld in self.distributions]
def plot_multi_length_distributions(self) -> Tuple[Figure, Axes]:
"""
Plot multi-scale length distribution.
"""
return plot_multi_distributions_and_fit(
truncated_length_array_all=self.truncated_length_array_all,
concatted_lengths=self.concatted_lengths,
ccm_array_normed_all=self.ccm_array_normed_all,
names=self.names,
y_fit=self.fitted_y_values,
m_value=self.m_value,
using_branches=self.using_branches,
)
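# Illustrative construction of a two-scale distribution (all values made up):
#   mld = MultiLengthDistribution(
#       distributions=[
#           LengthDistribution("outcrop", np.array([0.5, 1.2, 3.4]), area_value=25.0),
#           LengthDistribution("regional", np.array([120.0, 450.0, 900.0]), area_value=1e6),
#       ],
#       cut_distributions=True,
#       using_branches=False,
#   )
#   fig, ax = mld.plot_multi_length_distributions()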
def determine_fit(
length_array: np.ndarray, cut_off: Optional[float] = None
) -> powerlaw.Fit:
"""
Determine powerlaw (along with other) length distribution fits for the given data.
"""
fit = (
powerlaw.Fit(length_array, xmin=cut_off, verbose=False)
if cut_off is not None
else powerlaw.Fit(length_array, verbose=False)
)
return fit
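# Illustrative usage with a synthetic power-law sample (values made up):
#   lengths = np.random.pareto(a=1.5, size=1000) + 1.0
#   fit = determine_fit(lengths)
#   print(fit.power_law.alpha, fit.xmin)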
def plot_length_data_on_ax(
ax: Axes,
length_array: np.ndarray,
ccm_array: np.ndarray,
label: str,
):
"""
Plot length data on given ax.
Sets ax scales to logarithmic.
"""
ax.scatter(
x=length_array,
y=ccm_array,
s=50,
label=label,
)
ax.set_xscale("log")
ax.set_yscale("log")
def plot_fit_on_ax(
ax: Axes,
fit: powerlaw.Fit,
fit_distribution: Dist,
) -> None:
"""
Plot powerlaw model to ax.
"""
if fit_distribution == Dist.POWERLAW:
fit.power_law.plot_ccdf(ax=ax, label="Powerlaw", linestyle="--", color="red")
elif fit_distribution == Dist.LOGNORMAL:
fit.lognormal.plot_ccdf(ax=ax, label="Lognormal", linestyle="--", color="lime")
elif fit_distribution == Dist.EXPONENTIAL:
fit.exponential.plot_ccdf(
ax=ax, label="Exponential", linestyle="--", color="blue"
)
else:
raise ValueError(f"Expected fit_distribution to be one of {list(Dist)}")
def _setup_length_plot_axlims(
ax: Axes,
length_array: np.ndarray,
ccm_array: np.ndarray,
# cut_off: float,
):
"""
Set ax limits for length plotting.
"""
# truncated_length_array = (
# length_array[length_array > cut_off] if cut_off is not None else length_array
# )
left = length_array.min() / 10
right = length_array.max() * 10
bottom = ccm_array.min() / 10
top = ccm_array.max() * 10
try:
ax.set_xlim(left, right)
ax.set_ylim(bottom, top)
except ValueError:
# Don't try setting if it errors
pass
def plot_distribution_fits(
length_array: np.ndarray,
label: str,
cut_off: Optional[float] = None,
fit: Optional[powerlaw.Fit] = None,
) -> Tuple[powerlaw.Fit, Figure, Axes]:
"""
Plot length distribution and `powerlaw` fits.
If a powerlaw.Fit is not given it will be automatically determined (using
the optionally given cut_off).
"""
if fit is None:
# Determine powerlaw, exponential, lognormal fits
fit = determine_fit(length_array, cut_off)
# Create figure, ax
fig, ax = plt.subplots(figsize=(7, 7))
# Get the x, y data from fit
truncated_length_array, ccm_array = fit.ccdf()
# Plot length scatter plot
plot_length_data_on_ax(ax, truncated_length_array, ccm_array, label)
# Plot the actual fits (powerlaw, exp...)
for fit_distribution in (Dist.EXPONENTIAL, Dist.LOGNORMAL, Dist.POWERLAW):
plot_fit_on_ax(ax, fit, fit_distribution)
# Setup of ax appearance and axlims
setup_ax_for_ld(ax, using_branches=False)
_setup_length_plot_axlims(
ax=ax,
length_array=truncated_length_array,
ccm_array=ccm_array,
)
return fit, fig, ax
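# Illustrative usage sketch: plotting distribution fits for synthetic lognormal
# lengths. The data, seed and label are hypothetical; requires matplotlib.
def _example_plot_distribution_fits():
    import numpy as np
    lengths = np.random.default_rng(7).lognormal(mean=0.0, sigma=1.0, size=300)
    fit, fig, ax = plot_distribution_fits(lengths, label="Synthetic traces")
    # The returned powerlaw.Fit can be inspected further, e.g. its alpha estimate.
    return fit.alpha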
def setup_ax_for_ld(ax_for_setup, using_branches, indiv_fit=False):
"""
Configure ax for length distribution plots.
:param ax_for_setup: Ax to setup.
:type ax_for_setup: Axes
:param using_branches: Are the lines branches or traces.
:type using_branches: bool
:param indiv_fit: Whether the plot is for an individual (non-multi-scale) fit.
:type indiv_fit: bool
"""
#
ax = ax_for_setup
# LABELS
label = "Branch length $(m)$" if using_branches else "Trace Length $(m)$"
ax.set_xlabel(
label,
fontsize="xx-large",
fontfamily="DejaVu Sans",
style="italic",
labelpad=16,
)
# Individual powerlaw fits are not normalized to area because they aren't
# multiscale
ccm_unit = r"$(\frac{1}{m^2})$" if not indiv_fit else ""
ax.set_ylabel(
"Complementary Cumulative Number " + ccm_unit,
fontsize="xx-large",
fontfamily="DejaVu Sans",
style="italic",
)
# TICKS
plt.xticks(color="black", fontsize="x-large")
plt.yticks(color="black", fontsize="x-large")
plt.tick_params(axis="both", width=1.2)
# LEGEND
handles, labels = ax.get_legend_handles_labels()
labels = ["\n".join(wrap(label, 13)) for label in labels]
lgnd = plt.legend(
handles,
labels,
loc="upper center",
bbox_to_anchor=(1.37, 1.02),
ncol=2,
columnspacing=0.3,
shadow=True,
prop={"family": "DejaVu Sans", "weight": "heavy", "size": "large"},
)
for lh in lgnd.legendHandles:
# lh._sizes = [750]
lh.set_linewidth(3)
ax.grid(zorder=-10, color="black", alpha=0.5)
def distribution_compare_dict(fit: powerlaw.Fit) -> Dict[str, float]:
"""
Compose a dict of length distribution fit comparisons.
"""
compare_dict = dict()
for dist_enum_pairs in [
(Dist.POWERLAW, Dist.LOGNORMAL),
(Dist.POWERLAW, Dist.EXPONENTIAL),
(Dist.LOGNORMAL, Dist.EXPONENTIAL),
(Dist.POWERLAW, Dist.TRUNCATED_POWERLAW),
]:
first, second = dist_enum_pairs[0].value, dist_enum_pairs[1].value
r, p = fit.distribution_compare(first, second, normalized_ratio=True)
compare_dict[f"{first} vs. {second} R"] = r
compare_dict[f"{first} vs. {second} p"] = p
return compare_dict
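# Illustrative sketch: each entry pairs a normalized log-likelihood ratio R with
# its p-value; a positive R favours the first-named distribution. The synthetic
# data are hypothetical and the exact key strings depend on the Dist values.
def _example_distribution_compare_dict():
    import numpy as np
    fit = determine_fit(np.random.default_rng(11).pareto(a=1.6, size=400) + 1.0)
    return distribution_compare_dict(fit)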
def all_fit_attributes_dict(fit: powerlaw.Fit) -> Dict[str, float]:
"""
Collect 'all' fit attributes into a dict.
"""
return {
# **describe_powerlaw_fit(fit),
# Attributes for remaking fits
Dist.LOGNORMAL.value + " " + SIGMA: fit.lognormal.sigma,
Dist.LOGNORMAL.value + " " + MU: fit.lognormal.mu,
Dist.EXPONENTIAL.value + " " + LAMBDA: fit.exponential.Lambda,
Dist.TRUNCATED_POWERLAW.value + " " + LAMBDA: fit.truncated_power_law.Lambda,
Dist.TRUNCATED_POWERLAW.value + " " + ALPHA: fit.truncated_power_law.alpha,
Dist.TRUNCATED_POWERLAW.value
+ " "
+ EXPONENT: -(fit.truncated_power_law.alpha - 1),
# Fit statistics
Dist.LOGNORMAL.value + " " + LOGLIKELIHOOD: fit.lognormal.loglikelihood,
Dist.EXPONENTIAL.value + " " + LOGLIKELIHOOD: fit.exponential.loglikelihood,
Dist.TRUNCATED_POWERLAW.value
+ " "
+ LOGLIKELIHOOD: fit.truncated_power_law.loglikelihood,
}
def describe_powerlaw_fit(
fit: powerlaw.Fit, label: Optional[str] = None
) -> Dict[str, float]:
"""
Compose dict of fit powerlaw attributes and comparisons between fits.
"""
base = {
**distribution_compare_dict(fit),
Dist.POWERLAW.value + " " + KOLM_DIST: fit.power_law.D,
Dist.EXPONENTIAL.value + " " + KOLM_DIST: fit.exponential.D,
Dist.LOGNORMAL.value + " " + KOLM_DIST: fit.lognormal.D,
Dist.TRUNCATED_POWERLAW.value + " " + KOLM_DIST: fit.truncated_power_law.D,
Dist.POWERLAW.value + " " + ALPHA: fit.alpha,
Dist.POWERLAW.value + " " + EXPONENT: -(fit.alpha - 1),
Dist.POWERLAW.value + " " + CUT_OFF: fit.xmin,
Dist.POWERLAW.value + " " + SIGMA: fit.power_law.sigma,
**all_fit_attributes_dict(fit),
}
if label is None:
return base
return {f"{label} {key}": value for key, value in base.items()}
def sort_and_log_lengths_and_ccm(
lengths: np.ndarray, ccm: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""
Preprocess lengths and ccm.
Sorts them and calculates their natural logarithms.
"""
# Find sorted order of lengths
length_sort_permutation = lengths.argsort()
# Apply sorted order of lengths to both lengths and ccms
log_lengths_sorted = np.log(lengths[length_sort_permutation])
log_ccm_sorted = np.log(ccm[length_sort_permutation])
return log_lengths_sorted, log_ccm_sorted
def calculate_fitted_values(
log_lengths: np.ndarray, m_value: float, constant: float
) -> np.ndarray:
"""
Calculate fitted values of y.
"""
# Calculate the fitted values of y
y_fit = np.exp(m_value * log_lengths + constant)
return y_fit
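# Small worked example (illustrative): because the fit is linear in log-log space,
# the fitted values equal exp(constant) * lengths ** m_value.
def _example_calculate_fitted_values():
    import numpy as np
    lengths = np.array([1.0, 10.0, 100.0])
    m_value, constant = -1.5, 2.0
    y_fit = calculate_fitted_values(
        log_lengths=np.log(lengths), m_value=m_value, constant=constant
    )
    assert np.allclose(y_fit, np.exp(constant) * lengths ** m_value)
    return y_fit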
def fit_to_multi_scale_lengths(
ccm: np.ndarray,
lengths: np.ndarray,
fitter: Callable[[np.ndarray, np.ndarray], Tuple[float, float]] = numpy_polyfit,
) -> Tuple[np.ndarray, float, float]:
"""
Fit np.polyfit to multiscale length distributions.
Returns the fitted values, exponent and constant of fit.
"""
log_lengths_sorted, log_ccm_sorted = sort_and_log_lengths_and_ccm(
lengths=lengths, ccm=ccm
)
# Fit numpy polyfit to data
fit_vals = fitter(log_lengths_sorted, log_ccm_sorted)
assert len(fit_vals) == 2
m_value, constant = fit_vals
logging.info(
"Fitted with fitter.",
extra=dict(fitter=fitter, m_value=m_value, constant=constant),
)
# Calculate the fitted values of y
y_fit = calculate_fitted_values(
log_lengths=log_lengths_sorted,
m_value=m_value,
constant=constant,
)
return y_fit, m_value, constant
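# Illustrative usage sketch with the default numpy_polyfit fitter. The length and
# ccm arrays are hypothetical stand-ins for the concatenated multi-scale data.
def _example_fit_to_multi_scale_lengths():
    import numpy as np
    lengths = np.array([0.5, 1.0, 5.0, 10.0, 50.0])
    ccm = np.array([2.0, 1.0, 0.2, 0.1, 0.02])
    y_fit, m_value, constant = fit_to_multi_scale_lengths(ccm=ccm, lengths=lengths)
    # m_value is the slope (power-law exponent) of the fitted line in log-log space.
    return y_fit, m_value, constant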
# def multi_scale_length_distribution_fit(
# distributions: List[LengthDistribution],
# auto_cut_off: bool,
# -*- test-case-name: twisted.internet.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides support for Twisted to interact with the glib/gtk2
mainloop.
In order to use this support, simply do the following::
| from twisted.internet import gtk2reactor
| gtk2reactor.install()
Then use twisted.internet APIs as usual. The other methods here are not
intended to be called directly.
When installing the reactor, you can choose whether to use the glib
event loop or the GTK+ event loop which is based on it but adds GUI
integration.
"""
# System Imports
import sys, signal
from zope.interface import implements
try:
if not hasattr(sys, 'frozen'):
# Don't want to check this for py2exe
import pygtk
pygtk.require('2.0')
except (ImportError, AttributeError):
pass # maybe we're using pygtk before this hack existed.
import gobject
if hasattr(gobject, "threads_init"):
# recent versions of python-gtk expose this. python-gtk=2.4.1
# (wrapping glib-2.4.7) does. python-gtk=2.0.0 (wrapping
# glib-2.2.3) does not.
gobject.threads_init()
# Twisted Imports
from twisted.python import log, runtime, failure
from twisted.python.compat import set
from twisted.internet.interfaces import IReactorFDSet
from twisted.internet import main, base, posixbase, error, selectreactor
POLL_DISCONNECTED = gobject.IO_HUP | gobject.IO_ERR | gobject.IO_NVAL
# glib's iochannel sources won't tell us about any events that we haven't
# asked for, even if those events aren't sensible inputs to the poll()
# call.
INFLAGS = gobject.IO_IN | POLL_DISCONNECTED
OUTFLAGS = gobject.IO_OUT | POLL_DISCONNECTED
def _our_mainquit():
# XXX: gtk.main_quit() (which is used for crash()) raises an exception if
# gtk.main_level() == 0; however, all the tests freeze if we use this
# function to stop the reactor. what gives? (I believe this may have been
# a stupid mistake where I forgot to import gtk here... I will remove this
# comment if the tests pass)
import gtk
if gtk.main_level():
gtk.main_quit()
class Gtk2Reactor(posixbase.PosixReactorBase):
"""
GTK+-2 event loop reactor.
@ivar _sources: A dictionary mapping L{FileDescriptor} instances to gtk
watch handles.
@ivar _reads: A set of L{FileDescriptor} instances currently monitored for
reading.
@ivar _writes: A set of L{FileDescriptor} instances currently monitored for
writing.
@ivar _simtag: A gtk timeout handle for the next L{simulate} call.
"""
implements(IReactorFDSet)
def __init__(self, useGtk=True):
self._simtag = None
self._reads = set()
self._writes = set()
self._sources = {}
posixbase.PosixReactorBase.__init__(self)
# pre 2.3.91 the glib iteration and mainloop functions didn't release
# global interpreter lock, thus breaking thread and signal support.
if getattr(gobject, "pygtk_version", ()) >= (2, 3, 91) and not useGtk:
self.context = gobject.main_context_default()
self.__pending = self.context.pending
self.__iteration = self.context.iteration
self.loop = gobject.MainLoop()
self.__crash = self.loop.quit
self.__run = self.loop.run
else:
import gtk
self.__pending = gtk.events_pending
self.__iteration = gtk.main_iteration
self.__crash = _our_mainquit
self.__run = gtk.main
if runtime.platformType == 'posix':
def _handleSignals(self):
# Let the base class do its thing, but pygtk is probably
# going to stomp on us so go beyond that and set up some
# signal handling which pygtk won't mess with. This would
# be better done by letting this reactor select a
# different implementation of installHandler for
# _SIGCHLDWaker to use. Then, at least, we could fall
# back to our extension module. See #4286.
from twisted.internet.process import reapAllProcesses as _reapAllProcesses
base._SignalReactorMixin._handleSignals(self)
signal.signal(signal.SIGCHLD, lambda *a: self.callFromThread(_reapAllProcesses))
if getattr(signal, "siginterrupt", None) is not None:
signal.siginterrupt(signal.SIGCHLD, False)
# Like the base, reap processes now in case a process
# exited before the handlers above were installed.
_reapAllProcesses()
# The input_add function in pygtk1 checks for objects with a
# 'fileno' method and, if present, uses the result of that method
# as the input source. The pygtk2 input_add does not do this. The
# function below replicates the pygtk1 functionality.
# In addition, pygtk maps gtk.input_add to _gobject.io_add_watch, and
# g_io_add_watch() takes different condition bitfields than
# gtk_input_add(). We use g_io_add_watch() here in case pygtk fixes this
# bug.
def input_add(self, source, condition, callback):
if hasattr(source, 'fileno'):
# handle python objects
def wrapper(source, condition, real_s=source, real_cb=callback):
return real_cb(real_s, condition)
return gobject.io_add_watch(source.fileno(), condition, wrapper)
else:
return gobject.io_add_watch(source, condition, callback)
def _add(self, source, primary, other, primaryFlag, otherFlag):
"""
Add the given L{FileDescriptor} for monitoring either for reading or
writing. If the file is already monitored for the other operation, we
delete the previous registration and re-register it for both reading
and writing.
"""
if source in primary:
return
flags = primaryFlag
if source in other:
gobject.source_remove(self._sources[source])
flags |= otherFlag
self._sources[source] = self.input_add(source, flags, self.callback)
primary.add(source)
def addReader(self, reader):
"""
Add a L{FileDescriptor} for monitoring of data available to read.
"""
self._add(reader, self._reads, self._writes, INFLAGS, OUTFLAGS)
def addWriter(self, writer):
"""
Add a L{FileDescriptor} for monitoring ability to write data.
"""
self._add(writer, self._writes, self._reads, OUTFLAGS, INFLAGS)
def getReaders(self):
"""
Retrieve the list of current L{FileDescriptor} monitored for reading.
"""
return list(self._reads)
def getWriters(self):
"""
Retrieve the list of current L{FileDescriptor} monitored for writing.
"""
return list(self._writes)
def removeAll(self):
"""
Remove monitoring for all registered L{FileDescriptor}s.
"""
return self._removeAll(self._reads, self._writes)
def _remove(self, source, primary, other, flags):
"""
Remove monitoring the given L{FileDescriptor} for either reading or
writing. If it's still monitored for the other operation, we
re-register the L{FileDescriptor} for only that operation.
"""
if source not in primary:
return
gobject.source_remove(self._sources[source])
primary.remove(source)
if source in other:
self._sources[source] = self.input_add(
source, flags, self.callback)
else:
self._sources.pop(source)
def removeReader(self, reader):
"""
Stop monitoring the given L{FileDescriptor} for reading.
"""
self._remove(reader, self._reads, self._writes, OUTFLAGS)
def removeWriter(self, writer):
"""
Stop monitoring the given L{FileDescriptor} for writing.
"""
self._remove(writer, self._writes, self._reads, INFLAGS)
doIterationTimer = None
def doIterationTimeout(self, *args):
self.doIterationTimer = None
return 0 # auto-remove
def doIteration(self, delay):
# flush some pending events, return if there was something to do
# don't use the usual "while self.context.pending(): self.context.iteration()"
# idiom because lots of IO (in particular test_tcp's
# ProperlyCloseFilesTestCase) can keep us from ever exiting.
log.msg(channel='system', event='iteration', reactor=self)
if self.__pending():
self.__iteration(0)
return
# nothing to do, must delay
if delay == 0:
return # shouldn't delay, so just return
self.doIterationTimer = gobject.timeout_add(int(delay * 1000),
self.doIterationTimeout)
# This will either wake up from IO or from a timeout.
self.__iteration(1) # block
# note: with the .simulate timer below, delays > 0.1 will always be
# woken up by the .simulate timer
if self.doIterationTimer:
# if woken by IO, need to cancel the timer
gobject.source_remove(self.doIterationTimer)
self.doIterationTimer = None
def crash(self):
posixbase.PosixReactorBase.crash(self)
self.__crash()
def run(self, installSignalHandlers=1):
self.startRunning(installSignalHandlers=installSignalHandlers)
gobject.timeout_add(0, self.simulate)
if self._started:
self.__run()
def _doReadOrWrite(self, source, condition, faildict={
error.ConnectionDone: failure.Failure(error.ConnectionDone()),
error.ConnectionLost: failure.Failure(error.ConnectionLost()),
}):
why = None
inRead = False
if condition & POLL_DISCONNECTED and not (condition & gobject.IO_IN):
if source in self._reads:
why = main.CONNECTION_DONE
inRead = True
else:
why = main.CONNECTION_LOST
else:
try:
if condition & gobject.IO_IN:
why = source.doRead()
inRead = True
if not why and condition & gobject.IO_OUT:
# if doRead caused connectionLost, don't call doWrite
# if doRead is doWrite, don't call it again.
if not source.disconnected:
why = source.doWrite()
except:
why = sys.exc_info()[1]
log.msg('Error In %s' % source)
log.deferr()
if why:
self._disconnectSelectable(source, why, inRead)
def callback(self, source, condition):
log.callWithLogger(source, self._doReadOrWrite, source, condition)
self.simulate() # fire Twisted timers
return 1 # 1=don't auto-remove the source
def simulate(self):
"""
Run simulation loops and reschedule callbacks.
"""
if self._simtag is not None:
gobject.source_remove(self._simtag)
self.runUntilCurrent()
timeout = min(self.timeout(), 0.1)
if timeout is None:
timeout = 0.1
# grumble
self._simtag = gobject.timeout_add(int(timeout * 1010), self.simulate)
class PortableGtkReactor(selectreactor.SelectReactor):
"""
Reactor that works on Windows.
Sockets aren't supported by GTK+'s input_add on Win32.
"""
_simtag = None
def crash(self):
selectreactor.SelectReactor.crash(self)
import gtk
# mainquit is deprecated in newer versions
if gtk.main_level():
if hasattr(gtk, 'main_quit'):
gtk.main_quit()
else:
gtk.mainquit()
def run(self, installSignalHandlers=1):
import gtk
self.startRunning(installSignalHandlers=installSignalHandlers)
gobject.timeout_add(0, self.simulate)
# mainloop is deprecated in newer versions
if hasattr(gtk, 'main'):
gtk.main()
else:
gtk.mainloop()
def simulate(self):
"""
Run simulation loops and reschedule callbacks.
"""
if self._simtag is not None:
gobject.source_remove(self._simtag)
self.iterate()
timeout = min(self.timeout(), 0.1)
if timeout is None:
timeout = 0.1
# grumble
self._simtag = gobject.timeout_add(int(timeout * 1010), self.simulate)
def install(useGtk=True):
"""
Configure the twisted mainloop to be run inside the gtk mainloop.
@param useGtk: should glib rather than GTK+ event loop be
used (this will be slightly faster but does not support GUI).
"""
reactor = Gtk2Reactor(useGtk)
from twisted.internet.main import installReactor
installReactor(reactor)
return reactor
# -*- coding: utf-8 -*-
"""
Conditional logit
Sources: sandbox-statsmodels:runmnl.py
General References
--------------------
Greene, W. H. `Econometric Analysis`. Prentice Hall, 5th. edition. 2003.
Train, K. E. `Discrete Choice Methods with Simulation`.
Cambridge University Press. 2003
--------------------
"""
import numpy as np
import pandas as pd
from statsmodels.base.model import (LikelihoodModel,
LikelihoodModelResults, ResultMixin)
import statsmodels.api as sm
import time
from collections import OrderedDict
from scipy import stats
from statsmodels.tools.decorators import (resettable_cache,
cache_readonly)
# TODO: public/private method
class CLogit(LikelihoodModel):
__doc__ = """
Conditional Logit
Parameters
----------
endog_data : array
dummy encoding of realized choices.
exog_data : array (nobs, k*)
array with explanatory variables. Variables for the model are selected
by V, so k* can be >= k. An intercept is not included by
default and should be added by the user.
V: dict
a dictionary with the names of the explanatory variables for the
utility function for each alternative.
Alternative specific variables (common coefficients) have to be first.
For specific variables (various coefficients), choose an alternative and
drop all specific variables on it.
ncommon : int
number of explanatory variables with common coefficients.
ref_level : str
Name of the key for the alternative of reference.
name_intercept : str
name of the column with the intercept. 'None' if an intercept is not
included.
Attributes
----------
endog : array (nobs*J, )
the endogenous response variable
endog_bychoices: array (nobs,J)
the endogenous response variable by choices
exog_matrix: array (nobs*J,K)
the exogenous explanatory variables
exog_bychoices: list of arrays J * (nobs,K)
the exogenous explanatory variables by choices. One array of exog
for each choice.
nobs : float
number of observations.
J : float
The number of choices for the endogenous variable. Note that this
is zero-indexed.
K : float
The actual number of parameters for the exogenous design. Includes
the constant if the design has one and excludes the constant of one
choice which should be dropped for identification.
loglikeobs
params
score
jac
hessian
information
predict
residuals
resid_misclassified
pred_table
summary : Summary instance
summarize the results inside CLogitResults class.
Notes
-----
Utility for choice j is given by
$V_j = X_j * beta + Z * gamma_j$
where X_j contains generic variables (terminology Hess) that have the same
coefficient across choices, and Z are variables, like individual-specific
variables that have different coefficients across choices.
If there are choice specific constants, then they should be contained in Z.
For identification, the constant of one choice should be dropped.
"""
def __init__(self, endog_data, exog_data, V, ncommon, ref_level,
name_intercept = None, **kwds):
self.endog_data = endog_data
self.exog_data = exog_data
self.V = V
self.ncommon = ncommon
self.ref_level = ref_level
if name_intercept == None:
self.exog_data['Intercept'] = 1
self.name_intercept = 'Intercept'
else:
self.name_intercept = name_intercept
self._initialize()
super(CLogit, self).__init__(endog = endog_data,
exog = self.exog_matrix, **kwds)
def _initialize(self):
"""
Preprocesses the data for Clogit
"""
self.J = len(self.V)
self.nobs = self.endog_data.shape[0] / self.J
# Endog_bychoices
self.endog_bychoices = self.endog_data.values.reshape(-1, self.J)
# Exog_bychoices
exog_bychoices = []
exog_bychoices_names = []
choice_index = np.array(self.V.keys() * self.nobs)
for key in iter(self.V):
(exog_bychoices.append(self.exog_data[self.V[key]]
[choice_index == key]
.values.reshape(self.nobs, -1)))
for key in self.V:
exog_bychoices_names.append(self.V[key])
self.exog_bychoices = exog_bychoices
# Betas
beta_not_common = ([len(exog_bychoices_names[ii]) - self.ncommon
for ii in range(self.J)])
exog_names_prueba = []
for ii, key in enumerate(self.V):
exog_names_prueba.append(key * beta_not_common[ii])
zi = np.r_[[self.ncommon], self.ncommon + np.array(beta_not_common)\
.cumsum()]
z = np.arange(max(zi))
beta_ind = [np.r_[np.arange(self.ncommon), z[zi[ii]:zi[ii + 1]]]
for ii in range(len(zi) - 1)] # index of betas
self.beta_ind = beta_ind
beta_ind_str = ([map(str, beta_ind[ii]) for ii in range(self.J)])
beta_ind_J = ([map(str, beta_ind[ii]) for ii in range(self.J)])
for ii in range(self.J):
for jj, item in enumerate(beta_ind[ii]):
if item in np.arange(self.ncommon):
beta_ind_J[ii][jj] = ''
else:
beta_ind_J[ii][jj] = ' (' + self.V.keys()[ii] + ')'
self.betas = OrderedDict()
for sublist in range(self.J):
aa = []
for ii in range(len(exog_bychoices_names[sublist])):
aa.append(
beta_ind_str[sublist][ii] + ' ' +
exog_bychoices_names[sublist][ii]
+ beta_ind_J[sublist][ii])
self.betas[sublist] = aa
# Exog
pieces = []
for ii in range(self.J):
pieces.append(pd.DataFrame(exog_bychoices[ii], columns=self.betas[ii]))
self.exog_matrix_all = (pd.concat(pieces, axis = 0, keys = self.V.keys(),
names = ['choice', 'nobs'])
.fillna(value = 0).sortlevel(1).reset_index())
self.exog_matrix = self.exog_matrix_all.iloc[:, 2:]
self.K = len(self.exog_matrix.columns)
self.df_model = self.K
self.df_resid = int(self.nobs - self.K)
def xbetas(self, params):
'''the Utilities V_i
'''
res = np.empty((self.nobs, self.J))
for ii in range(self.J):
res[:, ii] = np.dot(self.exog_bychoices[ii],
params[self.beta_ind[ii]])
return res
def cdf(self, item):
"""
Conditional Logit cumulative distribution function.
Parameters
----------
X : array (nobs,K)
the linear predictor of the model.
Returns
--------
cdf : ndarray
the cdf evaluated at `X`.
Notes
-----
.. math:: \\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}
"""
eXB = np.exp(item)
return eXB / eXB.sum(1)[:, None]
def pdf(self, item):
"""
Conditional Logit probability density function.
"""
raise NotImplementedError
def loglike(self, params):
"""
Log-likelihood of the conditional logit model.
Parameters
----------
params : array
the parameters of the conditional logit model.
Returns
-------
loglike : float
the log-likelihood function of the model evaluated at `params`.
Notes
------
.. math:: \\ln L=\\sum_{i=1}^{n}\\sum_{j=0}^{J}d_{ij}\\ln\\left(\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)
where :math:`d_{ij}=1` if individual `i` chose alternative `j` and 0
if not.
"""
xb = self.xbetas(params)
loglike = (self.endog_bychoices * np.log(self.cdf(xb))).sum(1)
return loglike.sum()
def loglikeobs(self, params):
"""
Log-likelihood for each observation.
Parameters
----------
params : array-like
The parameters of the model.
Returns
-------
loglike : ndarray (nobs,K)
The log likelihood for each observation of the model evaluated
at `params`. See Notes
Notes
------
.. math:: \\ln L_{i}=\\sum_{j=0}^{J}d_{ij}\\ln\\left(\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)
for observations :math:`i=1,...,n`
where :math:`d_{ij}=1` if individual `i` chose alternative `j` and 0
if not.
"""
xb = self.xbetas(params)
return (self.endog_bychoices * np.log(self.cdf(xb))).sum(1)
def score(self, params):
"""
Score/gradient matrix for conditional logit model log-likelihood
Parameters
----------
params : array
the parameters of the conditional logit model.
Returns
--------
score : ndarray 1d (K)
the score vector of the model evaluated at `params`.
Notes
-----
It is the first derivative of the loglikelihood function of the
conditional logit model evaluated at `params`.
.. math:: \\frac{\\partial\\ln L}{\\partial\\beta_{j}}=\\sum_{i}\\left(d_{ij}-\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)x_{i}
for :math:`j=1,...,J`
"""
firstterm = (self.endog_bychoices - self.cdf(self.xbetas(params)))\
.reshape(-1, 1)
return np.dot(firstterm.T, self.exog).flatten()
def jac(self, params):
"""
Jacobian matrix for conditional logit model log-likelihood.
Parameters
----------
params : array
the parameters of the conditional logit model.
Returns
--------
jac : ndarray, (nobs, K)
the jacobian for each observation.
Notes
-----
It is the first derivative of the loglikelihood function of the
conditional logit model for each observation evaluated at `params`.
.. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta_{j}}=\\left(d_{ij}-\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)x_{i}
for :math:`j=1,...,J`, for observations :math:`i=1,...,n`
"""
firsterm = (self.endog_bychoices - self.cdf(self.xbetas(params)))\
.reshape(-1, 1)
return (firsterm * self.exog)
def hessian(self, params):
"""
Conditional logit Hessian matrix of the log-likelihood
Parameters
-----------
params : array-like
The parameters of the model
Returns
-------
hess : ndarray, (K, K)
The Hessian
Notes
-----
It is the second derivative with respect to the flattened parameters
of the loglikelihood function of the conditional logit model
evaluated at `params`.
.. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta_{j}\\partial\\beta_{l}}=-\\sum_{i=1}^{n}\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\left[\\boldsymbol{1}\\left(j=l\\right)-\\frac{\\exp\\left(\\beta_{l}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right]x_{i}x_{l}^{\\prime}
where
:math:`\\boldsymbol{1}\\left(j=l\\right)` equals 1 if `j` = `l` and 0
otherwise.
"""
# TODO: analytical derivatives
from statsmodels.tools.numdiff import approx_hess
# need options for hess (epsilon)
return approx_hess(params, self.loglike)
def information(self, params):
"""
Fisher information matrix of model
Returns -Hessian of loglike evaluated at params.
"""
raise NotImplementedError
def fit(self, start_params=None, maxiter=10000, maxfun=5000,
method="newton", full_output=1, disp=None, callback=None, **kwds):
"""
Fits CLogit() model using maximum likelihood.
In a linear model the log-likelihood function of the sample is
globally concave in the β parameters, which facilitates its numerical
maximization (McFadden, 1973).
The method is fixed to Newton because it finds the maximum in a few
iterations. The Newton method requires a likelihood function, a gradient
and a Hessian. Since analytical expressions are known, they are provided.
Initial parameter estimates are taken from a standard logit fit.
Returns
-------
Fit object for likelihood based models
See: GenericLikelihoodModelResults
"""
if start_params is None:
Logit_res = sm.Logit(self.endog, self.exog_matrix).fit(disp=0)
start_params = Logit_res.params.values
else:
start_params = np.asarray(start_params)
start_time = time.time()
model_fit = super(CLogit, self).fit(disp = disp,
start_params = start_params,
method=method, maxiter=maxiter,
maxfun=maxfun, **kwds)
self.params = model_fit.params
end_time = time.time()
self.elapsed_time = end_time - start_time
return model_fit
def predict(self, params, linear=False):
"""
Predict response variable of a model given exogenous variables.
Parameters
----------
params : array-like
Fitted parameters of the model.
linear : bool, optional
If True, returns the linear predictor dot(exog_bychoices,params).
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: ec2_vpc_nacl
short_description: create and delete Network ACLs.
description:
- Read the AWS documentation for Network ACLs
U(https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html)
version_added: "2.2"
options:
name:
description:
- Tagged name identifying a network ACL.
- One and only one of the I(name) or I(nacl_id) is required.
required: false
nacl_id:
description:
- NACL id identifying a network ACL.
- One and only one of the I(name) or I(nacl_id) is required.
required: false
version_added: "2.4"
vpc_id:
description:
- VPC id of the requesting VPC.
- Required when state present.
required: false
subnets:
description:
- The list of subnets that should be associated with the network ACL.
- Must be specified as a list
- Each subnet can be specified as subnet ID, or its tagged name.
required: false
egress:
description:
- A list of rules for outgoing traffic. Each rule must be specified as a list.
Each rule may contain the rule number (integer 1-32766), protocol (one of ['tcp', 'udp', 'icmp', '-1', 'all']),
the rule action ('allow' or 'deny'), the CIDR of the IPv4 network range to allow or deny,
the ICMP type (-1 means all types), the ICMP code (-1 means all codes), the first port in the range for
TCP or UDP protocols, and the last port in the range for TCP or UDP protocols.
See examples.
default: []
required: false
ingress:
description:
- List of rules for incoming traffic. Each rule must be specified as a list.
Each rule may contain the rule number (integer 1-32766), protocol (one of ['tcp', 'udp', 'icmp', '-1', 'all']),
the rule action ('allow' or 'deny'), the CIDR of the IPv4 network range to allow or deny,
the ICMP type (-1 means all types), the ICMP code (-1 means all codes), the first port in the range for
TCP or UDP protocols, and the last port in the range for TCP or UDP protocols.
See examples.
default: []
required: false
tags:
description:
- Dictionary of tags to look for and apply when creating a network ACL.
required: false
state:
description:
- Creates or modifies an existing NACL
- Deletes a NACL and reassociates subnets to the default NACL
required: false
choices: ['present', 'absent']
default: present
author: <NAME> (@mmochan)
extends_documentation_fragment:
- aws
- ec2
requirements: [ botocore, boto3, json ]
'''
EXAMPLES = '''
# Complete example to create and delete a network ACL
# that allows SSH, HTTP and ICMP in, and all traffic out.
- name: "Create and associate production DMZ network ACL with DMZ subnets"
ec2_vpc_nacl:
vpc_id: vpc-12345678
name: prod-dmz-nacl
region: ap-southeast-2
subnets: ['prod-dmz-1', 'prod-dmz-2']
tags:
CostCode: CC1234
Project: phoenix
Description: production DMZ
ingress:
# rule no, protocol, allow/deny, cidr, icmp_type, icmp_code,
# port from, port to
- [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
- [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
- [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
egress:
- [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
state: 'present'
- name: "Remove the ingress and egress rules - defaults to deny all"
ec2_vpc_nacl:
vpc_id: vpc-12345678
name: prod-dmz-nacl
region: ap-southeast-2
subnets:
- prod-dmz-1
- prod-dmz-2
tags:
CostCode: CC1234
Project: phoenix
Description: production DMZ
state: present
- name: "Remove the NACL subnet associations and tags"
ec2_vpc_nacl:
vpc_id: 'vpc-12345678'
name: prod-dmz-nacl
region: ap-southeast-2
state: present
- name: "Delete nacl and subnet associations"
ec2_vpc_nacl:
vpc_id: vpc-12345678
name: prod-dmz-nacl
state: absent
- name: "Delete nacl by its id"
ec2_vpc_nacl:
nacl_id: acl-33b4ee5b
state: absent
'''
RETURN = '''
task:
description: The result of the create, or delete action.
returned: success
type: dict
'''
try:
import botocore
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
# VPC-supported IANA protocol numbers
# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
PROTOCOL_NUMBERS = {'all': -1, 'icmp': 1, 'tcp': 6, 'udp': 17, }
# Utility methods
def icmp_present(entry):
if len(entry) == 6 and (entry[1] == 'icmp' or entry[1] == 1):
return True
def load_tags(module):
tags = []
if module.params.get('tags'):
for name, value in module.params.get('tags').items():
tags.append({'Key': name, 'Value': str(value)})
tags.append({'Key': "Name", 'Value': module.params.get('name')})
else:
tags.append({'Key': "Name", 'Value': module.params.get('name')})
return tags
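# Illustrative sketch: `load_tags` reads AnsibleModule-style params. A minimal
# stand-in object is used here purely to show the Key/Value structure produced;
# it is not how the module is invoked by Ansible.
def _example_load_tags():
    class _FakeModule:
        params = {'name': 'prod-dmz-nacl', 'tags': {'Project': 'phoenix'}}
    return load_tags(_FakeModule())
    # -> [{'Key': 'Project', 'Value': 'phoenix'}, {'Key': 'Name', 'Value': 'prod-dmz-nacl'}]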
def subnets_removed(nacl_id, subnets, client, module):
results = find_acl_by_id(nacl_id, client, module)
associations = results['NetworkAcls'][0]['Associations']
subnet_ids = [assoc['SubnetId'] for assoc in associations]
return [subnet for subnet in subnet_ids if subnet not in subnets]
def subnets_added(nacl_id, subnets, client, module):
results = find_acl_by_id(nacl_id, client, module)
associations = results['NetworkAcls'][0]['Associations']
subnet_ids = [assoc['SubnetId'] for assoc in associations]
return [subnet for subnet in subnets if subnet not in subnet_ids]
def subnets_changed(nacl, client, module):
changed = False
vpc_id = module.params.get('vpc_id')
nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
subnets = subnets_to_associate(nacl, client, module)
if not subnets:
default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0]
subnets = find_subnet_ids_by_nacl_id(nacl_id, client, module)
if subnets:
replace_network_acl_association(default_nacl_id, subnets, client, module)
changed = True
return changed
changed = False
return changed
subs_added = subnets_added(nacl_id, subnets, client, module)
if subs_added:
replace_network_acl_association(nacl_id, subs_added, client, module)
changed = True
subs_removed = subnets_removed(nacl_id, subnets, client, module)
if subs_removed:
default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0]
replace_network_acl_association(default_nacl_id, subs_removed, client, module)
changed = True
return changed
def nacls_changed(nacl, client, module):
changed = False
params = dict()
params['egress'] = module.params.get('egress')
params['ingress'] = module.params.get('ingress')
nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
nacl = describe_network_acl(client, module)
entries = nacl['NetworkAcls'][0]['Entries']
egress = [rule for rule in entries if rule['Egress'] is True and rule['RuleNumber'] < 32767]
ingress = [rule for rule in entries if rule['Egress'] is False and rule['RuleNumber'] < 32767]
if rules_changed(egress, params['egress'], True, nacl_id, client, module):
changed = True
if rules_changed(ingress, params['ingress'], False, nacl_id, client, module):
changed = True
return changed
def tags_changed(nacl_id, client, module):
changed = False
tags = dict()
if module.params.get('tags'):
tags = module.params.get('tags')
if module.params.get('name') and not tags.get('Name'):
tags['Name'] = module.params['name']
nacl = find_acl_by_id(nacl_id, client, module)
if nacl['NetworkAcls']:
nacl_values = [t.values() for t in nacl['NetworkAcls'][0]['Tags']]
nacl_tags = [item for sublist in nacl_values for item in sublist]
tag_values = [[key, str(value)] for key, value in tags.items()]
tags = [item for sublist in tag_values for item in sublist]
if sorted(nacl_tags) == sorted(tags):
changed = False
return changed
else:
delete_tags(nacl_id, client, module)
create_tags(nacl_id, client, module)
changed = True
return changed
return changed
def rules_changed(aws_rules, param_rules, Egress, nacl_id, client, module):
changed = False
rules = list()
for entry in param_rules:
rules.append(process_rule_entry(entry, Egress))
if rules == aws_rules:
return changed
else:
removed_rules = [x for x in aws_rules if x not in rules]
if removed_rules:
params = dict()
for rule in removed_rules:
params['NetworkAclId'] = nacl_id
params['RuleNumber'] = rule['RuleNumber']
params['Egress'] = Egress
delete_network_acl_entry(params, client, module)
changed = True
added_rules = [x for x in rules if x not in aws_rules]
if added_rules:
for rule in added_rules:
rule['NetworkAclId'] = nacl_id
create_network_acl_entry(rule, client, module)
changed = True
return changed
def process_rule_entry(entry, Egress):
params = dict()
params['RuleNumber'] = entry[0]
params['Protocol'] = str(PROTOCOL_NUMBERS[entry[1]])
params['RuleAction'] = entry[2]
params['Egress'] = Egress
params['CidrBlock'] = entry[3]
if icmp_present(entry):
params['IcmpTypeCode'] = {"Type": int(entry[4]), "Code": int(entry[5])}
else:
if entry[6] or entry[7]:
params['PortRange'] = {"From": entry[6], 'To': entry[7]}
return params
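# Illustrative sketch: how playbook rule lists map to the parameters sent to the
# EC2 API. The rule values are hypothetical and mirror the EXAMPLES section above.
def _example_process_rule_entry():
    ssh_rule = [100, 'tcp', 'allow', '0.0.0.0/0', None, None, 22, 22]
    icmp_rule = [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
    tcp_params = process_rule_entry(ssh_rule, Egress=False)
    # -> {'RuleNumber': 100, 'Protocol': '6', 'RuleAction': 'allow', 'Egress': False,
    #     'CidrBlock': '0.0.0.0/0', 'PortRange': {'From': 22, 'To': 22}}
    icmp_params = process_rule_entry(icmp_rule, Egress=False)
    # -> same shape but with 'IcmpTypeCode': {'Type': 0, 'Code': 8} instead of 'PortRange'
    return tcp_params, icmp_params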
def restore_default_associations(assoc_ids, default_nacl_id, client, module):
if assoc_ids:
params = dict()
params['NetworkAclId'] = default_nacl_id[0]
for assoc_id in assoc_ids:
params['AssociationId'] = assoc_id
restore_default_acl_association(params, client, module)
return True
def construct_acl_entries(nacl, client, module):
for entry in module.params.get('ingress'):
params = process_rule_entry(entry, Egress=False)
params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId']
create_network_acl_entry(params, client, module)
for rule in module.params.get('egress'):
params = process_rule_entry(rule, Egress=True)
params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId']
create_network_acl_entry(params, client, module)
# Module invocations
def setup_network_acl(client, module):
changed = False
nacl = describe_network_acl(client, module)
if not nacl['NetworkAcls']:
nacl = create_network_acl(module.params.get('vpc_id'), client, module)
nacl_id = nacl['NetworkAcl']['NetworkAclId']
create_tags(nacl_id, client, module)
subnets = subnets_to_associate(nacl, client, module)
replace_network_acl_association(nacl_id, subnets, client, module)
construct_acl_entries(nacl, client, module)
changed = True
return(changed, nacl['NetworkAcl']['NetworkAclId'])
else:
changed = False
nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
subnet_result = subnets_changed(nacl, client, module)
nacl_result = nacls_changed(nacl, client, module)
tag_result = tags_changed(nacl_id, client, module)
if subnet_result is True or nacl_result is True or tag_result is True:
changed = True
return(changed, nacl_id)
return (changed, nacl_id)
def remove_network_acl(client, module):
changed = False
result = dict()
nacl = describe_network_acl(client, module)
if nacl['NetworkAcls']:
nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
vpc_id = nacl['NetworkAcls'][0]['VpcId']
associations = nacl['NetworkAcls'][0]['Associations']
assoc_ids = [a['NetworkAclAssociationId'] for a in associations]
default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)
if not default_nacl_id:
result
ts.sequence_length
n = np.array([len(u) for u in sample_sets])
def f(x):
return x * (x < n)
# Determine output_dim of the function
for mode in ("site", "branch", "node"):
sigma1 = ts.sample_count_stat(sample_sets, f, 3, windows=windows, mode=mode)
sigma2 = ts.sample_count_stat(
sample_sets, f, 3, windows=windows, mode=mode, span_normalise=True
)
sigma3 = ts.sample_count_stat(
sample_sets, f, 3, windows=windows, mode=mode, span_normalise=False
)
denom = np.diff(windows)[:, np.newaxis]
if mode == "node":
denom = np.diff(windows)[:, np.newaxis, np.newaxis]
self.assertEqual(sigma1.shape, sigma2.shape)
self.assertEqual(sigma1.shape, sigma3.shape)
self.assertArrayAlmostEqual(sigma1, sigma2)
self.assertArrayAlmostEqual(sigma1, sigma3 / denom)
class TestSampleSetIndexes(StatsTestCase):
"""
Tests that we get the correct behaviour from the indexes argument to
k-way stats functions.
"""
def get_example_ts(self):
ts = msprime.simulate(10, mutation_rate=1, random_seed=1)
self.assertGreater(ts.num_mutations, 0)
return ts
def test_2_way_default(self):
ts = self.get_example_ts()
sample_sets = np.array_split(ts.samples(), 2)
S1 = ts.divergence(sample_sets)
S2 = divergence(ts, sample_sets)[0, 0]
S3 = ts.divergence(sample_sets, [0, 1])
self.assertEqual(S1.shape, S2.shape)
self.assertArrayAlmostEqual(S1, S2)
self.assertArrayAlmostEqual(S1, S3)
sample_sets = np.array_split(ts.samples(), 3)
with self.assertRaises(ValueError):
_ = ts.divergence(sample_sets)
with self.assertRaises(ValueError):
_ = ts.divergence([sample_sets[0]])
def test_3_way_default(self):
ts = self.get_example_ts()
sample_sets = np.array_split(ts.samples(), 3)
S1 = ts.f3(sample_sets)
S2 = f3(ts, sample_sets)[0, 0]
S3 = ts.f3(sample_sets, [0, 1, 2])
self.assertEqual(S1.shape, S2.shape)
self.assertArrayAlmostEqual(S1, S2)
self.assertArrayAlmostEqual(S1, S3)
sample_sets = np.array_split(ts.samples(), 4)
with self.assertRaises(ValueError):
_ = ts.f3(sample_sets)
def test_4_way_default(self):
ts = self.get_example_ts()
sample_sets = np.array_split(ts.samples(), 4)
S1 = ts.f4(sample_sets)
S2 = f4(ts, sample_sets)
S3 = ts.f4(sample_sets, [0, 1, 2, 3])
self.assertEqual(S1.shape, S3.shape)
self.assertArrayAlmostEqual(S1, S2)
self.assertArrayAlmostEqual(S1, S3)
sample_sets = np.array_split(ts.samples(), 5)
with self.assertRaises(ValueError):
_ = ts.f4(sample_sets)
def test_2_way_combinations(self):
ts = self.get_example_ts()
sample_sets = np.array_split(ts.samples(), 4)
pairs = list(itertools.combinations(range(4), 2))
for k in range(1, len(pairs)):
S1 = ts.divergence(sample_sets, pairs[:k])
S2 = divergence(ts, sample_sets, pairs[:k])[0]
self.assertEqual(S1.shape[-1], k)
self.assertEqual(S1.shape, S2.shape)
self.assertArrayAlmostEqual(S1, S2)
def test_3_way_combinations(self):
ts = self.get_example_ts()
sample_sets = np.array_split(ts.samples(), 5)
triples = list(itertools.combinations(range(5), 3))
for k in range(1, len(triples)):
S1 = ts.Y3(sample_sets, triples[:k])
S2 = Y3(ts, sample_sets, triples[:k])[0]
self.assertEqual(S1.shape[-1], k)
self.assertEqual(S1.shape, S2.shape)
self.assertArrayAlmostEqual(S1, S2)
def test_4_way_combinations(self):
ts = self.get_example_ts()
sample_sets = np.array_split(ts.samples(), 5)
quads = list(itertools.combinations(range(5), 4))
for k in range(1, len(quads)):
S1 = ts.f4(sample_sets, quads[:k], windows=[0, ts.sequence_length])
S2 = f4(ts, sample_sets, quads[:k])
self.assertEqual(S1.shape[-1], k)
self.assertEqual(S1.shape, S2.shape)
self.assertArrayAlmostEqual(S1, S2)
def test_errors(self):
ts = self.get_example_ts()
sample_sets = np.array_split(ts.samples(), 2)
with self.assertRaises(ValueError):
ts.divergence(sample_sets, indexes=[])
with self.assertRaises(ValueError):
ts.divergence(sample_sets, indexes=[(1, 1, 1)])
with self.assertRaises(exceptions.LibraryError):
ts.divergence(sample_sets, indexes=[(1, 2)])
class TestGeneralStatInterface(StatsTestCase):
"""
Tests for the basic interface for general_stats.
"""
def get_tree_sequence(self):
ts = msprime.simulate(10, recombination_rate=2, mutation_rate=2, random_seed=1)
return ts
def test_function_cannot_update_state(self):
ts = self.get_tree_sequence()
def f(x):
out = x.copy()
x[:] = 0.0
return out
def g(x):
return x
x = ts.sample_count_stat(
[ts.samples()],
f,
output_dim=1,
strict=False,
mode="node",
span_normalise=False,
)
y = ts.sample_count_stat(
[ts.samples()],
g,
output_dim=1,
strict=False,
mode="node",
span_normalise=False,
)
self.assertArrayEqual(x, y)
def test_default_mode(self):
ts = msprime.simulate(10, recombination_rate=1, random_seed=2)
W = np.ones((ts.num_samples, 2))
sigma1 = ts.general_stat(W, self.identity_f(ts), W.shape[1])
sigma2 = ts.general_stat(W, self.identity_f(ts), W.shape[1], mode="site")
self.assertArrayEqual(sigma1, sigma2)
def test_bad_mode(self):
ts = msprime.simulate(10, recombination_rate=1, random_seed=2)
W = np.ones((ts.num_samples, 2))
for bad_mode in ["", "MODE", "x" * 8192]:
with self.assertRaises(ValueError):
ts.general_stat(W, self.identity_f(ts), W.shape[1], mode=bad_mode)
def test_bad_window_strings(self):
ts = self.get_tree_sequence()
with self.assertRaises(ValueError):
ts.diversity([ts.samples()], mode="site", windows="abc")
with self.assertRaises(ValueError):
ts.diversity([ts.samples()], mode="site", windows="")
with self.assertRaises(ValueError):
ts.diversity([ts.samples()], mode="tree", windows="abc")
def test_bad_summary_function(self):
ts = self.get_tree_sequence()
W = np.ones((ts.num_samples, 3))
with self.assertRaises(ValueError):
ts.general_stat(W, lambda x: x, 3, windows="sites")
with self.assertRaises(ValueError):
ts.general_stat(W, lambda x: np.array([1.0]), 1, windows="sites")
def test_nonnumpy_summary_function(self):
ts = self.get_tree_sequence()
W = np.ones((ts.num_samples, 3))
sigma1 = ts.general_stat(W, lambda x: [0.0], 1)
sigma2 = ts.general_stat(W, lambda x: np.array([0.0]), 1)
self.assertArrayEqual(sigma1, sigma2)
class TestGeneralBranchStats(StatsTestCase):
"""
Tests for general branch stats (using functions and arbitrary weights)
"""
def compare_general_stat(self, ts, W, f, windows=None, polarised=False):
# Determine output_dim of the function
M = len(f(W[0]))
sigma1 = naive_branch_general_stat(ts, W, f, windows, polarised=polarised)
sigma2 = ts.general_stat(W, f, M, windows, polarised=polarised, mode="branch")
sigma3 = branch_general_stat(ts, W, f, windows, polarised=polarised)
self.assertEqual(sigma1.shape, sigma2.shape)
self.assertEqual(sigma1.shape, sigma3.shape)
self.assertArrayAlmostEqual(sigma1, sigma2)
self.assertArrayAlmostEqual(sigma1, sigma3)
return sigma1
def test_simple_identity_f_w_zeros(self):
ts = msprime.simulate(12, recombination_rate=3, random_seed=2)
W = np.zeros((ts.num_samples, 3))
for polarised in [True, False]:
sigma = self.compare_general_stat(
ts, W, self.identity_f(ts), windows="trees", polarised=polarised
)
self.assertEqual(sigma.shape, (ts.num_trees, W.shape[1]))
self.assertTrue(np.all(sigma == 0))
def test_simple_identity_f_w_ones(self):
ts = msprime.simulate(10, recombination_rate=1, random_seed=2)
W = np.ones((ts.num_samples, 2))
sigma = self.compare_general_stat(
ts, W, self.identity_f(ts), windows="trees", polarised=True
)
self.assertEqual(sigma.shape, (ts.num_trees, W.shape[1]))
# A W of 1 for every node and identity f counts the samples in the subtree
# if polarised is True.
for tree in ts.trees():
s = sum(tree.num_samples(u) * tree.branch_length(u) for u in tree.nodes())
self.assertTrue(np.allclose(sigma[tree.index], s))
def test_simple_cumsum_f_w_ones(self):
ts = msprime.simulate(13, recombination_rate=1, random_seed=2)
W = np.ones((ts.num_samples, 8))
for polarised in [True, False]:
sigma = self.compare_general_stat(
ts, W, self.cumsum_f(ts), windows="trees", polarised=polarised
)
self.assertEqual(sigma.shape, (ts.num_trees, W.shape[1]))
def test_simple_cumsum_f_w_ones_many_windows(self):
ts = msprime.simulate(15, recombination_rate=3, random_seed=3)
self.assertGreater(ts.num_trees, 3)
windows = np.linspace(0, ts.sequence_length, num=ts.num_trees * 10)
W = np.ones((ts.num_samples, 3))
sigma = self.compare_general_stat(ts, W, self.cumsum_f(ts), windows=windows)
self.assertEqual(sigma.shape, (windows.shape[0] - 1, W.shape[1]))
def test_windows_equal_to_ts_breakpoints(self):
ts = msprime.simulate(14, recombination_rate=1, random_seed=2)
W = np.ones((ts.num_samples, 1))
for polarised in [True, False]:
sigma_no_windows = self.compare_general_stat(
ts, W, self.cumsum_f(ts), windows="trees", polarised=polarised
)
self.assertEqual(sigma_no_windows.shape, (ts.num_trees, W.shape[1]))
sigma_windows = self.compare_general_stat(
ts,
W,
self.cumsum_f(ts),
windows=ts.breakpoints(as_array=True),
polarised=polarised,
)
self.assertEqual(sigma_windows.shape, sigma_no_windows.shape)
self.assertTrue(np.allclose(sigma_windows, sigma_no_windows))
def test_single_tree_windows(self):
ts = msprime.simulate(15, random_seed=2, length=100)
W = np.ones((ts.num_samples, 2))
f = self.sum_f(ts)
# for num_windows in range(1, 10):
for num_windows in [2]:
windows = np.linspace(0, ts.sequence_length, num=num_windows + 1)
sigma = self.compare_general_stat(ts, W, f, windows)
self.assertEqual(sigma.shape, (num_windows, 1))
def test_simple_identity_f_w_zeros_windows(self):
ts = msprime.simulate(15, recombination_rate=3, random_seed=2)
W = np.zeros((ts.num_samples, 3))
f = self.identity_f(ts)
windows = np.linspace(0, ts.sequence_length, num=11)
for polarised in [True, False]:
sigma = self.compare_general_stat(ts, W, f, windows, polarised=polarised)
self.assertEqual(sigma.shape, (10, W.shape[1]))
self.assertTrue(np.all(sigma == 0))
class TestGeneralSiteStats(StatsTestCase):
"""
Tests for general site stats (using functions and arbitrary weights)
"""
def compare_general_stat(self, ts, W, f, windows=None, polarised=False):
# Determine output_dim of the function
M = len(f(W[0]))
sigma1 = naive_site_general_stat(ts, W, f, windows, polarised=polarised)
sigma2 = ts.general_stat(W, f, M, windows, polarised=polarised, mode="site")
sigma3 = site_general_stat(ts, W, f, windows, polarised=polarised)
self.assertEqual(sigma1.shape, sigma2.shape)
self.assertEqual(sigma1.shape, sigma3.shape)
self.assertArrayAlmostEqual(sigma1, sigma2)
self.assertArrayAlmostEqual(sigma1, sigma3)
return sigma1
def test_identity_f_W_0_multiple_alleles(self):
ts = msprime.simulate(20, recombination_rate=0, random_seed=2)
ts = tsutil.jukes_cantor(ts, 20, 1, seed=10)
W = np.zeros((ts.num_samples, 3))
for polarised in [True, False]:
sigma = self.compare_general_stat(
ts, W, self.identity_f(ts), windows="sites", polarised=polarised
)
self.assertEqual(sigma.shape, (ts.num_sites, W.shape[1]))
self.assertTrue(np.all(sigma == 0))
def test_identity_f_W_0_multiple_alleles_windows(self):
ts = msprime.simulate(34, recombination_rate=0, random_seed=2)
ts = tsutil.jukes_cantor(ts, 20, 1, seed=10)
W = np.zeros((ts.num_samples, 3))
windows = np.linspace(0, 1, num=11)
for polarised in [True, False]:
sigma = self.compare_general_stat(
ts, W, self.identity_f(ts), windows=windows, polarised=polarised
)
self.assertEqual(sigma.shape, (windows.shape[0] - 1, W.shape[1]))
self.assertTrue(np.all(sigma == 0))
def test_cumsum_f_W_1_multiple_alleles(self):
ts = msprime.simulate(3, recombination_rate=2, random_seed=2)
ts = tsutil.jukes_cantor(ts, 20, 1, seed=10)
W = np.ones((ts.num_samples, 3))
for polarised in [True, False]:
sigma = self.compare_general_stat(
ts, W, self.cumsum_f(ts), windows="sites", polarised=polarised
)
self.assertEqual(sigma.shape, (ts.num_sites, W.shape[1]))
def test_cumsum_f_W_1_two_alleles(self):
ts = msprime.simulate(33, recombination_rate=1, mutation_rate=2, random_seed=1)
W = np.ones((ts.num_samples, 5))
for polarised in [True, False]:
sigma = self.compare_general_stat(
ts, W, self.cumsum_f(ts), windows="sites", polarised=polarised
)
self.assertEqual(sigma.shape, (ts.num_sites, W.shape[1]))
class TestGeneralNodeStats(StatsTestCase):
"""
Tests for general node stats (using functions and arbitrary weights)
"""
def compare_general_stat(self, ts, W, f, windows=None, polarised=False):
# Determine output_dim of the function
M = len(f(W[0]))
sigma1 = naive_node_general_stat(ts, W, f, windows, polarised=polarised)
sigma2 = ts.general_stat(W, f, M, windows, polarised=polarised, mode="node")
sigma3 = node_general_stat(ts, W, f, windows, polarised=polarised)
self.assertEqual(sigma1.shape, sigma2.shape)
self.assertEqual(sigma1.shape, sigma3.shape)
self.assertArrayAlmostEqual(sigma1, sigma2)
self.assertArrayAlmostEqual(sigma1, sigma3)
return sigma1
def test_simple_sum_f_w_zeros(self):
ts = msprime.simulate(12, recombination_rate=3, random_seed=2)
W = np.zeros((ts.num_samples, 3))
for polarised in [True, False]:
sigma = self.compare_general_stat(
ts, W, self.identity_f(ts), windows="trees", polarised=polarised
)
self.assertEqual(sigma.shape, (ts.num_trees, ts.num_nodes, 3))
self.assertTrue(np.all(sigma == 0))
def test_simple_sum_f_w_ones(self):
ts = msprime.simulate(44, recombination_rate=1, random_seed=2)
W = np.ones((ts.num_samples, 2))
f = self.sum_f(ts)
sigma = self.compare_general_stat(ts, W, f, windows="trees", polarised=True)
self.assertEqual(sigma.shape, (ts.num_trees, ts.num_nodes, 1))
# Drop the last dimension
sigma = sigma.reshape((ts.num_trees, ts.num_nodes))
# A W of 1 for every node and f(x)=sum(x) counts the samples in the subtree
# times 2 if polarised is True.
for tree in ts.trees():
s = np.array(
[
tree.num_samples(u) if tree.num_samples(u) < ts.num_samples else 0
for u in range(ts.num_nodes)
]
)
self.assertArrayAlmostEqual(sigma[tree.index], 2 * s)
def test_simple_sum_f_w_ones_notstrict(self):
ts = msprime.simulate(44, recombination_rate=1, random_seed=2)
W = np.ones((ts.num_samples, 2))
sigma = ts.general_stat(
W,
lambda x: np.array([np.sum(x)]),
1,
windows="trees",
polarised=True,
mode="node",
strict=False,
)
self.assertEqual(sigma.shape, (ts.num_trees, ts.num_nodes, 1))
# Drop the last dimension
sigma = sigma.reshape((ts.num_trees, ts.num_nodes))
# A W of 1 for every node and f(x)=sum(x) counts the samples in the subtree
# times 2 if polarised is True.
# Copyright (c) 2021 Open Risk (https://www.openriskmanagement.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
MARKET_CONDITIONS_CHOICES = [(0, '(a) Few competing suppliers or substantial and durable advantage in location or technology. Demand is strong and growing'),
(1, '(b) Few competing suppliers or better than average location or technology but this situation may not last. Demand is strong and stable'),
(2, '(c) Project has no advantage in location or technology. Demand is adequate and stable'),
(3, '(d) Project has worse than average location or technology. Demand is weak and declining')]
FINANCIAL_RATIOS_CHOICES = [
(0, '(a) Strong financial ratios considering the level of project risk; very robust economic assumptions'),
(1, '(b) Strong to acceptable financial ratios considering the level of project risk; robust project economic assumptions'),
(2, '(c) Standard financial ratios considering the level of project risk'),
(3, '(d) Aggressive financial ratios considering the level of project risk')]
STRESS_ANALYSIS_CHOICES = [(0, '(a) The project can meet its financial obligations under sustained severely stressed economic or sectoral conditions'),
(1, '(b) The project can meet its financial obligations under normal stressed economic or sectoral conditions. The project is only likely to default under severe economic conditions'),
(2, '(c) The project is vulnerable to stresses that are not uncommon through an economic cycle and may default in a normal downturn'),
(3, '(d) The project is likely to default unless conditions improve soon')]
REFINANCING_RISK_CHOICES = [(0,
'(a) There is no or very limited exposure to market or cycle risk since the expected cashflows cover all future loan repayments during the tenor of the loan and there are no significant delays between the cashflows and the loan repayments. There is no or very low refinancing risk'),
(1,
'(b) The exposure to market or cycle risk is limited since the expected cashflows cover the majority of future loan repayments during the tenor of the loan and there are no significant delays between the cashflows and the loan repayments. There is low refinancing risk'),
(2,
'(c) There is moderate exposure to market or cycle risk since the expected cashflows cover only a part of future loan repayments during the tenor of the loan or there are some significant delays between the cashflows and the loan repayments. Average refinancing risk'),
(3,
'(d) There is significant exposure to market or cycle risk since the expected cashflows cover only a small part of future loan repayments during the tenor of the loan or there are some significant delays between the cashflows and the loan repayments. High refinancing risk')]
AMORTISATION_SCHEDULE_CHOICES = [(0, '(a) Amortising debt without bullet repayment'),
(1, '(b) Amortising debt with no or insignificant bullet repayment'),
(2, '(c) Amortising debt repayments with limited bullet payment'),
(3, '(d) Bullet repayment or amortising debt repayments with high bullet repayment')]
FOREIGN_EXCHANGE_RISK_CHOICES = [(0,
'(a) There is no foreign exchange risk because there is no difference in the currency of the loan and the income of the project or because the foreign exchange risk is fully hedged'),
(1,
'(b) There is no foreign exchange risk because there is no difference in the currency of the loan and the income of the project or because the foreign exchange risk is fully hedged'),
(2,
'(c) There is a difference in the currency of the loan and the income of the project but the foreign exchange risk is considered low because the exchange rate is stable or because the foreign exchange risk is hedged to a large extent'),
(3,
'(d) There is a difference in the currency of the loan and the income of the project and the foreign exchange risk is considered high because the exchange rate is volatile and the foreign exchange risk is not hedged to a large extent')]
POLITICAL_RISK_CHOICES = [(0, '(a) Very low exposure; strong mitigation instrument if needed'),
(1, '(b) Low exposure; satisfactory mitigation instrument if needed'),
(2, '(c) Moderate exposure; fair mitigation instruments'),
(3, '(d) High exposure; no or weak mitigation instruments')]
FORCE_MAJEURE_RISK_CHOICES = [(0, '(a) No or very low exposure to force majeure risk'),
(1, '(b) Limited exposure to force majeure risk'),
(2, '(c) Significant exposure to force majeure risk which is not sufficiently mitigated'),
(3, '(d) Significant exposure to force majeure risk which is not mitigated')]
GOVERNMENT_SUPPORT_CHOICES = [(0, '(a) Project of strategic importance for the country (preferably export-oriented). Strong support from Government'),
(1, '(b) Project considered important for the country. Good level of support from Government'),
(2, '(c) Project may not be strategic but brings unquestionable benefits for the country. Support from Government may not be explicit'),
(3, '(d) Project not key to the country. No or weak support from Government')]
LEGAL_AND_REGULATORY_RISK_CHOICES = [(0, '(a) Favourable and stable regulatory environment over the long term'),
(1, '(b) Favourable and stable regulatory environment over the medium term'),
(2, '(c) Regulatory changes can be predicted with a fair level of certainty'),
(3, '(d) Current or future regulatory issues may affect the project')]
PROJECT_APPROVAL_RISK_CHOICES = [(0, '(a) Strong'),
(1, '(b) Satisfactory'),
(2, '(c) Fair'),
(3, '(d) Weak')]
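# Illustrative sketch (not part of the original module): the constants in this
# module are lists of (value, label) tuples in the Django "choices" convention,
# so a model could expose them on IntegerFields. The model and field names below
# are hypothetical examples only.
#
#     from django.db import models
#
#     class SlottingAssessment(models.Model):
#         market_conditions = models.IntegerField(choices=MARKET_CONDITIONS_CHOICES)
#         financial_ratios = models.IntegerField(choices=FINANCIAL_RATIOS_CHOICES)
#         stress_analysis = models.IntegerField(choices=STRESS_ANALYSIS_CHOICES)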
LEGAL_REGIME_CHOICES = [(0, '(a) Contract collateral and security are enforceable'),
(1, '(b) Contract collateral and security are enforceable'),
(2, '(c) Contract collateral and security are considered enforceable even if certain non-key issues may exist'),
(3, '(d) There are unresolved key issues in respect of actual enforcement of contract collateral and security')]
DESIGN_AND_TECHNOLOGY_RISK_CHOICES = [(0, '(a) Fully proven technology and design'),
(1, '(b) Fully proven technology and design'),
(2, '(c) Proven technology and design - start-up issues are mitigated by a strong completion package'),
(3,
'(d) Unproven technology and design; technology issues exist and/or complex design')]
PERMITTING_AND_SITING_CHOICES = [(0, '(a) All permits have been obtained'),
(1, '(b) Some permits are still outstanding but their receipt is considered very likely'),
(2, '(c) Some permits are still outstanding but the permitting process is well defined and they are considered routine'),
(3, '(d) Key permits still need to be obtained and are not considered routine. Significant conditions may be attached')]
TYPE_OF_CONSTRUCTION_CONTRACT_CHOICES = [
(0, '(a) Fixed-price date-certain turnkey construction EPC (engineering and procurement contract)'),
(1, '(b) Fixed-price date-certain turnkey construction EPC'),
(2, '(c) Fixed-price date-certain turnkey construction contract with one or several contractors'),
(3, '(d) No or partial fixed-price turnkey contract and/or interfacing issues with multiple contractors')]
COMPLETION_RISK_CHOICES = [(0, '(a) It is almost certain that the project will be finished within the agreed time horizon and at the agreed cost'),
(1, '(b) It is very likely that the project will be finished within the agreed time horizon and at the agreed cost'),
(2, '(c) It is uncertain whether the project will be finished within the agreed time horizon and at the agreed cost'),
(3, '(d) There are indications that the project will not be finished within the agreed time horizon and at the agreed cost')]
COMPLETION_GUARANTEES_AND_LIQUIDATED_DAMAGES_CHOICES = [(0, '(a) Substantial liquidated damages supported by financial substance and/or strong completion guarantee from sponsors with excellent financial standing'),
(1, '(b) Significant liquidated damages
On")
else:
wait["tag2"] = True
if wait["lang"] == "JP":
kr.sendText(msg.to,"Tag On")
else:
kr.sendText(msg.to,"already on")
elif msg.text in ["Tag2 off","tag2 off"]:
if msg.from_ in admin:
if wait["tag2"] == False:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Already off")
else:
kr.sendText(msg.to,"Tag Off")
else:
wait["tag2"] = False
if wait["lang"] == "JP":
kr.sendText(msg.to,"Tag Off")
else:
kr.sendText(msg.to,"Already off")
#==========================[Kris]===========================
elif msg.text in ["Auto on","auto on"]:
if msg.from_ in admin:
if wait["auto"] == True:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Bot join on")
else:
kr.sendText(msg.to,"Bot join On")
else:
wait["auto"] = True
if wait["lang"] == "JP":
kr.sendText(msg.to,"Bot join On")
else:
kr.sendText(msg.to,"Bot join On")
elif msg.text in ["Auto off","auto off"]:
if msg.from_ in admin:
if wait["auto"] == False:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Bot join off")
else:
kr.sendText(msg.to,"Bot join off")
else:
wait["auto"] = False
if wait["lang"] == "JP":
kr.sendText(msg.to,"Bot join off")
else:
kr.sendText(msg.to,"Bot join off")
#==========================[Kris]===========================
elif "Tambah admin @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff add executing"
_name = msg.text.replace("Tambah admin @","")
_nametarget = _name.rstrip(' ')
# One lookup is enough; the original code fetched the same group once per bot
# account and kept only the last result.
gs = kr.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kr.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.append(target)
kr.sendText(msg.to,"Admin Telah Ditambahkan")
except:
pass
print "[Command]Staff add executed"
else:
kr.sendText(msg.to,"Command Di Tolak Jangan Sedih")
kr.sendText(msg.to,"Sudah Menjadi Admin Maka Tidak Bisa Menjadi Admin Lagi")
#==========================[Kris]===========================
elif "Hapus admin @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff remove executing"
_name = msg.text.replace("Hapus admin @","")
_nametarget = _name.rstrip(' ')
# One lookup is enough; the original code fetched the same group once per bot
# account and kept only the last result.
gs = kr.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kr.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.remove(target)
kr.sendText(msg.to,"Admin Telah Dihapus")
except:
pass
print "[Command]Staff remove executed"
else:
kr.sendText(msg.to,"Command DiTolak")
kr.sendText(msg.to,"Admin Tidak Bisa Menggunakan")
#==========================[Kris]===========================
elif msg.text in ["Adminlist","admlist"]:
if msg.from_ in admin:
if admin == []:
kr.sendText(msg.to,"The adminlist is empty")
else:
kr.sendText(msg.to,"Sabar Dikit Boss Kris.....")
mc = ""
for mi_d in admin:
mc += "☄ " +kr.getContact(mi_d).displayName + "\n"
kr.sendText(msg.to,mc)
print "[Command]Stafflist executed"
#==========================[Kris]===========================
elif ("Ban " in msg.text):
if msg.from_ in admin:
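# The "MENTION" entry in msg.contentMetadata appears to hold a JSON-like string
# whose "MENTIONEES" list carries one {"M": <user mid>} item per mentioned user;
# the loop below collects those mids as ban targets. (Description inferred from
# how the field is used here, not from official LINE documentation.)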
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
kr.sendText(msg.to,"Succes Banned")
except:
pass
#==========================[Kris]===========================
elif msg.text in ["Time"]:
if msg.from_ in admin:
timeNow = datetime.now()
timeHours = datetime.strftime(timeNow,"(%H:%M)")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.today()
hr = inihari.strftime('%A')
bln = inihari.strftime('%B')
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
# Pick the Indonesian month name by month number; the original comparison of
# the English month name against str(k) could never match.
bln = bulan[inihari.month - 1]
rst = hasil + ", " + inihari.strftime('%d') + " - " + bln + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]"
kr.sendText(msg.to, rst)
#client.sendText(msg.to, rst)
#==========================[Kris]===========================
elif msg.text in ["cab","Cab"]:
if msg.from_ in admin:
url = "https://scontent.fcgk2-1.fna.fbcdn.net/v/t1.0-9/26168676_131451404314083_3952554270011807487_n.jpg?oh=6e90aa78daaf5e06b1078bbf15d5aa0f&oe=5AB9882D"
kr.sendImageWithURL(msg.to, url)
#==========================[Kris]===========================
elif msg.text in ["Team","Logo"]:
if msg.from_ in owner:
url = "https://scontent.fcgk2-1.fna.fbcdn.net/v/t1.0-9/26168676_131451404314083_3952554270011807487_n.jpg?oh=6e90aa78daaf5e06b1078bbf15d5aa0f&oe=5AB9882D"
url1 = "https://scontent.fcgk2-1.fna.fbcdn.net/v/t1.0-9/26165506_131451400980750_8433498092579272217_n.jpg?oh=c85beaa35a6f5babd638edeaac9bccaa&oe=5AF760B2"
url2 = "https://scontent.fcgk2-1.fna.fbcdn.net/v/t1.0-9/26168227_131451417647415_680587542176648285_n.jpg?oh=e714a97fec8d8c1e178ab6c0a3ca39cf&oe=5AC96AD3"
url3 = "https://scontent.fcgk2-1.fna.fbcdn.net/v/t1.0-9/26195387_131462840979606_8781956575640573461_n.jpg?oh=27ba5e875917c20df7dd8916bdd64847&oe=5ABB27F4"
url4 = "https://scontent.fcgk2-1.fna.fbcdn.net/v/t1.0-9/26111928_131462844312939_2544207656543605714_n.jpg?oh=0fac796564e963d8b573826263bbc6c7&oe=5AFA67A8"
url5 = "https://scontent.fcgk2-1.fna.fbcdn.net/v/t1.0-9/26219732_131462837646273_1213898565647052451_n.jpg?oh=c5a8bcce115cdab488bde1b8e981e5dd&oe=5AC3A96E"
url6 = "https://scontent.fcgk2-1.fna.fbcdn.net/v/t1.0-9/26167549_131462897646267_3496884138024907307_n.jpg?oh=edc63b98f790e9bf2cbb57dce7df9b25&oe=5AB0DDF6"
url7 = "https://scontent.fcgk2-1.fna.fbcdn.net/v/t1.0-9/26111931_131462894312934_151942458148573227_n.jpg?oh=2b0473a6caf4446df430180a47ca3355&oe=5AC37B56"
kr.sendImageWithURL(msg.to, url)
kr.sendImageWithURL(msg.to, url1)
kr.sendImageWithURL(msg.to, url2)
kr.sendImageWithURL(msg.to, url3)
kr.sendImageWithURL(msg.to, url4)
kr.sendImageWithURL(msg.to, url5)
kr.sendImageWithURL(msg.to, url6)
kr.sendImageWithURL(msg.to, url7)
#==========================[Kris]===========================
elif msg.text in ["Kibar","kibar"]:
if msg.from_ in admin:
url = "https://scontent.fcgk2-1.fna.fbcdn.net/v/t1.0-9/26168676_131451404314083_3952554270011807487_n.jpg?oh=6e90aa78daaf5e06b1078bbf15d5aa0f&oe=5AB9882D"
url1 = "https://scontent.fcgk2-1.fna.fbcdn.net/v/t1.0-9/26165506_131451400980750_8433498092579272217_n.jpg?oh=c85beaa35a6f5babd638edeaac9bccaa&oe=5AF760B2"
url6 = "https://scontent.fcgk2-1.fna.fbcdn.net/v/t1.0-9/26167549_131462897646267_3496884138024907307_n.jpg?oh=edc63b98f790e9bf2cbb57dce7df9b25&oe=5AB0DDF6"
url7 = "https://scontent.fcgk2-1.fna.fbcdn.net/v/t1.0-9/26111931_131462894312934_151942458148573227_n.jpg?oh=2b0473a6caf4446df430180a47ca3355&oe=5AC37B56"
kr.sendImageWithURL(msg.to, url)
kr.sendImageWithURL(msg.to, url1)
kr.sendImageWithURL(msg.to, url6)
kr.sendImageWithURL(msg.to, url7)
#==========================[Kris]===========================
elif msg.text in ["setview","Setview"]:
if msg.from_ in admin:
subprocess.Popen("echo '' > dataSeen/"+msg.to+".txt", shell=True, stdout=subprocess.PIPE)
kr.sendText(msg.to, "Checkpoint checked!")
print "@setview"
elif msg.text in ["viewseen","Viewseen"]:
if msg.from_ in admin:
lurkGroup = ""
dataResult, timeSeen, contacts, userList, timelist, recheckData = [], [], [], [], [], []
with open('dataSeen/'+msg.to+'.txt','r') as rr:
contactArr = rr.readlines()
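# Each line of dataSeen/<group id>.txt is assumed to hold "userMid|timestampMillis"
# (that is how the split('|') below interprets it); duplicate entries are collapsed
# with set() before the display names and read times are resolved.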
for v in xrange(len(contactArr) -1,0,-1):
num = re.sub(r'\n', "", contactArr[v])
contacts.append(num)
pass
contacts = list(set(contacts))
for z in range(len(contacts)):
arg = contacts[z].split('|')
userList.append(arg[0])
timelist.append(arg[1])
uL = list(set(userList))
for ll in range(len(uL)):
try:
getIndexUser = userList.index(uL[ll])
timeSeen.append(time.strftime("%H:%M:%S", time.localtime(int(timelist[getIndexUser]) / 1000)))
recheckData.append(userList[getIndexUser])
except IndexError:
# 'cName' is not defined in this handler; skip entries that cannot be resolved
pass
contactId = kr.getContacts(recheckData)
for v in range(len(recheckData)):
dataResult.append(contactId[v].displayName + ' ('+timeSeen[v]+')')
pass
if len(dataResult) > 0:
tukang = "List Viewer\n*"
grp = '\n* '.join(str(f) for f in dataResult)
total = '\n\nTotal %i viewers (%s)' % (len(dataResult), datetime.now().strftime('%H:%M:%S') )
kr.sendText(msg.to, "%s %s %s" % (tukang, grp, total))
else:
kr.sendText(msg.to, "Belum ada viewers")
print "@viewseen"
#==========================[Kris]===========================
elif msg.text in ["Respon","respon"]:
if msg.from_ in admin:
kr.sendText(msg.to,"Bot Cctv Aktif...(^_^)")
elif msg.text in ["Absen","absen"]:
if msg.from_ in admin:
kr1.sendText(msg.to,"💡💡💡👉 Bot Cctv Hadir..!!!")
#==========================[Kris]===========================
elif msg.text in ["Cctv kadieu","cctv kadieu"]:
if msg.from_ in owner:
gid = kr1.getGroupIdsJoined()
gid = kr2.getGroupIdsJoined()
gid = kr3.getGroupIdsJoined()
gid = kr4.getGroupIdsJoined()
gid = kr5.getGroupIdsJoined()
for i in gid:
kr1.sendText(i,"Bye~Bye\n\nBots Dipaksa Keluar oleh Owner Bots...!!!\nuntuk konfirmasi => line://ti/p/~krissthea\nMakasih...!!!")
kr.sendImageWithURL(i, url123)
kr1.leaveGroup(i)
kr2.leaveGroup(i)
kr3.leaveGroup(i)
kr4.leaveGroup(i)
kr5.leaveGroup(i)
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\nBots Dipaksa Keluar oleh Owner Bots...!!!\nMakasih...!!!")
else:
kr1.sendText(msg.to,"He declined all invitations")
#==========================[Kris]===========================
elif msg.text.lower() == 'bot':
kr.sendImageWithURL(msg.to, url123)
kr.sendText(msg.to,"↥↥↥↥↥↪ Pembuat Bots ↩↥↥↥↥↥")
elif "Bcgrup: " in msg.text:
if msg.from_ in owner:
bc = msg.text.replace("Bcgrup: ","")
gid = kr.getGroupIdsJoined()
for i in gid:
kr.sendText(i,"╠═════[BROADCAST]═════╣\n\n"+bc+"\n\nMAAF BROADCAST!!\n\n=>>line://ti/p/~krissthea\n●▬▬▬▬ஜ۩۞۩ஜ▬▬▬▬●")
kr.sendText(msg.to, "Brodcastgrup sukses")
#==========================[Kris]===========================
elif msg.text in ["Sp","sp"]:
if msg.from_ in admin:
start = time.time()
kr1.sendText(msg.to, "Proses..🔥🔥🔥")
elapsed_time = time.time() - start
kr1.sendText(msg.to, "%s/Detik" % (elapsed_time))
elif msg.text in ["Speed","speed"]:
if msg.from_ in admin:
start = time.time()
kr.sendText(msg.to, "Proses..🔥🔥🔥")
elapsed_time = time.time() - start
kr.sendText(msg.to, "%s/Detik" % (elapsed_time))
#==========================[Kris]===========================
elif msg.text in ["Clearban"]:
if msg.from_ in admin:
wait["blacklist"] = {}
kr.sendText(msg.to,"clear")
elif msg.text in ["Ban"]:
if msg.from_ in admin:
wait["wblacklist"] = True
kr.sendText(msg.to,"Kirim Kontak")
elif msg.text in ["Unban"]:
if msg.from_ in admin:
wait["dblacklist"] = True
kr.sendText(msg.to,"Kirim Kontak")
elif msg.text in ["Banlist"]:
if msg.from_ in admin:
if wait["blacklist"] == {}:
kr.sendText(msg.to,"Tidak Ada")
else:
kr.sendText(msg.to,"Tunggu Sebentar Memuat Data")
mc = ""
for mi_d in wait["blacklist"]:
mc += "☄" +kr.getContact(mi_d).displayName + "\n"
kr.sendText(msg.to,mc)
elif msg.text in ["Cek ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = kr.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += mm + "\n"
kr.sendText(msg.to,cocoa + "")
elif msg.text in ["Kill ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = kr.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
kr.sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
kr.kickoutFromGroup(msg.to,[jj])
kr1.kickoutFromGroup(msg.to,[jj])
kr.sendText(msg.to,"Bye...")
#==========================[Kris]===========================
elif msg.text in ["Batal","batal"]:
if msg.from_ in admin:
if msg.toType == 2:
X = kr.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
kr.cancelGroupInvitation(msg.to, gInviMids)
#==========================[Kris]===========================
elif "Cctv cium " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
kr1.kickoutFromGroup(msg.to,[target])
except:
kr1.sendText(msg.to,"Error")
#==========================[Kris]===========================
elif "Cctv megs " in msg.text:
if msg.from_ in owner:
gName = msg.text.replace("Cctv megs ","")
ap = kr1.getGroups([msg.to])
semua = [contact.mid for contact in ap[0].members]
nya = ap[0].members
for a in nya:
Mi_d = str(a.mid)
kr1.createGroup(gName, semua)
kr1.createGroup(gName, semua)
kr1.createGroup(gName, semua)
kr1.createGroup(gName, semua)
kr1.createGroup(gName, semua)
kr1.createGroup(gName, semua)
kr1.createGroup(gName, semua)
elif "#rmegs " in msg.text:
if msg.from_ in owner:
gName = msg.text.replace("#rmegs ","")
ap = kr1.getGroups([msg.to])
# Collect member mids first; the original call referenced an undefined name
semua = [contact.mid for contact in ap[0].members]
nya = ap[0].members
for a in nya:
Mi_d = str(a.mid)
klis=[kr,kr1]
team=random.choice(klis)
kr1.findAndAddContactsByMid(Mi_d)
kr1.createGroup(gName, semua)
kr1.createGroup(gName, semua)
kr1.createGroup(gName, semua)
kr1.createGroup(gName, semua)
kr1.createGroup(gName, semua)
kr1.createGroup(gName, semua)
team.findAndAddContactsByMid(Mi_d)
team.createGroup(gName, semua)
team.createGroup(gName, semua)
team.createGroup(gName, semua)
team.createGroup(gName, semua)
team.createGroup(gName, semua)
team.createGroup(gName, semua)
elif "Rrecover" in msg.text:
if msg.from_ in owner:
thisgroup = kr.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
kr.createGroup("Rrecover", mi_d)
kr.sendText(msg.to,"Success recover")
elif msg.text in ["Cctv spin"]:
if msg.from_ in owner:
thisgroup = kr1.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
kr1.createGroup("Nah kan", mi_d)
kr1.createGroup("Nah kan", mi_d)
kr1.createGroup("Nah kan", mi_d)
kr1.createGroup("Nah kan", mi_d)
kr1.createGroup("Nah kan", mi_d)
kr1.createGroup("Nah kan", mi_d)
kr1.createGroup("Nah kan", mi_d)
kr1.createGroup("Nah kan", mi_d)
kr1.createGroup("Nah kan", mi_d)
kr1.createGroup("Nah kan", mi_d)
kr1.sendText(msg.to,"Success...!!!!")
#==========================[Kris]===========================
elif msg.text in ["Remove all chat"]:
if msg.from_
# Repository: wantedly/recsys2020-challenge
from typing import List, Tuple
from google.cloud import bigquery, bigquery_storage_v1beta1
import pandas as pd
from base import BaseFeature, reduce_mem_usage
class BertSimilarityBetweenTweetAndEngagingSurfacingTweetVectorsFeature(BaseFeature):
# Not used
def import_columns(self) -> List[str]:
...
def make_features(
self, df_train_input: pd.DataFrame, df_test_input: pd.DataFrame
) -> Tuple[pd.DataFrame, pd.DataFrame]:
...
def read_and_save_features(
self,
train_table_name: str,
test_table_name: str,
train_output_path: str,
test_output_path: str,
) -> None:
df_train_features = self._read_from_bigquery(train_table_name)
df_test_features = self._read_from_bigquery(test_table_name)
df_train_features.columns = f"{self.name}_" + df_train_features.columns
df_test_features.columns = f"{self.name}_" + df_test_features.columns
if self.save_memory:
self._logger.info("Reduce memory size - train data")
df_train_features = reduce_mem_usage(df_train_features)
self._logger.info("Reduce memory size - test data")
df_test_features = reduce_mem_usage(df_test_features)
self._logger.info(f"Saving features to {train_output_path}")
df_train_features.to_feather(train_output_path)
self._logger.info(f"Saving features to {test_output_path}")
df_test_features.to_feather(test_output_path)
def _read_from_bigquery(self, table_name: str) -> pd.DataFrame:
self._logger.info(f"Reading from {table_name}")
query = _QUERY.format(table_name=table_name)
if self.debugging:
query += " limit 10000"
bqclient = bigquery.Client(project=self.PROJECT_ID)
bqstorageclient = bigquery_storage_v1beta1.BigQueryStorageClient()
df = (
bqclient.query(query)
.result()
.to_dataframe(bqstorage_client=bqstorageclient)
)
return df
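# Usage sketch (hypothetical; the exact runner conventions come from BaseFeature,
# which is not shown here). The feature is typically instantiated and asked to
# read its pre-computed columns from BigQuery and save them as feather files, e.g.:
#
#     feature = BertSimilarityBetweenTweetAndEngagingSurfacingTweetVectorsFeature()
#     feature.read_and_save_features(
#         "recsys2020.training", "recsys2020.test",
#         "train_features.ftr", "test_features.ftr",
#     )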
_QUERY = r"""
with surfacing_tweets as (
select tweet_id, engaging_user_id
from `recsys2020.training` t
group by tweet_id, engaging_user_id
),
user_surfacing_tweet_vectors as (
select
engaging_user_id as user_id,
avg(gap_0) as gap_0,
avg(gap_1) as gap_1,
avg(gap_2) as gap_2,
avg(gap_3) as gap_3,
avg(gap_4) as gap_4,
avg(gap_5) as gap_5,
avg(gap_6) as gap_6,
avg(gap_7) as gap_7,
avg(gap_8) as gap_8,
avg(gap_9) as gap_9,
avg(gap_10) as gap_10,
avg(gap_11) as gap_11,
avg(gap_12) as gap_12,
avg(gap_13) as gap_13,
avg(gap_14) as gap_14,
avg(gap_15) as gap_15,
avg(gap_16) as gap_16,
avg(gap_17) as gap_17,
avg(gap_18) as gap_18,
avg(gap_19) as gap_19,
avg(gap_20) as gap_20,
avg(gap_21) as gap_21,
avg(gap_22) as gap_22,
avg(gap_23) as gap_23,
avg(gap_24) as gap_24,
avg(gap_25) as gap_25,
avg(gap_26) as gap_26,
avg(gap_27) as gap_27,
avg(gap_28) as gap_28,
avg(gap_29) as gap_29,
avg(gap_30) as gap_30,
avg(gap_31) as gap_31,
avg(gap_32) as gap_32,
avg(gap_33) as gap_33,
avg(gap_34) as gap_34,
avg(gap_35) as gap_35,
avg(gap_36) as gap_36,
avg(gap_37) as gap_37,
avg(gap_38) as gap_38,
avg(gap_39) as gap_39,
avg(gap_40) as gap_40,
avg(gap_41) as gap_41,
avg(gap_42) as gap_42,
avg(gap_43) as gap_43,
avg(gap_44) as gap_44,
avg(gap_45) as gap_45,
avg(gap_46) as gap_46,
avg(gap_47) as gap_47,
avg(gap_48) as gap_48,
avg(gap_49) as gap_49,
avg(gap_50) as gap_50,
avg(gap_51) as gap_51,
avg(gap_52) as gap_52,
avg(gap_53) as gap_53,
avg(gap_54) as gap_54,
avg(gap_55) as gap_55,
avg(gap_56) as gap_56,
avg(gap_57) as gap_57,
avg(gap_58) as gap_58,
avg(gap_59) as gap_59,
avg(gap_60) as gap_60,
avg(gap_61) as gap_61,
avg(gap_62) as gap_62,
avg(gap_63) as gap_63,
avg(gap_64) as gap_64,
avg(gap_65) as gap_65,
avg(gap_66) as gap_66,
avg(gap_67) as gap_67,
avg(gap_68) as gap_68,
avg(gap_69) as gap_69,
avg(gap_70) as gap_70,
avg(gap_71) as gap_71,
avg(gap_72) as gap_72,
avg(gap_73) as gap_73,
avg(gap_74) as gap_74,
avg(gap_75) as gap_75,
avg(gap_76) as gap_76,
avg(gap_77) as gap_77,
avg(gap_78) as gap_78,
avg(gap_79) as gap_79,
avg(gap_80) as gap_80,
avg(gap_81) as gap_81,
avg(gap_82) as gap_82,
avg(gap_83) as gap_83,
avg(gap_84) as gap_84,
avg(gap_85) as gap_85,
avg(gap_86) as gap_86,
avg(gap_87) as gap_87,
avg(gap_88) as gap_88,
avg(gap_89) as gap_89,
avg(gap_90) as gap_90,
avg(gap_91) as gap_91,
avg(gap_92) as gap_92,
avg(gap_93) as gap_93,
avg(gap_94) as gap_94,
avg(gap_95) as gap_95,
avg(gap_96) as gap_96,
avg(gap_97) as gap_97,
avg(gap_98) as gap_98,
avg(gap_99) as gap_99,
avg(gap_100) as gap_100,
avg(gap_101) as gap_101,
avg(gap_102) as gap_102,
avg(gap_103) as gap_103,
avg(gap_104) as gap_104,
avg(gap_105) as gap_105,
avg(gap_106) as gap_106,
avg(gap_107) as gap_107,
avg(gap_108) as gap_108,
avg(gap_109) as gap_109,
avg(gap_110) as gap_110,
avg(gap_111) as gap_111,
avg(gap_112) as gap_112,
avg(gap_113) as gap_113,
avg(gap_114) as gap_114,
avg(gap_115) as gap_115,
avg(gap_116) as gap_116,
avg(gap_117) as gap_117,
avg(gap_118) as gap_118,
avg(gap_119) as gap_119,
avg(gap_120) as gap_120,
avg(gap_121) as gap_121,
avg(gap_122) as gap_122,
avg(gap_123) as gap_123,
avg(gap_124) as gap_124,
avg(gap_125) as gap_125,
avg(gap_126) as gap_126,
avg(gap_127) as gap_127,
avg(gap_128) as gap_128,
avg(gap_129) as gap_129,
avg(gap_130) as gap_130,
avg(gap_131) as gap_131,
avg(gap_132) as gap_132,
avg(gap_133) as gap_133,
avg(gap_134) as gap_134,
avg(gap_135) as gap_135,
avg(gap_136) as gap_136,
avg(gap_137) as gap_137,
avg(gap_138) as gap_138,
avg(gap_139) as gap_139,
avg(gap_140) as gap_140,
avg(gap_141) as gap_141,
avg(gap_142) as gap_142,
avg(gap_143) as gap_143,
avg(gap_144) as gap_144,
avg(gap_145) as gap_145,
avg(gap_146) as gap_146,
avg(gap_147) as gap_147,
avg(gap_148) as gap_148,
avg(gap_149) as gap_149,
avg(gap_150) as gap_150,
avg(gap_151) as gap_151,
avg(gap_152) as gap_152,
avg(gap_153) as gap_153,
avg(gap_154) as gap_154,
avg(gap_155) as gap_155,
avg(gap_156) as gap_156,
avg(gap_157) as gap_157,
avg(gap_158) as gap_158,
avg(gap_159) as gap_159,
avg(gap_160) as gap_160,
avg(gap_161) as gap_161,
avg(gap_162) as gap_162,
avg(gap_163) as gap_163,
avg(gap_164) as gap_164,
avg(gap_165) as gap_165,
avg(gap_166) as gap_166,
avg(gap_167) as gap_167,
avg(gap_168) as gap_168,
avg(gap_169) as gap_169,
avg(gap_170) as gap_170,
avg(gap_171) as gap_171,
avg(gap_172) as gap_172,
avg(gap_173) as gap_173,
avg(gap_174) as gap_174,
avg(gap_175) as gap_175,
avg(gap_176) as gap_176,
avg(gap_177) as gap_177,
avg(gap_178) as gap_178,
avg(gap_179) as gap_179,
avg(gap_180) as gap_180,
avg(gap_181) as gap_181,
avg(gap_182) as gap_182,
avg(gap_183) as gap_183,
avg(gap_184) as gap_184,
avg(gap_185) as gap_185,
avg(gap_186) as gap_186,
avg(gap_187) as gap_187,
avg(gap_188) as gap_188,
avg(gap_189) as gap_189,
avg(gap_190) as gap_190,
avg(gap_191) as gap_191,
avg(gap_192) as gap_192,
avg(gap_193) as gap_193,
avg(gap_194) as gap_194,
avg(gap_195) as gap_195,
avg(gap_196) as gap_196,
avg(gap_197) as gap_197,
avg(gap_198) as gap_198,
avg(gap_199) as gap_199,
avg(gap_200) as gap_200,
avg(gap_201) as gap_201,
avg(gap_202) as gap_202,
avg(gap_203) as gap_203,
avg(gap_204) as gap_204,
avg(gap_205) as gap_205,
avg(gap_206) as gap_206,
avg(gap_207) as gap_207,
avg(gap_208) as gap_208,
avg(gap_209) as gap_209,
avg(gap_210) as gap_210,
avg(gap_211) as gap_211,
avg(gap_212) as gap_212,
avg(gap_213) as gap_213,
avg(gap_214) as gap_214,
avg(gap_215) as gap_215,
avg(gap_216) as gap_216,
avg(gap_217) as gap_217,
avg(gap_218) as gap_218,
avg(gap_219) as gap_219,
avg(gap_220) as gap_220,
avg(gap_221) as gap_221,
avg(gap_222) as gap_222,
avg(gap_223) as gap_223,
avg(gap_224) as gap_224,
avg(gap_225) as gap_225,
avg(gap_226) as gap_226,
avg(gap_227) as gap_227,
avg(gap_228) as gap_228,
avg(gap_229) as gap_229,
avg(gap_230) as gap_230,
avg(gap_231) as gap_231,
avg(gap_232) as gap_232,
avg(gap_233) as gap_233,
avg(gap_234) as gap_234,
avg(gap_235) as gap_235,
avg(gap_236) as gap_236,
avg(gap_237) as gap_237,
avg(gap_238) as gap_238,
avg(gap_239) as gap_239,
avg(gap_240) as gap_240,
avg(gap_241) as gap_241,
avg(gap_242) as gap_242,
avg(gap_243) as gap_243,
avg(gap_244) as gap_244,
avg(gap_245) as gap_245,
avg(gap_246) as gap_246,
avg(gap_247) as gap_247,
avg(gap_248) as gap_248,
avg(gap_249) as gap_249,
avg(gap_250) as gap_250,
avg(gap_251) as gap_251,
avg(gap_252) as gap_252,
avg(gap_253) as gap_253,
avg(gap_254) as gap_254,
avg(gap_255) as gap_255,
avg(gap_256) as gap_256,
avg(gap_257) as gap_257,
avg(gap_258) as gap_258,
avg(gap_259) as gap_259,
avg(gap_260) as gap_260,
avg(gap_261) as gap_261,
avg(gap_262) as gap_262,
avg(gap_263) as gap_263,
avg(gap_264) as gap_264,
avg(gap_265) as gap_265,
avg(gap_266) as gap_266,
avg(gap_267) as gap_267,
avg(gap_268) as gap_268,
avg(gap_269) as gap_269,
avg(gap_270) as gap_270,
avg(gap_271) as gap_271,
avg(gap_272) as gap_272,
avg(gap_273) as gap_273,
avg(gap_274) as gap_274,
avg(gap_275) as gap_275,
avg(gap_276) as gap_276,
avg(gap_277) as gap_277,
avg(gap_278) as gap_278,
avg(gap_279) as gap_279,
avg(gap_280) as gap_280,
avg(gap_281) as gap_281,
avg(gap_282) as gap_282,
avg(gap_283) as gap_283,
avg(gap_284) as gap_284,
avg(gap_285) as gap_285,
avg(gap_286) as gap_286,
avg(gap_287) as gap_287,
avg(gap_288) as gap_288,
avg(gap_289) as gap_289,
avg(gap_290) as gap_290,
avg(gap_291) as gap_291,
avg(gap_292) as gap_292,
avg(gap_293) as gap_293,
avg(gap_294) as gap_294,
avg(gap_295) as gap_295,
avg(gap_296) as gap_296,
avg(gap_297) as gap_297,
avg(gap_298) as gap_298,
avg(gap_299) as gap_299,
avg(gap_300) as gap_300,
avg(gap_301) as gap_301,
avg(gap_302) as gap_302,
avg(gap_303) as gap_303,
avg(gap_304) as gap_304,
avg(gap_305) as gap_305,
avg(gap_306) as gap_306,
avg(gap_307) as gap_307,
avg(gap_308) as gap_308,
avg(gap_309) as gap_309,
avg(gap_310) as gap_310,
avg(gap_311) as gap_311,
avg(gap_312) as gap_312,
avg(gap_313) as gap_313,
avg(gap_314) as gap_314,
avg(gap_315) as gap_315,
avg(gap_316) as gap_316,
avg(gap_317) as gap_317,
avg(gap_318) as gap_318,
avg(gap_319) as gap_319,
avg(gap_320) as gap_320,
avg(gap_321) as gap_321,
avg(gap_322) as gap_322,
avg(gap_323) as gap_323,
avg(gap_324) as gap_324,
avg(gap_325) as gap_325,
avg(gap_326) as gap_326,
avg(gap_327) as gap_327,
avg(gap_328) as gap_328,
avg(gap_329) as gap_329,
avg(gap_330) as gap_330,
avg(gap_331) as gap_331,
avg(gap_332) as gap_332,
avg(gap_333) as gap_333,
avg(gap_334) as gap_334,
avg(gap_335) as gap_335,
avg(gap_336) as gap_336,
avg(gap_337) as gap_337,
avg(gap_338) as gap_338,
avg(gap_339) as gap_339,
avg(gap_340) as gap_340,
avg(gap_341) as gap_341,
avg(gap_342) as gap_342,
avg(gap_343) as gap_343,
avg(gap_344) as gap_344,
avg(gap_345) as gap_345,
avg(gap_346) as gap_346,
avg(gap_347) as gap_347,
avg(gap_348) as gap_348,
avg(gap_349) as gap_349,
avg(gap_350) as gap_350,
avg(gap_351) as gap_351,
avg(gap_352) as gap_352,
avg(gap_353) as gap_353,
avg(gap_354) as gap_354,
avg(gap_355) as gap_355,
avg(gap_356) as gap_356,
avg(gap_357) as gap_357,
avg(gap_358) as gap_358,
avg(gap_359) as gap_359,
avg(gap_360) as gap_360,
avg(gap_361) as gap_361,
avg(gap_362) as gap_362,
avg(gap_363) as gap_363,
avg(gap_364) as gap_364,
avg(gap_365) as gap_365,
avg(gap_366) as gap_366,
avg(gap_367) as gap_367,
avg(gap_368) as gap_368,
avg(gap_369) as gap_369,
avg(gap_370) as gap_370,
avg(gap_371) as gap_371,
avg(gap_372) as gap_372,
avg(gap_373) as gap_373,
avg(gap_374) as gap_374,
avg(gap_375) as gap_375,
avg(gap_376)
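# Note (sketch, not part of the original query): a repetitive column list like the
# avg(gap_0) ... avg(gap_N) block above can be generated instead of hand-written.
# The true dimension count is truncated in this source; 768 is assumed here as a
# typical BERT hidden size.
#
#     gap_columns = ",\n".join(f"avg(gap_{i}) as gap_{i}" for i in range(768))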
"""Tests for CCAPI's methods."""
import datetime
import json
import shutil
import tempfile
from decimal import Decimal
from pathlib import Path
from ccapi import CCAPI, NewOrderItem, VatRates, cc_objects, requests
from .. import test_data, test_requests
from .test_CCAPI_class import TestCCAPIMethod
class Test_get_sku_Method(TestCCAPIMethod):
"""Test the get_sku method of CCAPI."""
SKU = test_requests.TestProductOperations.SKU
RESPONSE = test_requests.TestProductOperations.GENERATE_SKU_RESPONSE
def setUp(self):
"""Register request URI."""
super().setUp()
self.register_request(requests.ProductOperations, json=self.RESPONSE)
def test_get_sku(self):
"""Test the get_sku method of CCAPI."""
sku = CCAPI.get_sku()
self.assertEqual(sku, self.SKU)
def test_get_range_sku(self):
"""Test the get_sku method of CCAPI."""
range_sku = CCAPI.get_sku(range_sku=True)
self.assertEqual(range_sku, "RNG_" + self.SKU)
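# Note: register_request (defined on the TestCCAPIMethod base class, not shown
# here) appears to stub the Cloud Commerce endpoint for the given request class so
# that CCAPI calls in these tests hit canned responses rather than the live API.
# This description is inferred from how the helper is used throughout these tests.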
class Test_create_product_Method(TestCCAPIMethod):
"""Test the create_product method of CCAPI."""
RANGE_ID = "4347654"
NAME = "Product Name"
BARCODE = "12345678912"
SKU = "WUA-DU7-W6W"
DESCRIPTION = "Product Description"
VAT_RATE = 20
CREATED_PRODUCT_ID = test_requests.TestAddProduct.CREATED_PRODUCT_ID
RESPONSE = test_requests.TestAddProduct.SUCCESSFUL_RESPONSE
def setUp(self):
"""Register request URI."""
super().setUp()
self.register_request(requests.AddProduct, text=self.RESPONSE)
class Test_create_product_MethodPassingSKU(Test_create_product_Method):
"""Test the create_product method when an SKU is passed."""
def setUp(self):
"""Make request."""
super().setUp()
CCAPI.create_product(
range_id=self.RANGE_ID,
name=self.NAME,
barcode=self.BARCODE,
sku=self.SKU,
description=self.DESCRIPTION,
vat_rate=self.VAT_RATE,
)
def test_sends_product_ID(self):
"""Test a product ID is sent."""
self.assertDataSent("ProdRangeID", self.RANGE_ID)
def test_sends_product_name(self):
"""Test a product name is sent."""
self.assertDataSent("ProdName", self.NAME)
def test_sends_barcode(self):
"""Test a barcode is sent."""
self.assertDataSent("BarCode", self.BARCODE)
def test_sends_SKU(self):
"""Test a SKU is sent."""
self.assertDataSent("SKUCode", self.SKU)
def test_sends_description(self):
"""Test a description is sent."""
self.assertDataSent("ProdDescription", self.DESCRIPTION)
def test_sends_VAT_rate_ID(self):
"""Test a VAT rate ID is sent."""
self.assertDataSent(
"VatRateID", VatRates.get_vat_rate_id_by_rate(self.VAT_RATE)
)
class Test_create_product_MethodWithoutPassingSKU(Test_create_product_Method):
"""Test the create_product method without passing a SKU."""
def setUp(self):
"""Register URI and make request."""
super().setUp()
self.register_request(
requests.ProductOperations, json=Test_get_sku_Method.RESPONSE
)
CCAPI.create_product(
range_id=self.RANGE_ID,
name=self.NAME,
barcode=self.BARCODE,
sku=None,
description=self.DESCRIPTION,
vat_rate=self.VAT_RATE,
)
def test_sends_request_for_new_SKU(self):
"""Test a request is sent for a new SKU if none is provided."""
self.assertRequestUsesRequestClassURI(
requests.ProductOperations, request=self.get_sent_request(skip=2)
)
def test_sends_generated_SKU(self):
"""Test create_product sends the new SKU."""
self.assertDataSent("SKUCode", Test_get_sku_Method.SKU)
class Test_create_product_MethodWithoutPassingADescription(Test_create_product_Method):
"""Test the create_product method without passing a description."""
def setUp(self):
"""Make request."""
super().setUp()
CCAPI.create_product(
range_id=self.RANGE_ID,
name=self.NAME,
barcode=self.BARCODE,
sku=self.SKU,
description=None,
vat_rate=self.VAT_RATE,
)
def test_create_product_without_description(self):
"""Test create_product handles description not being passed."""
self.assertDataSent("ProdDescription", self.NAME)
class Test_delete_product_factory_links_Method(TestCCAPIMethod):
"""Test the CCAPI.delete_product_factory_links method."""
RESPONSE = test_requests.TestDeleteAllProductFactoryLink.RESPONSE
FACTORY_ID = "11782"
def setUp(self):
"""Make test request."""
super().setUp()
self.register_request(requests.DeleteAllProductFactoryLink, text=self.RESPONSE)
CCAPI.delete_product_factory_links(self.FACTORY_ID)
def test_sends_correct_factory_ID(self):
"""Test the correct factory ID is sent."""
self.assertDataSent("FactoryID", self.FACTORY_ID)
class Test_delete_image_Method(TestCCAPIMethod):
"""Test the CCAPI.delete_image method."""
IMAGE_ID = "28173405"
RESPONSE = test_requests.TestDeleteImage.RESPONSE
def setUp(self):
"""Make test request."""
super().setUp()
self.register_request(requests.DeleteImage, text=self.RESPONSE)
CCAPI.delete_image(self.IMAGE_ID)
def test_sends_passed_image_ID(self):
"""Test the correct image ID is sent."""
self.assertDataSent("imgID", self.IMAGE_ID)
class Test_delete_product_factory_link_Method(TestCCAPIMethod):
"""Test the CCAPI.delete_product_factory_link method."""
FACTORY_ID = "3544350"
RESPONSE = test_requests.TestDeleteProductFactoryLink.RESPONSE
def setUp(self):
"""Make test request."""
super().setUp()
self.register_request(requests.DeleteProductFactoryLink, text=self.RESPONSE)
CCAPI.delete_product_factory_link(self.FACTORY_ID)
def test_sends_passed_factory_ID(self):
"""Test the passed factory ID is sent."""
self.assertDataSent("factoryLinkIds", self.FACTORY_ID)
class Test_search_products_Method(TestCCAPIMethod):
"""Test the CCAPI.search_products method."""
RESPONSE = test_requests.TestDoSearch.SUCCESSFUL_RESPONSE
SEARCH_TEXT = "WUA-DU7-W6W"
def setUp(self):
"""Make test request."""
super().setUp()
self.register_request(requests.DoSearch, json=self.RESPONSE)
self.products = CCAPI.search_products(self.SEARCH_TEXT)
def test_sends_correct_product_ID(self):
"""Test sends the correct productID."""
self.assertDataSent("text", self.SEARCH_TEXT)
def test_returns_list(self):
"""Test returns a list instance."""
self.assertIsInstance(self.products, list)
def test_search_result(self):
"""Test returned list contains object with the correct attributes."""
self.assertTrue(hasattr(self.products[0], "id"))
self.assertTrue(hasattr(self.products[0], "variation_id"))
self.assertTrue(hasattr(self.products[0], "name"))
self.assertTrue(hasattr(self.products[0], "sku"))
self.assertTrue(hasattr(self.products[0], "thumbnail"))
class Test_get_product_factory_links_Method(TestCCAPIMethod):
"""Test the CCAPI.get_product_factory_links method."""
PRODUCT_ID = 6_909_316
RESPONSE = [test_requests.TestFindProductFactoryLinks.RESPONSE]
def setUp(self):
"""Make test request."""
super().setUp()
self.register_request(requests.FindProductFactoryLinks, json=self.RESPONSE)
self.factories = CCAPI.get_product_factory_links(self.PRODUCT_ID)
def test_sends_passed_product_ID(self):
"""Test sends the passed product ID."""
self.assertDataSent("ProductID", self.PRODUCT_ID)
def test_returns_FactoryLinks_instance(self):
"""Test returns FactoryLinks instance."""
self.assertIsInstance(self.factories, cc_objects.FactoryLinks)
class Test_get_product_Method(TestCCAPIMethod):
"""Test the CCAPI.get_product method."""
PRODUCT_ID = 6_909_316
RESPONSE = test_data.FIND_PRODUCT_SELECTED_OPTIONS_ONLY_TEST_RESLULT
def setUp(self):
"""Make test request."""
super().setUp()
self.register_request(
requests.FindProductSelectedOptionsOnly, json=self.RESPONSE
)
self.product = CCAPI.get_product(self.PRODUCT_ID)
def test_get_product_sends_correct_product_ID(self):
"""Test CCAPI.get_product sends the passed product ID ."""
self.assertDataSent("ProductID", self.PRODUCT_ID)
def test_get_product_returns_a_product(self):
"""Test CCAPI.get_product returns an cc_objects.Product."""
self.assertDataSent("ProductID", self.PRODUCT_ID)
class Test_get_options_for_product_Method(TestCCAPIMethod):
"""Test the CCAPI.get_options_for_product method."""
PRODUCT_ID = 6_909_316
RESPONSE = test_data.FIND_PRODUCT_SELECTED_OPTIONS_ONLY_TEST_RESLULT
def setUp(self):
"""Make test request."""
super().setUp()
self.register_request(
requests.FindProductSelectedOptionsOnly, json=self.RESPONSE
)
self.product = CCAPI.get_options_for_product(self.PRODUCT_ID)
def test_get_product_sends_correct_product_ID(self):
"""Test CCAPI.get_product sends the passed product ID ."""
self.assertDataSent("ProductID", self.PRODUCT_ID)
def test_get_product_returns_a_product(self):
"""Test CCAPI.get_product returns an cc_objects.Product."""
self.assertDataSent("ProductID", self.PRODUCT_ID)
class Test_barcode_is_in_use_Method(TestCCAPIMethod):
"""Test the CCAPI.barcode_is_in_use method."""
RESPONSE = test_requests.TestProductBarcodeInUse.UNUSED_RESPONSE
BARCODE = "1321564981"
def setUp(self):
"""Make test request."""
super().setUp()
self.register_request(requests.ProductBarcodeInUse, json=self.RESPONSE)
self.barcode_used = CCAPI.barcode_is_in_use(barcode=self.BARCODE)
def test_passed_barcode_is_sent(self):
"""Test the correct barcode is sent."""
self.assertDataSent("BarcodeNumber", self.BARCODE)
def test_bool_is_returned(self):
"""Test that the method returns a boolean."""
self.assertIsInstance(self.barcode_used, bool)
class Test_set_product_barcode_Method(TestCCAPIMethod):
"""Test the CCAPI.set_product_barcode method."""
BARCODE_USED_RESPONSE = test_requests.TestProductBarcodeInUse.USED_RESPONSE
BARCODE_UNUSED_RESPONSE = test_requests.TestProductBarcodeInUse.UNUSED_RESPONSE
RESPONSE = test_requests.TestSaveBarcode.RESPONSE
BARCODE = "1321564981"
PRODUCT_ID = "123654"
def setUp(self):
"""Make test request."""
super().setUp()
self.register_request(
requests.ProductBarcodeInUse, json=self.BARCODE_UNUSED_RESPONSE
)
self.register_request(requests.SaveBarcode, text=self.RESPONSE)
CCAPI.set_product_barcode(barcode=self.BARCODE, product_id=self.PRODUCT_ID)
def test_uses_ProductBarcodeInUse_request(self):
"""Test set_product_barcode uses the ProductBarcodeInUse request."""
self.assertRequestUsesRequestClassURI(
requests.ProductBarcodeInUse, self.get_sent_request(skip=2)
)
def test_passed_barcode_is_sent(self):
"""Test the correct barcode is sent."""
self.assertDataSent("bcode", self.BARCODE)
def test_passed_product_ID_is_sent(self):
"""Test the passed product ID is sent."""
self.assertDataSent("prodid", self.PRODUCT_ID)
def test_raises_for_used_barcode(self):
"""Test exception is raised if barcode is in use."""
self.register_request(
requests.ProductBarcodeInUse, json=self.BARCODE_USED_RESPONSE
)
with self.assertRaises(Exception):
CCAPI.set_product_barcode(barcode=self.BARCODE, product_id=self.PRODUCT_ID)
class Test_set_product_description_Method(TestCCAPIMethod):
"""Test the CCAPI.set_product_description method."""
RESPONSE = test_requests.TestSaveDescription.RESPONSE
PRODUCT_IDS = ["123654", "6909316"]
DESCRIPTION = "A description of a product\n"
def setUp(self):
"""Make test request."""
super().setUp()
self.register_request(requests.SaveDescription, text=self.RESPONSE)
CCAPI.set_product_description(
product_ids=[self.PRODUCT_IDS], description=self.DESCRIPTION
)
def test_passed_product_ID_is_sent(self):
"""Test that the passed product IDs are sent."""
sent_data = self.get_sent_request_data()
for product_id in self.PRODUCT_IDS:
self.assertIn(product_id, str(sent_data["prodids"]))
def test_passed_description_is_sent(self):
"""Test that the passed description is sent."""
self.assertDataSent("desc", self.DESCRIPTION)
def test_passing_single_product_ID_as_string(self):
"""Test passing a single product ID as a string."""
CCAPI.set_product_description(
product_ids=self.PRODUCT_IDS[0], description=self.DESCRIPTION
)
sent_data = self.get_sent_request_data()
self.assertIn(self.PRODUCT_IDS[0], str(sent_data["prodids"]))
class Test_set_product_handling_time_Method(TestCCAPIMethod):
"""Test the CCAPI.set_product_handling_time method."""
RESPONSE = test_requests.TestSaveHandlingTime.RESPONSE
PRODUCT_ID = "6909316"
HANDLING_TIME = 1
def setUp(self):
"""Make test request."""
super().setUp()
self.register_request(requests.SaveHandlingTime, text=self.RESPONSE)
CCAPI.set_product_handling_time(
product_id=self.PRODUCT_ID, handling_time=self.HANDLING_TIME
)
def test_passed_product_ID_is_sent(self):
"""Test that the passed product ID is sent."""
self.assertDataSent("ProductIDs", self.PRODUCT_ID)
def test_passed_handling_time_is_sent(self):
"""Test that the passed handling_time is sent."""
self.assertDataSent("handlingTime", self.HANDLING_TIME)
class Test_set_product_name_Method(TestCCAPIMethod):
"""Test the CCAPI.set_product_name method."""
RESPONSE = test_requests.TestSaveProductName.RESPONSE
PRODUCT_IDS = ["123654", "6909316"]
NAME = "Product Name"
def setUp(self):
"""Make test request."""
super().setUp()
self.register_request(requests.SaveProductName, text=self.RESPONSE)
CCAPI.set_product_name(product_ids=[self.PRODUCT_IDS], name=self.NAME)
def test_passed_product_ID_is_sent(self):
"""Test that the passed product IDs are sent."""
sent_data = self.get_sent_request_data()
for product_id in self.PRODUCT_IDS:
self.assertIn(product_id, str(sent_data["prodids"]))
def test_passed_description_is_sent(self):
"""Test that the passed name is sent."""
self.assertDataSent("name", self.NAME)
def test_passing_single_product_ID_as_string(self):
"""Test passing a single product ID as a string."""
CCAPI.set_product_name(product_ids=self.PRODUCT_IDS[0], name=self.NAME)
sent_data = self.get_sent_request_data()
self.assertIn(self.PRODUCT_IDS[0], str(sent_data["prodids"]))
class Test_set_image_order_Method(TestCCAPIMethod):
"""Test the CCAPI.set_image_order method."""
RESPONSE = test_requests.TestSetImageOrder.RESPONSE
IMAGE_IDS = ["28179547", "28179563", "28179581"]
PRODUCT_ID = "6909316"
def setUp(self):
"""Make test request."""
super().setUp()
self.register_request(requests.SetImageOrder, text=self.RESPONSE)
CCAPI.set_image_order(product_id=self.PRODUCT_ID, image_ids=self.IMAGE_IDS)
def test_passed_product_ID_is_sent(self):
"""Test that the passed product ID is sent."""
self.assertDataSent("prodid", self.PRODUCT_ID)
def test_passed_handling_time_is_sent(self):
"""Test that the passed image IDs are sent."""
self.assertDataSent("order", "^^".join(self.IMAGE_IDS))
class Test_set_product_option_value_Method(TestCCAPIMethod):
"""Test the CCAPI.set_product_option_value method."""
RESPONSE = test_requests.TestSetProductOptionValue.RESPONSE
PRODUCT_IDS = ["123654", "6909316"]
OPTION_ID = "32131"
OPTION_VALUE_ID = "3040649"
def setUp(self):
"""Make test request."""
super().setUp()
self.register_request(requests.SetProductOptionValue, text=self.RESPONSE)
CCAPI.set_product_option_value(
product_ids=self.PRODUCT_IDS,
option_id=self.OPTION_ID,
option_value_id=self.OPTION_VALUE_ID,
)
def test_passed_product_ID_is_sent(self):
"""Test that the passed product IDs are sent."""
sent_data = self.get_sent_request_data()
for product_id in self.PRODUCT_IDS:
self.assertIn(product_id, str(sent_data["prodids"]))
def test_passed_option_ID_is_sent(self):
"""Test that the passed option ID is sent."""
self.assertDataSent("OptionID", self.OPTION_ID)
def test_passed_option_value_is_sent(self):
"""Test that the passed option value is sent."""
self.assertDataSent("OptionValueID", self.OPTION_VALUE_ID)
def test_passing_single_product_ID_as_string(self):
"""Test passing a single product ID as a string."""
CCAPI.set_product_option_value(
product_ids=self.PRODUCT_IDS[0],
option_id=self.OPTION_ID,
option_value_id=self.OPTION_VALUE_ID,
)
sent_data = self.get_sent_request_data()
self.assertIn(self.PRODUCT_IDS[0], str(sent_data["prodids"]))
class Test_set_product_scope_Method(TestCCAPIMethod):
"""Test the CCAPI.set_product_scope method."""
RESPONSE = test_requests.TestSetProductScope.RESPONSE
PRODUCT_ID = "6909316"
WEIGHT = 50
HEIGHT = 25
LENGTH = 75
WIDTH = 90
LARGE_LETTER_COMPATIBLE = False
EXTERNAL_ID = "165481035"
def setUp(self):
"""Make test request."""
super().setUp()
self.register_request(requests.SetProductScope, text=self.RESPONSE)
CCAPI.set_product_scope(
product_id=self.PRODUCT_ID,
weight=self.WEIGHT,
height=self.HEIGHT,
length=self.LENGTH,
width=self.WIDTH,
large_letter_compatible=self.LARGE_LETTER_COMPATIBLE,
external_id=self.EXTERNAL_ID,
)
def test_product_ID_is_sent(self):
"""Test the passed product ID is sent."""
self.assertDataSent("ProductID", self.PRODUCT_ID)
def test_weight_is_sent(self):
"""Test the passed weight is sent."""
self.assertDataSent("Weight", self.WEIGHT)
def test_height_is_sent(self):
"""Test the passed height is sent."""
self.assertDataSent("Height", self.HEIGHT)
def test_length_is_sent(self):
"""Test the passed length is sent."""
self.assertDataSent("Length", self.LENGTH)
def test_width_is_sent(self):
"""Test the passed width is sent."""
self.assertDataSent("Width", self.WIDTH)
def test_large_letter_compatible_is_sent(self):
"""Test the passed large letter compatibilty is sent."""
self.assertDataSent("LargeLetterCompatible", int(self.LARGE_LETTER_COMPATIBLE))
def test_external_ID_is_sent(self):
"""Test the passed external ID is sent."""
self.assertDataSent("ExternalID", self.EXTERNAL_ID)
def test_external_ID_None(self):
"""Test no external ID is sent when None is passed."""
CCAPI.set_product_scope(
product_id=self.PRODUCT_ID,
weight=self.WEIGHT,
height=self.HEIGHT,
length=self.LENGTH,
width=self.WIDTH,
large_letter_compatible=self.LARGE_LETTER_COMPATIBLE,
external_id=None,
)
self.assertDataValueIsNone("ExternalID")
class Test_set_product_base_price_Method(TestCCAPIMethod):
"""Test the CCAPI.set_product_base_price method."""
RESPONSE = test_requests.TestUpdateProductBasePrice.RESPONSE
PRODUCT_ID = "6909316"
PRICE = "6.25"
def setUp(self):
"""Make test request."""
super().setUp()
self.register_request(requests.UpdateProductBasePrice, text=self.RESPONSE)
CCAPI.set_product_base_price(product_id=self.PRODUCT_ID, price=self.PRICE)
def test_passed_product_ID_is_sent(self):
"""Test the passed product ID is sent."""
self.assertDataSent("ProductIDs", [self.PRODUCT_ID])
def test_passed_price_is_sent(self):
"""Test the passed price is sent."""
self.assertDataSent("price", self.PRICE)
class Test_update_product_stock_level_Method(TestCCAPIMethod):
"""Test the CCAPI.update_product_stock_level method."""
RESPONSE = test_requests.TestUpdateProductStockLevel.RESPONSE
PRODUCT_ID = "6909316"
NEW_STOCK_LEVEL = 5
OLD_STOCK_LEVEL = 10
def setUp(self):
"""Make test request."""
super().setUp()
self.register_request(requests.UpdateProductStockLevel, text=self.RESPONSE)
CCAPI.update_product_stock_level(
product_id=self.PRODUCT_ID,
new_stock_level=self.NEW_STOCK_LEVEL,
old_stock_level=self.OLD_STOCK_LEVEL,
)
def test_passed_product_ID_is_sent(self):
"""Test the passed product ID is sent."""
__all__ = ['query']
import os
import time
import pandas as pd
from pathlib import Path
from datetime import datetime
from factiva.core import const
from factiva.news.bulknews import BulkNewsBase, BulkNewsJob
from .query import SnapshotQuery
class Snapshot(BulkNewsBase):
"""
Class that represents a Factiva Snapshot.
Parameters
----------
api_user : str or APIKeyUser
String containing the 32-character long API Key. If not provided, the
constructor will try to obtain its value from the FACTIVA_APIKEY
environment variable.
request_userinfo : boolean, optional (Default: False)
Indicates if user data has to be pulled from the server. This operation
fills account detail properties along with maximum, used and remaining
values. It may take several seconds to complete.
query : str or SnapshotQuery, optional
Query used to run any of the Snapshot-related operations. If a str is
provided, a simple query with a `where` clause is created. If other
query fields are required, either provide the SnapshotQuery object at
creation, or set the appropriate object values after creation. This
parameter is not compatible with snapshot_id.
snapshot_id: str, optional
String containing the 10-character long Snapshot ID. This parameter is
not compatible with query.
See Also
--------
Stream: Class that represents the continuous Factiva News document stream.
Examples
--------
Creating a new Snapshot with a key string and a where statement. Then,
running a full Explain process.
>>> from factiva.news.snapshot import Snapshot
>>> my_key = "<KEY>"
>>> my_query = "publication_datetime >= '2020-01-01 00:00:00' AND LOWER(language_code) = 'en'"
>>> my_snapshot = Snapshot(api_user=my_key, query=my_query)
>>> my_snapshot.process_explain()
106535
Creating a new Snapshot from APIKeyUser and SnapshotQuery instances.
Then, running a full Analytics process.
>>> my_user = APIKeyUser()
>>> my_query = SnapshotQuery("publication_datetime >= '2020-01-01 00:00:00' AND LOWER(language_code) = 'en'")
>>> my_query.frequency = 'YEAR'
>>> my_query.group_by_source_code = True
>>> my_query.top = 20
>>> my_snapshot = Snapshot(api_user=my_user, query=my_query)
>>> analytics_df = my_snapshot.process_analytics()
>>> analytics_df.head()
count publication_datetime source_code
0 20921 1995 NGCIOS
1 20371 1995 LATAM
2 18303 1995 REUTES
3 10593 1995 EXPNSI
4 4212 1995 MUNDO
"""
query = None
folder_path = ''
file_format = ''
file_list = []
news_data = None
last_explain_job = None
last_analytics_job = None
last_extraction_job = None
def __init__(
self,
api_user=None,
request_userinfo=False,
query=None,
snapshot_id=None
):
super().__init__(api_user=api_user, request_userinfo=request_userinfo)
self.last_explain_job = ExplainJob()
self.last_analytics_job = AnalyticsJob()
if query and snapshot_id:
raise Exception("The query and snapshot_id parameters cannot be set simultaneously")
if query:
if type(query) == SnapshotQuery:
self.query = query
elif type(query) == str:
self.query = SnapshotQuery(query)
else:
raise ValueError("Unexpected value for the query-where clause")
self.last_extraction_job = ExtractionJob()
if snapshot_id:
self.query = SnapshotQuery('')
self.last_extraction_job = ExtractionJob(snapshot_id=snapshot_id, api_key=self.api_user.api_key)
self.get_extraction_job_results()
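# Usage note (illustrative, not taken from the original docstring): besides the
# query-based construction shown in the class docstring, an existing snapshot can
# be re-attached by its 10-character ID, e.g.
#     existing = Snapshot(api_user=my_user, snapshot_id='sampleid00')  # placeholder ID
#     existing.download_extraction_files()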
def submit_explain_job(self):
"""
Submits an Explain job to the Factiva Snapshots API, using the
assigned user in the `api_user`, and SnapshotQuery in the `query`
properties.
Returns
-------
Boolean : True if the submission was successful. An Exception otherwise.
"""
explain_url = f'{const.API_HOST}{const.API_SNAPSHOTS_BASEPATH}{const.API_EXPLAIN_SUFFIX}'
self.last_explain_job.submitted_datetime = datetime.now()
response = self.submit_job(endpoint_url=explain_url, payload=self.query.get_explain_query())
if response.status_code == 201:
self.last_explain_job.job_id = response.json()['data']['id']
self.last_explain_job.job_state = response.json()['data']['attributes']['current_state']
self.last_explain_job.link = response.json()['links']['self']
else: # TODO: Add InvalidQuery, invalid syntax error which returns a 400
raise RuntimeError('API Request returned an unexpected HTTP status')
return True
def get_explain_job_results(self):
"""
Obtains the Explain job results from the Factiva Snapshots API. Results
are stored in the `last_explain_job` class property.
Returns
-------
Boolean : True if the data was retrieved successfully. An Exception
otherwise.
"""
if self.last_explain_job.link == '':
raise RuntimeError('Explain job has not yet been submitted or job ID was not set')
response = self.get_job_results(self.last_explain_job.link)
if response.status_code == 200:
self.last_explain_job.job_state = response.json()['data']['attributes']['current_state']
if self.last_explain_job.job_state == 'JOB_STATE_DONE':
self.last_explain_job.document_volume = response.json()['data']['attributes']['counts']
else:
raise RuntimeError('API request returned an unexpected HTTP status')
return True
def process_explain(self):
"""
Submits an Explain job to the Factiva Snapshots API, using the same
parameters used by `submit_explain_job`. Then, monitors the job until
its status changes to `JOB_STATE_DONE`. Finally, retrieves and stores
the results in the property `last_explain_job`.
Returns
-------
Boolean : True if the explain processing was successful. An Exception
otherwise.
"""
self.submit_explain_job()
self.get_explain_job_results()
while(True):
if self.last_explain_job.job_state not in const.API_JOB_EXPECTED_STATES:
raise RuntimeError('Unexpected explain job state')
if self.last_explain_job.job_state == 'JOB_STATE_DONE':
break
elif self.last_explain_job.job_state == 'JOB_STATE_FAILED':
raise Exception('Explain job failed')
else:
time.sleep(const.API_JOB_ACTIVE_WAIT_SPACING)
self.get_explain_job_results()
return True
def submit_analytics_job(self):
"""
Submits an Analytics job to the Factiva Snapshots API, using the
assigned user in the `api_user`, and SnapshotQuery in the `query`
properties.
Returns
-------
Boolean : True if the submission was successful. An Exception otherwise.
"""
analytics_url = f'{const.API_HOST}{const.API_ANALYTICS_BASEPATH}'
self.last_analytics_job.submitted_datetime = datetime.now()
response = self.submit_job(endpoint_url=analytics_url, payload=self.query.get_analytics_query())
if response.status_code == 201:
self.last_analytics_job.job_id = response.json()['data']['id']
self.last_analytics_job.job_state = response.json()['data']['attributes']['current_state']
self.last_analytics_job.link = response.json()['links']['self']
else: # TODO: Add InvalidQuery, invalid syntax error which returns a 400
raise RuntimeError('API Request returned an unexpected HTTP status')
return True
def get_analytics_job_results(self):
"""
Obtains the Analytics job results from the Factiva Snapshots API.
Results are stored in the `last_analytics_job` class property.
Returns
-------
Boolean : True if the data was retrieved successfully. An Exception
otherwise.
"""
if self.last_analytics_job.link == '':
raise RuntimeError('Analytics job has not yet been submitted or job ID was not set')
response = self.get_job_results(self.last_analytics_job.link)
if response.status_code == 200:
self.last_analytics_job.job_state = response.json()['data']['attributes']['current_state']
if self.last_analytics_job.job_state == 'JOB_STATE_DONE':
self.last_analytics_job.data = pd.DataFrame(response.json()['data']['attributes']['results'])
else:
raise RuntimeError('API request returned an unexpected HTTP status')
return True
def process_analytics(self):
"""
Submits an Analytics job to the Factiva Snapshots API, using the same
parameters used by `submit_analytics_job`. Then, monitors the job until
its status changes to `JOB_STATE_DONE`. Finally, retrieves and stores
the results in the property `last_analytics_job`.
Returns
-------
Boolean : True if the analytics processing was successful. An Exception
otherwise.
"""
self.submit_analytics_job()
self.get_analytics_job_results()
while(True):
if self.last_analytics_job.job_state not in const.API_JOB_EXPECTED_STATES:
raise RuntimeError('Unexpected analytics job state')
if self.last_analytics_job.job_state == 'JOB_STATE_DONE':
break
elif self.last_analytics_job.job_state == 'JOB_STATE_FAILED':
raise Exception('Analytics job failed')
else:
time.sleep(const.API_JOB_ACTIVE_WAIT_SPACING)
self.get_analytics_job_results()
return True
def submit_extraction_job(self):
"""
Submits an Extraction job to the Factiva Snapshots API, using the
assigned user in the `api_user`, and SnapshotQuery in the `query`
properties.
Returns
-------
Boolean : True if the submission was successful. An Exception otherwise.
"""
extraction_url = f'{const.API_HOST}{const.API_SNAPSHOTS_BASEPATH}'
self.last_extraction_job.submitted_datetime = datetime.now()
response = self.submit_job(endpoint_url=extraction_url, payload=self.query.get_extraction_query())
if response.status_code == 201:
self.last_extraction_job.job_id = response.json()['data']['id'].split('-')[-1]
self.last_extraction_job.job_state = response.json()['data']['attributes']['current_state']
self.last_extraction_job.link = response.json()['links']['self']
else: # TODO: Add InvalidQuery, invalid syntax error which returns a 400
raise RuntimeError('API Request returned an unexpected HTTP status')
return True
def get_extraction_job_results(self):
"""
Obtains the Extraction job results from the Factiva Snapshots API.
Results are stored in the `last_extraction_job` class property.
Returns
-------
Boolean : True if the data was retrieved successfully. An Exception
otherwise.
"""
if self.last_extraction_job.link == '':
raise RuntimeError('Extraction job has not yet been submitted or job ID was not set.')
response = self.get_job_results(self.last_extraction_job.link)
if response.status_code == 200:
self.last_extraction_job.job_state = response.json()['data']['attributes']['current_state']
if self.last_extraction_job.job_state == 'JOB_STATE_DONE':
self.last_extraction_job.file_format = response.json()['data']['attributes']['format']
file_list = response.json()['data']['attributes']['files']
self.last_extraction_job.files = []
for file_item in file_list:
self.last_extraction_job.files.append(file_item['uri'])
elif response.status_code == 404:
raise RuntimeError('Job ID does not exist.')
else:
det_text = response.text
raise RuntimeError(f'API request returned an unexpected HTTP status, with content [{det_text}]')
return True
def download_extraction_files(self):
"""
Downloads the list of files listed in the Snapshot.last_extraction_job.files
property, and stores them in a folder with the same name as the snapshot ID.
Returns
-------
Boolean : True if the files were correctly downloaded, False otherwise.
"""
# TODO: Accept an alternative location to store the downloaded files.
base_path = os.path.join(os.getcwd(), self.last_extraction_job.job_id)
Path(base_path).mkdir(parents=True, exist_ok=True)
if len(self.last_extraction_job.files) > 0:
for file_uri in self.last_extraction_job.files:
file_name = file_uri.split('/')[-1]
local_path = f'{base_path}/{file_name}'
self.download_file(file_uri, local_path)
return True
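# Illustrative sketch of the manual extraction flow using the methods defined
# above (submit, poll, download). Assumptions: `snapshot` is an existing
# Snapshot instance with `api_user` and `query` set; polling constants come
# from `const` as in the loops above.
#
#   snapshot.submit_extraction_job()
#   snapshot.get_extraction_job_results()
#   while snapshot.last_extraction_job.job_state != 'JOB_STATE_DONE':
#       time.sleep(const.API_JOB_ACTIVE_WAIT_SPACING)
#       snapshot.get_extraction_job_results()
#   snapshot.download_extraction_files()   # saves the extraction files locally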
def process_extraction(self):
"""
Submits an Extraction job to the Factiva Snapshots API, using the same
parameters used by `submit_extraction_job`. Then, monitors the job until
its status changes to `JOB_STATE_DONE`. The final status is retrieved
and stored in the property `last_extraction_job`, which among other
properties, contains the list of files to download. The process then
downloads all files to a subfolder named after the `short_id`
property. Finally, the process ends after all files are downloaded.
Because the whole processing takes place in a single call, it's
expected that the execution of this operation takes several
minutes, or even hours.
Returns
-------
Boolean : True if the extraction processing was successful. An Exception
otherwise.
"""
self.submit_extraction_job()
self.get_extraction_job_results()
while(True):
    if self.last_extraction_job.job_state not in const.API_JOB_EXPECTED_STATES:
        raise RuntimeError('Unexpected extraction job state')
    if self.last_extraction_job.job_state == 'JOB_STATE_DONE':
        break
    elif self.last_extraction_job.job_state == 'JOB_STATE_FAILED':
        raise Exception('Extraction job failed')
    else:
        time.sleep(const.API_JOB_ACTIVE_WAIT_SPACING)
        self.get_extraction_job_results()
self.download_extraction_files()
return True
<filename>confluent_server/confluent/config/attributes.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 IBM Corporation
# Copyright 2015-2019 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#This defines the attributes of various classes of things
# 'nic', meant to be a nested structure under node
# changing mind on design, flattening to a single attribute, a *touch* less
# flexible at the top end, but much easier on the low end
# now net.<name>.attribute scheme
# similarly, leaning toward comma delimited ip addresses, since 99.99% of the
# time each nic will have one ip address
# vlan specification will need to be thought about a tad, each ip could be on
# a distinct vlan, but could have a vlan without an ip for sake of putting
# to a bridge. Current thought is
# vlans attribute would be comma delimited referring to the same index
# as addresses, with either 'native' or a number for vlan id
# the 'joinbridge' attribute would have some syntax like @<vlanid> to indicate
# joining only a vlan of the nic to the bridge
# 'joinbond' attribute would not support vlans.
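# A purely illustrative flattening of the scheme sketched above; the attribute
# names below are hypothetical examples, not confirmed confluent keys:
#   net.eth0.ipv4_addresses: 10.1.1.5/24,10.1.2.5/24
#   net.eth0.vlans: native,200   (vlan entries align index-wise with addresses)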
#nic = {
# 'name': {
# 'description': 'Name in ip/ifconfig as desired by administrator',
# },
# 'biosdevname': {
# 'description': '"biosdevname" scheme to identify the adapter. If not'
# 'mac address match is preferred, then biosdevname, then'
# 'name.',
# },
# 'port': {
# 'description': 'Port that this nic connects to',
# },
# 'switch': {
# 'description': 'Switch that this nic connects to',
# },
# 'customhardwareaddress': {
# 'description': 'Mac address to push to nic',
# },
# 'dnssuffix': {
# 'description': ('String to place after nodename, but before'
# 'Network.Domain to derive FQDN for this NIC'),
# },
# 'hardwareaddress': {
# 'description': 'Active mac address on this nic (factory or custom)'
# },
# 'ipaddresses': {
# 'description': 'Set of IPv4 and IPv6 addresses in CIDR format'
# },
# 'pvid': {
# 'description': 'PVID of port on switch this nic connects to',
# },
# 'mtu': {
# 'description': 'Requested MTU to configure on this interface',
# },
# 'vlans': {
# 'description': 'Tagged VLANs to apply to nic/switch',
# },
# 'dhcpv4enabled': {
# 'description': ('Whether DHCP should be attempted to acquire IPv4'
# 'address on this interface'),
# },
# 'dhcpv6enabled': {
# 'description': ('Whether DHCP should be attempted to acquire IPv6'
# 'address on this interface'),
# },
#}
user = {
'password': {
'description': 'The passphrase used to authenticate this user'
},
}
# 'node', which can be considered a 'system' or a 'vm'
node = {
'groups': {
'type': list,
'description': ('List of static groups for which this node is '
'considered a member'),
},
'type': {
'description': ('Classification of node as server or switch'),
'validvalues': ('switch', 'server'),
},
'crypted.rootpassword': {
'description': 'The password for the local root account. '
'This is stored as a non-recoverable hash. If '
'unspecified and confluent is used to deploy, then '
'login at console using password will be impossible '
'and only key based login can work for root.',
},
'crypted.grubpassword': {
'description': 'Password required to modify grub behavior at boot',
},
'crypted.selfapikey': {
'description': ('Crypt of api key for self api requests by node'),
},
'deployment.encryptboot': {
'description': ('Specify a strategy for encrypting the volume. Support '
'for this setting is currently only enabled for '
'RedHat 8 and CentOS 8 profiles. If blank or unset, '
'no encryption is done. If set to "tpm2" then the OS '
'will freely decrypt so long as the same '
'Trusted Platform Module is available to decrypt the '
'volume. Note that versions earlier than 8.2 may malfunction '
'at boot time if this feature is attempted, depending on configuration.'),
'validvalues': ('tpm2', 'none', ''),
},
'deployment.apiarmed': {
'description': ('Indicates whether the node authentication token interface '
'is armed. If set to once, it will grant only the next '
'request. If set to continuous, will allow many requests. '
'Should not be set unless an OS deployment is pending. '
'Generally this is not directly modified, but is modified '
'by the "nodedeploy" command'),
},
'deployment.sealedapikey': {
'description': 'This attribute is used by some images to save a sealed '
'version of a node apikey, so that a subsequent run with '
'same TPM2 will use the TPM2 to protect the API key rather '
'than local network verification. If this is set, then '
'an api key request will receive this if the api key grant '
'is not armed',
},
#'id': {
# 'description': ('Numeric identifier for node')
#},
# autonode is the feature of generating nodes based on connectivity to
# current node. In recursive autonode, for now we just allow endpoint to
# either be a server directly *or* a server enclosure. This precludes
# for the moment a concept of nested arbitrarily deep, but for now do this.
# hypothetically, one could imagine supporting an array and 'popping'
# names until reaching end. Not worth implementing at this point. If
# a traditional switch is added, it needs some care and feeding anyway.
# If a more exciting scheme presents itself, well we won't have to
# own discovering switches anyway.
# 'autonode.servername': {
# 'description': ('Template for creating nodenames for automatic '
# 'creation of nodes detected as children of '
# 'this node. For example, a node in a server '
# 'enclosure bay or a server connected to a switch or '
# 'an enclosure manager connected to a switch. Certain '
# 'special template parameters are available and can '
# 'be used alongside usual config template directives. '
# '"discovered.nodenumber" will be replaced with the '
# 'bay or port number where the child node is connected.'
# ),
# },
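# Hypothetical illustration of the template described above (the substitution
# syntax is an assumption, not confirmed): autonode.servername set to
# 'enclosure1-bay{discovered.nodenumber}' would name the child detected in
# bay 3 as 'enclosure1-bay3'.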
# 'autonode.servergroups': {
# 'type': list,
# 'description': ('A list of groups to which discovered nodes will '
# 'belong to. As in autonode.servername, "discovered." '
# 'variable names will be substituted in special context')
# },
# 'autonode.enclosurename': {
# 'description': ('Template for creating nodenames when the discovered '
# 'node is an enclosure that will in turn generate nodes.'
# )
# },
# 'autonode.enclosuregroups': {
# 'type': list,
# 'description': ('A list of groups to which a discovered node will be'
# 'placed, presuming that node is an enclosure.')
# },
#For now, we consider this eventuality if needed. For now emphasize paradigm
# of group membership and see how far that goes.
# 'autonode.copyattribs': {
# 'type': list,
# 'description': ('A list of attributes to copy from the node generator '
# 'to the generated node. Expressions will be copied '
# 'over without evaluation, so will be evaluated '
# 'in the context of the generated node, rather than the'
# 'parent node. By default, an enclosure will copy over'
# 'autonode.servername, so that would not need to be '
# 'copied ')
# },
# 'collective.allowedmanagers': {
# 'description': ('Restricted set of deployment and managers in automatic selection
# },
# ssh.equivnodes - control the list of nodes that go into equiv...
'collective.manager': {
'description': ('When in collective mode, the member of the '
'collective currently considered to be responsible '
'for this node. At a future date, this may be '
'modified automatically if another attribute '
'indicates candidate managers, either for '
'high availability or load balancing purposes.')
},
'collective.managercandidates': {
'description': ('A noderange of nodes permitted to be a manager for '
'the node. This controls failover and deployment. If '
'not defined, all managers may deploy and no '
'automatic failover will be performed. '
'Using this requires that collective members be '
'defined as nodes for noderange expansion')
},
'deployment.pendingprofile': {
'description': ('An OS profile that is pending deployment. This indicates to '
'the network boot subsystem what should be offered when a potential '
'network boot request comes in')
},
'deployment.stagedprofile': {
'description': ('A profile
<filename>sktime/classification/dictionary_based/_tde.py
# -*- coding: utf-8 -*-
"""TDE classifiers.
Dictionary based TDE classifiers based on SFA transform. Contains a single
IndividualTDE and TDE.
"""
__author__ = ["MatthewMiddlehurst"]
__all__ = ["TemporalDictionaryEnsemble", "IndividualTDE", "histogram_intersection"]
import math
import time
import warnings
from collections import defaultdict
import numpy as np
from joblib import Parallel, delayed
from numba import njit, types
from numba.typed import Dict
from sklearn import preprocessing
from sklearn.kernel_ridge import KernelRidge
from sklearn.utils import check_random_state
from sktime.classification.base import BaseClassifier
from sktime.transformations.panel.dictionary_based import SFA
from sktime.utils.validation.panel import check_X_y
class TemporalDictionaryEnsemble(BaseClassifier):
"""Temporal Dictionary Ensemble (TDE).
Implementation of the dictionary based Temporal Dictionary Ensemble as described
in [1]_.
Overview: Input "n" series of length "m" with "d" dimensions
TDE searches "k" parameter values selected using a Gaussian processes
regressor, evaluating each with a LOOCV. It then retains "s"
ensemble members.
There are six primary parameters for individual classifiers:
- alpha: alphabet size
- w: window length
- l: word length
- p: normalise/no normalise
- h: levels
- b: MCB/IGB
For any combination, an individual TDE classifier slides a window of
length w along the series. The w length window is shortened to
an l length word through taking a Fourier transform and keeping the
first l/2 complex coefficients. These l coefficients are then discretised
into alpha possible values, to form a word of length l using breakpoints
found using b. A histogram of words for each series is formed and stored,
using a spatial pyramid of h levels. For multivariate series, accuracy
from a reduced histogram is used to select dimensions.
fit involves finding n histograms.
predict uses 1 nearest neighbour with the histogram intersection
distance function (a minimal sketch follows this docstring).
Parameters
----------
n_parameter_samples : int, default=250
Number of parameter combinations to consider for the final ensemble.
max_ensemble_size : int, default=50
Maximum number of estimators in the ensemble.
max_win_len_prop : float, default=1
Maximum window length as a proportion of series length, must be between 0 and 1.
min_window : int, default=10
Minimum window length.
randomly_selected_params: int, default=50
Number of parameters randomly selected before the Gaussian process parameter
selection is used.
bigrams : boolean or None, default=None
Whether to use bigrams, defaults to true for univariate data and false for
multivariate data.
dim_threshold : float, default=0.85
Dimension accuracy threshold for multivariate data, must be between 0 and 1.
max_dims : int, default=20
Max number of dimensions per classifier for multivariate data.
time_limit_in_minutes : int, default=0
Time contract to limit build time in minutes, overriding n_parameter_samples.
Default of 0 means n_parameter_samples is used.
contract_max_n_parameter_samples : int, default=np.inf
Max number of parameter combinations to consider when time_limit_in_minutes is
set.
typed_dict : bool, default=True
Use a numba typed Dict to store word counts. May increase memory usage, but will
be faster for larger datasets. As the Dict cannot be pickled currently, there
will be some overhead converting it to a python dict with multiple threads and
pickling.
save_train_predictions : bool, default=False
Save the ensemble member train predictions in fit for use in _get_train_probs
leave-one-out cross-validation.
n_jobs : int, default=1
The number of jobs to run in parallel for both `fit` and `predict`.
``-1`` means using all processors.
random_state : int or None, default=None
Seed for random number generation.
Attributes
----------
n_classes_ : int
The number of classes.
classes_ : list
The classes labels.
n_instances_ : int
The number of train cases.
n_dims_ : int
The number of dimensions per case.
series_length_ : int
The length of each series.
n_estimators_ : int
The final number of classifiers used (<= max_ensemble_size)
estimators_ : list of shape (n_estimators) of IndividualTDE
The collections of estimators trained in fit.
weights_ : list of shape (n_estimators) of float
Weight of each estimator in the ensemble.
See Also
--------
IndividualTDE, ContractableBOSS
Notes
-----
For the Java version, see
`TSML <https://github.com/uea-machine-learning/tsml/blob/master/src/main/java/
tsml/classifiers/dictionary_based/TDE.java>`_.
References
----------
.. [1] <NAME>, <NAME>, <NAME> and <NAME>
"The Temporal Dictionary Ensemble (TDE) Classifier for Time Series
Classification", in proceedings of the European Conference on Machine Learning
and Principles and Practice of Knowledge Discovery in Databases, 2020.
Examples
--------
>>> from sktime.classification.dictionary_based import TemporalDictionaryEnsemble
>>> from sktime.datasets import load_unit_test
>>> X_train, y_train = load_unit_test(split="train", return_X_y=True)
>>> X_test, y_test = load_unit_test(split="test", return_X_y=True)
>>> clf = TemporalDictionaryEnsemble(
... n_parameter_samples=10,
... max_ensemble_size=5,
... randomly_selected_params=5,
... )
>>> clf.fit(X_train, y_train)
TemporalDictionaryEnsemble(...)
>>> y_pred = clf.predict(X_test)
"""
_tags = {
"capability:multivariate": True,
"capability:train_estimate": True,
"capability:contractable": True,
"capability:multithreading": True,
"classifier_type": "dictionary",
}
def __init__(
self,
n_parameter_samples=250,
max_ensemble_size=50,
max_win_len_prop=1,
min_window=10,
randomly_selected_params=50,
bigrams=None,
dim_threshold=0.85,
max_dims=20,
time_limit_in_minutes=0.0,
contract_max_n_parameter_samples=np.inf,
typed_dict=True,
save_train_predictions=False,
n_jobs=1,
random_state=None,
):
self.n_parameter_samples = n_parameter_samples
self.max_ensemble_size = max_ensemble_size
self.max_win_len_prop = max_win_len_prop
self.min_window = min_window
self.randomly_selected_params = randomly_selected_params
self.bigrams = bigrams
# multivariate
self.dim_threshold = dim_threshold
self.max_dims = max_dims
self.time_limit_in_minutes = time_limit_in_minutes
self.contract_max_n_parameter_samples = contract_max_n_parameter_samples
self.typed_dict = typed_dict
self.save_train_predictions = save_train_predictions
self.random_state = random_state
self.n_jobs = n_jobs
self.n_instances_ = 0
self.n_dims_ = 0
self.series_length_ = 0
self.n_estimators_ = 0
self.estimators_ = []
self.weights_ = []
self._word_lengths = [16, 14, 12, 10, 8]
self._norm_options = [True, False]
self._levels = [1, 2, 3]
self._igb_options = [True, False]
self._alphabet_size = 4
self._weight_sum = 0
self._prev_parameters_x = []
self._prev_parameters_y = []
super(TemporalDictionaryEnsemble, self).__init__()
def _fit(self, X, y):
"""Fit an ensemble on cases (X,y), where y is the target variable.
Build an ensemble of base TDE classifiers from the training set (X,
y), through an optimised selection over the parameter space to make a fixed size
ensemble of the best.
Parameters
----------
X : 3D np.array of shape = [n_instances, n_dimensions, series_length]
The training data.
y : array-like, shape = [n_instances]
The class labels.
Returns
-------
self :
Reference to self.
Notes
-----
Changes state by creating a fitted model that updates attributes
ending in "_" and sets is_fitted flag to True.
"""
if self.n_parameter_samples <= self.randomly_selected_params:
warnings.warn(
"TDE Warning: n_parameter_samples <= randomly_selected_params, "
+ "ensemble member parameters will be fully randomly selected."
)
self.n_instances_, self.n_dims_, self.series_length_ = X.shape
self.estimators_ = []
self.weights_ = []
self._prev_parameters_x = []
self._prev_parameters_y = []
# Window length parameter space dependent on series length
max_window_searches = self.series_length_ / 4
max_window = int(self.series_length_ * self.max_win_len_prop)
win_inc = int((max_window - self.min_window) / max_window_searches)
if win_inc < 1:
win_inc = 1
if self.min_window > max_window + 1:
raise ValueError(
f"Error in TemporalDictionaryEnsemble, min_window ="
f"{self.min_window} is bigger"
f" than max_window ={max_window}."
f" Try set min_window to be smaller than series length in "
f"the constructor, but the classifier may not work at "
f"all with very short series"
)
possible_parameters = self._unique_parameters(max_window, win_inc)
num_classifiers = 0
subsample_size = int(self.n_instances_ * 0.7)
lowest_acc = 1
lowest_acc_idx = 0
time_limit = self.time_limit_in_minutes * 60
start_time = time.time()
train_time = 0
if time_limit > 0:
n_parameter_samples = 0
contract_max_n_parameter_samples = self.contract_max_n_parameter_samples
else:
n_parameter_samples = self.n_parameter_samples
contract_max_n_parameter_samples = np.inf
rng = check_random_state(self.random_state)
if self.bigrams is None:
if self.n_dims_ > 1:
use_bigrams = False
else:
use_bigrams = True
else:
use_bigrams = self.bigrams
# use time limit or n_parameter_samples if limit is 0
while (
(
train_time < time_limit
and num_classifiers < contract_max_n_parameter_samples
)
or num_classifiers < n_parameter_samples
) and len(possible_parameters) > 0:
if num_classifiers < self.randomly_selected_params:
parameters = possible_parameters.pop(
rng.randint(0, len(possible_parameters))
)
else:
scaler = preprocessing.StandardScaler()
scaler.fit(self._prev_parameters_x)
gp = KernelRidge(kernel="poly", degree=1)
gp.fit(
scaler.transform(self._prev_parameters_x), self._prev_parameters_y
)
preds = gp.predict(scaler.transform(possible_parameters))
parameters = possible_parameters.pop(
rng.choice(np.flatnonzero(preds == preds.max()))
)
subsample = rng.choice(
self.n_instances_, size=subsample_size, replace=False
)
X_subsample = X[subsample]
y_subsample = y[subsample]
tde = IndividualTDE(
*parameters,
alphabet_size=self._alphabet_size,
bigrams=use_bigrams,
dim_threshold=self.dim_threshold,
max_dims=self.max_dims,
typed_dict=self.typed_dict,
n_jobs=self._threads_to_use,
random_state=self.random_state,
)
tde.fit(X_subsample, y_subsample)
tde._subsample = subsample
tde._accuracy = self._individual_train_acc(
tde,
y_subsample,
subsample_size,
0 if num_classifiers < self.max_ensemble_size else lowest_acc,
)
if tde._accuracy > 0:
weight = math.pow(tde._accuracy, 4)
else:
weight = 0.000000001
if num_classifiers < self.max_ensemble_size:
if tde._accuracy < lowest_acc:
lowest_acc = tde._accuracy
lowest_acc_idx = num_classifiers
self.weights_.append(weight)
self.estimators_.append(tde)
elif tde._accuracy > lowest_acc:
self.weights_[lowest_acc_idx] = weight
self.estimators_[lowest_acc_idx] = tde
lowest_acc, lowest_acc_idx = self._worst_ensemble_acc()
self._prev_parameters_x.append(parameters)
self._prev_parameters_y.append(tde._accuracy)
num_classifiers += 1
train_time = time.time() - start_time
self.n_estimators_ = len(self.estimators_)
self._weight_sum = np.sum(self.weights_)
return self
def _predict(self, X) -> np.ndarray:
"""Predict class values of n instances in X.
Parameters
----------
X : 3D np.array of shape = [n_instances, n_dimensions, series_length]
# recursively merge members of 2 json objects
def member_wise_merge(j1, j2):
for key in j2.keys():
if key not in j1.keys():
j1[key] = j2[key]
elif type(j1[key]) is dict:
j1[key] = member_wise_merge(j1[key], j2[key])
return j1
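# Worked example of the merge above: keys missing from j1 are filled from j2,
# and nested dicts are merged recursively.
#   member_wise_merge({"a": 1, "b": {"x": 1}}, {"b": {"y": 2}, "c": 3})
#   returns {"a": 1, "b": {"x": 1, "y": 2}, "c": 3}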
# remove comments, taken from stub_format.py
def remove_comments(file_data):
lines = file_data.split("\n")
inside_block = False
conditioned = ""
for line in lines:
if inside_block:
ecpos = line.find("*/")
if ecpos != -1:
inside_block = False
line = line[ecpos+2:]
else:
continue
cpos = line.find("//")
mcpos = line.find("/*")
if cpos != -1:
conditioned += line[:cpos] + "\n"
elif mcpos != -1:
conditioned += line[:mcpos] + "\n"
inside_block = True
else:
conditioned += line + "\n"
return conditioned
# tidy shader source with consistent spaces, remove tabs and comments to make subsequent operations easier
def sanitize_shader_source(shader_source):
# replace tabs with spaces
shader_source = shader_source.replace("\t", " ")
# replace all spaces with single space
shader_source = re.sub(' +', ' ', shader_source)
# remove comments
shader_source = remove_comments(shader_source)
return shader_source
# parse and split into an array, from a list of textures or cbuffers etc
def parse_and_split_block(code_block):
start = code_block.find("{") + 1
end = code_block.find("};")
block_conditioned = code_block[start:end].replace(";", " ")
block_conditioned = block_conditioned.replace(":", " ")
block_conditioned = block_conditioned.replace("(", " ")
block_conditioned = block_conditioned.replace(")", " ")
block_conditioned = block_conditioned.replace(",", " ")
block_conditioned = re.sub(' +', ' ', block_conditioned)
return block_conditioned.split()
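# Worked example of the tokenisation above (the input string is hypothetical):
#   parse_and_split_block("cbuffer per_pass { float4 colour; float4x4 wvp; };")
#   returns ['float4', 'colour', 'float4x4', 'wvp']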
# find the end of a body text enclosed in brackets
def enclose_brackets(text):
body_pos = text.find("{")
bracket_stack = ["{"]
text_len = len(text)
while len(bracket_stack) > 0 and body_pos < text_len:
body_pos += 1
character = text[body_pos:body_pos+1]
if character == "{":
bracket_stack.insert(0, "{")
if character == "}" and bracket_stack[0] == "{":
bracket_stack.pop(0)
body_pos += 1
return body_pos
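# e.g. enclose_brackets("void vs_main() { if (x) { y(); } } // trailing") returns the
# index one past the matching outer '}', suitable as an exclusive slice end.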
# replace all "input" and "output" tokens to "_input" and "_ouput" to avoid glsl keywords
# todo: this should be replaced with "replace_token"
def replace_io_tokens(text):
token_io = ["input", "output"]
token_io_replace = ["_input", "_output"]
token_post_delimiters = ['.', ';', ' ', '(', ')', ',', '-', '+', '*', '/']
token_pre_delimiters = [' ', '\t', '\n', '(', ')', ',', '-', '+', '*', '/']
split = text.split(' ')
split_replace = []
for token in split:
for i in range(0, len(token_io)):
if token_io[i] in token:
last_char = len(token_io[i])
first_char = token.find(token_io[i])
t = token[first_char:first_char+last_char+1]
l = len(t)
if first_char > 0 and token[first_char-1] not in token_pre_delimiters:
continue
if l > last_char:
c = t[last_char]
if c in token_post_delimiters:
token = token.replace(token_io[i], token_io_replace[i])
continue
elif l == last_char:
token = token.replace(token_io[i], token_io_replace[i])
continue
split_replace.append(token)
replaced_text = ""
for token in split_replace:
replaced_text += token + " "
return replaced_text
# get info filename for dependency checking
def get_resource_info_filename(filename, build_dir):
global _info
base_filename = os.path.basename(filename)
dir_path = os.path.dirname(filename)
info_filename = os.path.join(_info.output_dir, os.path.splitext(base_filename)[0], "info.json")
return info_filename, base_filename, dir_path
# check file time stamps and build times to determine if rebuild needs to happen
# returns true if the file does not need re-building, false if a file/dependency is out of date or input has changed
def check_dependencies(filename, included_files):
global _info
# look for .json file
file_list = list()
file_list.append(sanitize_file_path(os.path.join(_info.root_dir, filename)))
file_list.append(sanitize_file_path(_info.this_file))
file_list.append(sanitize_file_path(_info.macros_file))
file_list.append(sanitize_file_path(_info.platform_macros_file))
info_filename, base_filename, dir_path = get_resource_info_filename(filename, _info.output_dir)
for f in included_files:
file_list.append(sanitize_file_path(os.path.join(_info.root_dir, f)))
if os.path.exists(info_filename) and os.path.getsize(info_filename) > 0:
info_file = open(info_filename, "r")
info = json.loads(info_file.read())
if "cmdline" not in info or _info.cmdline_string != info["cmdline"]:
return False
for prev_built_with_file in info["files"]:
sanitized_name = sanitize_file_path(prev_built_with_file["name"])
if sanitized_name in file_list:
if not os.path.exists(sanitized_name):
return False
if prev_built_with_file["timestamp"] < os.path.getmtime(sanitized_name):
info_file.close()
print(os.path.basename(sanitized_name) + " is out of date", flush=True)
return False
else:
print(sanitized_name + " is not in list", flush=True)
return False
if "failures" in info.keys():
if len(info["failures"]) > 0:
return False
info_file.close()
else:
return False
return True
# find generic structs
def find_structs(shader_text, special_structs):
struct_list = []
start = 0
while start != -1:
op = start
start = find_token("struct", shader_text[start:])
if start == -1:
break
start = op + start
end = shader_text.find("};", start)
if end != -1:
end += 2
found_struct = shader_text[start:end]
valid = True
for ss in special_structs:
if ss in found_struct:
valid = False
if valid:
struct_list.append(shader_text[start:end] + "\n")
start = end
return struct_list
def find_c_structs(shader_text):
special_structs = ["vs_output", "ps_input", "ps_output"]
return find_structs(shader_text, special_structs)
def find_struct_declarations(shader_text):
special_structs = ["vs_input", "vs_output", "ps_input", "ps_output", "vs_instance_input"]
return find_structs(shader_text, special_structs)
# find shader resources
def find_shader_resources(shader_text):
start = shader_text.find("declare_texture_samplers")
if start == -1:
start = shader_text.find("shader_resources")
if start == -1:
return "\n"
start = shader_text.find("{", start) + 1
end = shader_text.find("};", start)
texture_sampler_text = shader_text[start:end] + "\n"
texture_sampler_text = texture_sampler_text.replace("\t", "")
texture_sampler_text += "\n"
return texture_sampler_text
# find struct in shader source
def find_struct(shader_text, decl):
delimiters = [" ", "\n", "{"]
start = 0
while True:
start = shader_text.find(decl, start)
if start == -1:
return ""
for d in delimiters:
if shader_text[start+len(decl)] == d:
end = shader_text.find("};", start)
end += 2
if start != -1 and end != -1:
return shader_text[start:end] + "\n\n"
else:
return ""
start += len(decl)
# find cbuffers in source
def find_constant_buffers(shader_text):
cbuffer_list = []
start = 0
while start != -1:
start = shader_text.find("cbuffer", start)
if start == -1:
break
end = shader_text.find("};", start)
if end != -1:
end += 2
cbuffer_list.append(shader_text[start:end] + "\n")
start = end
return cbuffer_list
# find function source
def find_function(shader_text, decl):
start = shader_text.find(decl)
if start == -1:
return ""
body_pos = shader_text.find("{", start)
bracket_stack = ["{"]
text_len = len(shader_text)
while len(bracket_stack) > 0 and body_pos < text_len:
body_pos += 1
character = shader_text[body_pos:body_pos+1]
if character == "{":
bracket_stack.insert(0, "{")
if character == "}" and bracket_stack[0] == "{":
bracket_stack.pop(0)
body_pos += 1
return shader_text[start:body_pos] + "\n\n"
# find functions in source
def find_functions(shader_text):
deliminator_list = [";", "\n"]
function_list = []
start = 0
while 1:
start = shader_text.find("(", start)
if start == -1:
break
# make sure the { opens before any other deliminator
deliminator_pos = shader_text.find(";", start)
body_pos = shader_text.find("{", start)
if deliminator_pos < body_pos:
start = deliminator_pos
continue
# find the function name and return type
function_name = shader_text.rfind(" ", 0, start)
name_str = shader_text[function_name:start]
if name_str.find("if:") != -1:
start = deliminator_pos
continue
function_return_type = 0
for delim in deliminator_list:
decl_start = shader_text.rfind(delim, 0, function_name)
if decl_start != -1:
function_return_type = decl_start
bracket_stack = ["{"]
text_len = len(shader_text)
while len(bracket_stack) > 0 and body_pos < text_len:
body_pos += 1
character = shader_text[body_pos:body_pos+1]
if character == "{":
bracket_stack.insert(0, "{")
if character == "}" and bracket_stack[0] == "{":
bracket_stack.pop(0)
body_pos += 1
function_list.append(shader_text[function_return_type:body_pos] + "\n\n")
start = body_pos
return function_list
# find #include statements
def find_includes(file_text, root):
global added_includes
include_list = []
start = 0
while 1:
start = file_text.find("#include", start)
if start == -1:
break
start = file_text.find("\"", start) + 1
end = file_text.find("\"", start)
if start == -1 or end == -1:
break
include_name = file_text[start:end]
include_path = os.path.join(root, include_name)
include_path = sanitize_file_path(include_path)
if include_path not in added_includes:
include_list.append(include_path)
added_includes.append(include_path)
return include_list
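# Usage sketch (the file name is hypothetical): on first call,
#   find_includes('#include "lighting.pmfx"\n...', "shaders")
# collects a sanitised path equivalent to "shaders/lighting.pmfx"; paths already
# recorded in the global added_includes list are skipped on later calls.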
# recursively search for #includes
def add_files_recursive(filename, root):
file_path = filename
if not os.path.exists(filename):
file_path = os.path.join(root, filename)
included_file = open(file_path, "r")
shader_source = included_file.read()
included_file.close()
shader_source = sanitize_shader_source(shader_source)
sub_root = os.path.dirname(file_path)
include_list = find_includes(shader_source, sub_root)
for include_file in reversed(include_list):
included_source, sub_includes = add_files_recursive(include_file, sub_root)
shader_source = included_source + "\n" + shader_source
include_list = include_list + sub_includes
return shader_source, include_list
# gather include files and create the output directory for a shader set
def create_shader_set(filename, root):
global _info
global added_includes
added_includes = []
shader_file_text, included_files = add_files_recursive(filename, root)
shader_base_name = os.path.basename(filename)
shader_set_dir = os.path.splitext(shader_base_name)[0]
shader_set_build_dir = os.path.join(_info.output_dir, shader_set_dir)
if not os.path.exists(shader_set_build_dir):
os.makedirs(shader_set_build_dir)
return shader_file_text, included_files
# gets constants only for this current permutation
def get_permutation_conditionals(pmfx_json, permutation):
block = pmfx_json.copy()
if "constants" in block:
# find conditionals
conditionals = []
cblock = block["constants"]
for key in cblock.keys():
if key.find("permutation(") != -1:
conditionals.append((key, cblock[key]))
# check conditionals valid
for c in conditionals:
# remove conditional permutation
del block["constants"][c[0]]
full_condition = c[0].replace("permutation", "")
full_condition = full_condition.replace("&&", "and")
full_condition = full_condition.replace("||", "or")
gv = dict()
for v in permutation:
gv[str(v[0])] = v[1]
try:
if eval(full_condition, gv):
block["constants"] = member_wise_merge(block["constants"], c[1])
except NameError:
pass
return block
# get list of technique / permutation specific texture samplers
def generate_technique_texture_variables(_tp):
technique_textures = []
if "texture_samplers" not in _tp.technique.keys():
return
textures = _tp.technique["texture_samplers"]
for t in textures.keys():
technique_textures.append((textures[t]["type"], t, textures[t]["unit"]))
return technique_textures
in parallel.
- **MaxErrors** *(string) --*
The maximum number of errors allowed before this task stops being scheduled.
- **Name** *(string) --*
The task name.
- **Description** *(string) --*
A description of the task.
:type WindowId: string
:param WindowId: **[REQUIRED]**
The ID of the Maintenance Window whose tasks should be retrieved.
:type Filters: list
:param Filters:
Optional filters used to narrow down the scope of the returned tasks. The supported filter keys are WindowTaskId, TaskArn, Priority, and TaskType.
- *(dict) --*
Filter used in the request. Supported filter keys are Name and Enabled.
- **Key** *(string) --*
The name of the filter.
- **Values** *(list) --*
The filter values.
- *(string) --*
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeMaintenanceWindows(Paginator):
def paginate(self, Filters: List = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`SSM.Client.describe_maintenance_windows`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeMaintenanceWindows>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
Filters=[
{
'Key': 'string',
'Values': [
'string',
]
},
],
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'WindowIdentities': [
{
'WindowId': 'string',
'Name': 'string',
'Description': 'string',
'Enabled': True|False,
'Duration': 123,
'Cutoff': 123,
'Schedule': 'string',
'ScheduleTimezone': 'string',
'EndDate': 'string',
'StartDate': 'string',
'NextExecutionTime': 'string'
},
],
}
**Response Structure**
- *(dict) --*
- **WindowIdentities** *(list) --*
Information about the Maintenance Windows.
- *(dict) --*
Information about the Maintenance Window.
- **WindowId** *(string) --*
The ID of the Maintenance Window.
- **Name** *(string) --*
The name of the Maintenance Window.
- **Description** *(string) --*
A description of the Maintenance Window.
- **Enabled** *(boolean) --*
Whether the Maintenance Window is enabled.
- **Duration** *(integer) --*
The duration of the Maintenance Window in hours.
- **Cutoff** *(integer) --*
The number of hours before the end of the Maintenance Window that Systems Manager stops scheduling new tasks for execution.
- **Schedule** *(string) --*
The schedule of the Maintenance Window in the form of a cron or rate expression.
- **ScheduleTimezone** *(string) --*
The time zone that the scheduled Maintenance Window executions are based on, in Internet Assigned Numbers Authority (IANA) format.
- **EndDate** *(string) --*
The date and time, in ISO-8601 Extended format, for when the Maintenance Window is scheduled to become inactive.
- **StartDate** *(string) --*
The date and time, in ISO-8601 Extended format, for when the Maintenance Window is scheduled to become active.
- **NextExecutionTime** *(string) --*
The next time the Maintenance Window will actually run, taking into account any specified times for the Maintenance Window to become active or inactive.
:type Filters: list
:param Filters:
Optional filters used to narrow down the scope of the returned Maintenance Windows. Supported filter keys are **Name** and **Enabled** .
- *(dict) --*
Filter used in the request. Supported filter keys are Name and Enabled.
- **Key** *(string) --*
The name of the filter.
- **Values** *(list) --*
The filter values.
- *(string) --*
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
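# Illustrative use of the paginator documented above via a real boto3 client.
# Assumptions: AWS credentials are configured and boto3 is importable; the
# client variable name is arbitrary.
#
#   import boto3
#   ssm = boto3.client('ssm')
#   paginator = ssm.get_paginator('describe_maintenance_windows')
#   for page in paginator.paginate(Filters=[{'Key': 'Enabled', 'Values': ['true']}]):
#       for window in page['WindowIdentities']:
#           print(window['WindowId'], window.get('Name'))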
class DescribeMaintenanceWindowsForTarget(Paginator):
def paginate(self, Targets: List, ResourceType: str, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`SSM.Client.describe_maintenance_windows_for_target`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeMaintenanceWindowsForTarget>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
Targets=[
{
'Key': 'string',
'Values': [
'string',
]
},
],
ResourceType='INSTANCE',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'WindowIdentities': [
{
'WindowId': 'string',
'Name': 'string'
},
],
}
**Response Structure**
- *(dict) --*
- **WindowIdentities** *(list) --*
Information about the Maintenance Window targets and tasks an instance is associated with.
- *(dict) --*
The Maintenance Window to which the specified target belongs.
- **WindowId** *(string) --*
The ID of the Maintenance Window.
- **Name** *(string) --*
The name of the Maintenance Window.
:type Targets: list
:param Targets: **[REQUIRED]**
The instance ID or key/value pair to retrieve information about.
- *(dict) --*
An array of search criteria that targets instances using a Key,Value combination that you specify. ``Targets`` is required if you don\'t provide one or more instance IDs in the call.
- **Key** *(string) --*
User-defined criteria for sending commands that target instances that meet the criteria. ``Key`` can be ``tag:<Amazon EC2 tag>`` or ``InstanceIds`` . For more information about how to send commands that target instances using ``Key,Value`` parameters, see `Using Targets and Rate Controls to Send Commands to a Fleet <https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html#send-commands-targeting>`__ in the *AWS Systems Manager User Guide* .
- **Values** *(list) --*
User-defined criteria that maps to ``Key`` . For example, if you specified ``tag:ServerRole`` , you could specify ``value:WebServer`` to run a command on instances that include Amazon EC2 tags of ``ServerRole,WebServer`` . For more information about how to send commands that target instances using ``Key,Value`` parameters, see `Using Targets and Rate Controls to Send Commands to a Fleet <https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html>`__ in the *AWS Systems Manager User Guide* .
- *(string) --*
:type ResourceType: string
:param ResourceType: **[REQUIRED]**
The type of resource you want to retrieve information about. For example, \"INSTANCE\".
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeParameters(Paginator):
def paginate(self, Filters: List = None, ParameterFilters: List = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`SSM.Client.describe_parameters`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeParameters>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
Filters=[
{
'Key': 'Name'|'Type'|'KeyId',
'Values': [
'string',
]
},
],
ParameterFilters=[
{
'Key': 'string',
'Option': 'string',
'Values': [
'string',
]
},
],
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Parameters': [
{
'Name': 'string',
'Type': 'String'|'StringList'|'SecureString',
'KeyId': 'string',
'LastModifiedDate': datetime(2015, 1, 1),
'LastModifiedUser': 'string',
'Description': 'string',
'AllowedPattern': 'string',
'Version': 123,
'Tier': 'Standard'|'Advanced',
'Policies': [
{
'PolicyText': 'string',
'PolicyType': 'string',
'PolicyStatus': 'string'
},
]
},
],
}
**Response Structure**
- *(dict) --*
- **Parameters** *(list) --*
Parameters returned by the request.
- *(dict) --*
Metadata includes information like the ARN of the last user and the date/time the parameter was last used.
- **Name** *(string) --*
The parameter name.
- **Type** *(string) --*
The type of parameter. Valid parameter types include the following: String, String list, Secure string.
- **KeyId** *(string) --*
The ID | |
have had enough contention since I came, I want no more. (Putnam crosses L. to above table, gets hat, crosses and exits.)',
'You have sent your spirit out upon this child, have you not? Are you gathering souls for the Devil? ',
'She send her spirit on me in church, she make me laugh at prayer! ',
'She have often laughed at prayer! ',
'She comes to me every night to go and drink blood! ',
'You beg me to conjure, She beg me make charm',
'I‘ll tell you something. She comes to me while I sleep; she‘s always making me dream corruptions! TITUBA: Abby! ',
'I always hear her laughing in my sleep. I hear her singing her Barbados songs and tempting me with',
'<NAME>, I never',
'When did you compact with the Devil? ',
'I don‘t compact with no devil! ',
'You will confess yourself or I will take you out and whip you to your death, Tituba! ',
'This woman must be hanged! She must be taken and hanged!',
'Out, out upon thee, witch!',
'Dost call me witch?',
'I do, witch, I do, and worse I would, knew I a name more hateful.',
'What makest thou upon my ground?',
'Gather a few rotten sticks to warm me.',
'Down with them when I bid thee quickly',
'I will make thy bones rattle in thy skin else.',
'You will not, churl, cut-throat, miser!—there they be',
'would they stuck cross thy throat, thy bowels, thy maw, thy midriff!',
'Sayest thou me so, hag? Out of my ground! ',
'Dost strike me, slave, curmudgeon! Now, thy bones ache, thy joints cramp, and convulsions stretch and crack thy sinews!',
'Cursing, thou hag! take that and that.',
'Ho! have I found thee cursing? now thou art Mine own.',
'Thine! what art thou?',
'He thou hast so often Importuned to appear to thee, the devil.',
'Bless me! the devil?',
'Come, do not fear; I love thee much too well To hurt or fright thee; if I seem terrible, It is to such as hate me. I have found Thy love unfeigned; have seen and pitied Thy open wrongs; and come, out of my love, To give thee just revenge against thy foes.',
'May I believe thee?',
'To confirm it, command me Do any mischief unto man or beast, And I will effect it, on condition That, uncompelled, thou make a deed of gift Of soul and body to me.',
'Out, alas! My soul and body?',
'And that instantly, And seal it with thy blood: if thou deniest, I will tear thy body in a thousand pieces.',
'I know not where to seek relief: but shall I, After such covenants sealed, see full revenge On all that wrong me?',
'Ha, ha! silly woman! The devil is no liar to such as he loves: Didst ever know or hear the devil a liar To such as he affects?',
'Then I am thine; at least so much of me As I can call mine own—',
'Equivocations? Art mine or no? speak, or I will tear—',
'All thine.',
'Seal it with thy blood.',
'Can you perchance get me a love potion that has power over a pious man of the church?',
'Here, young maiden, take a potion of cat feces and dove hearts, boiled during the full moon. A drop of this in a mans drink will soften his heart at once.',
'Karna, can i have an even stronger potion?',
'If the maiden wishes to drive the man out of his wits for love... I have a potion boiled in May from a young and playful male sparrow. Hold your coins, maiden! First smell my ointment! This ointment is good, should you wish to travel to Brocken one night. Secretly smear this ointment on and the pious monk might come directly to your chamber. You will then fly high up in the night air together and he will bestow upon you many hungry kisses.',
'They say you have had commerce with the devil.',
'Why do you ask?',
'For very personal reasons. I, too, want to meet him.',
'Why?',
'I must ask him about God. Surely he knows.',
'You can see him any time.',
'How?',
'If you do as I say. Look into my eyes.',
'Well, do you see him?',
'I see terror. Nothing else.',
'Nothing? No one? Nothing?',
'No.',
'Is he not behind you?',
'No. There is no one.',
'He is with me everywhere. If I stretch out my hand, I feel him. Even now. The fire will not hurt me.',
'Has he said so?',
'I know.',
'Has he said so?!',
'I know! I know! You must see him, too. The priests could see him, and the soldiers. They dare not touch me.',
'Why have you broken her hands?',
'It is not us.',
'Who?',
'Ask the monk there.',
'What have you done to the child?',
'Will you never stop asking questions?',
'No. Never.',
'But you get no answer.',
'When next we meet, the hour will strike for you and your friends.',
'And you will reveal your secrets?',
' I have no secrets.',
'So you know nothing?',
'I am unknowing.',
'I want to confess as best I can, but my heart is void. The void is a mirror. I see my face and feel loathing and horror. My indifference to men has shut me out. I live now in a world of ghosts, a prisoner in my dreams.',
'Yet you do not want to die.',
'Yes, I do.',
'What are you waiting for?',
'Knowledge.',
'You want a guarantee.',
'Call it what you will.',
'Is it so hard to conceive God with ones senses? Why must He hide in a midst of vague promises and invisible miracles? How are we to believe the believers when we do not believe ourselves? What will become of us who want to believe but cannot? And what of those who neither will nor can believe? Why can I not kill God within me? Why does He go on living in a painful, humiliating way? I want to tear Him out of my heart, but He remains a mocking reality which I cannot get rid of. Do you hear me?',
'I hear you.',
'I want knowledge. Not belief. Not surmise. But knowledge. I want God to put out His hand, show His face, speak to me.',
'But He is silent.',
'I cry to Him in the dark, but there seems to be no one there.',
'Perhaps there is no one there.',
'Then life is a senseless terror. No man can live with Death and know that everything is nothing.',
'Most people think neither of Death nor nothingness.',
'Until they stand on the edge of life and see the Darkness.',
'Ah, that day.',
'I see. We must make an idol of our fear, and call it God.',
| |
# pragma: no cover
if output:
return string
else:
print(string)
return None
# Define ANSI colors
ansicolors = OD([
('black', '30'),
('red', '31'),
('green', '32'),
('yellow', '33'),
('blue', '34'),
('magenta', '35'),
('cyan', '36'),
('gray', '37'),
('bgblack', '40'),
('bgred', '41'),
('bggreen', '42'),
('bgyellow', '43'),
('bgblue', '44'),
('bgmagenta', '45'),
('bgcyan', '46'),
('bggray', '47'),
('reset', '0'),
])
for key, val in ansicolors.items(): ansicolors[key] = '\033[' + val + 'm'
# Determine what color to use
colorlist = promotetolist(color) # Make sure it's a list
for color in colorlist:
if color not in ansicolors.keys(): # pragma: no cover
print(f'Color "{color}" is not available, use colorize(showhelp=True) to show options.')
return None # Don't proceed if the color isn't found
ansicolor = ''
for color in colorlist:
ansicolor += ansicolors[color]
# Modify string, if supplied
if string is None: ansistring = ansicolor # Just return the color
else: ansistring = ansicolor + str(string) + ansicolors['reset'] # Add to start and end of the string
if not ansi_support: ansistring = str(string) # To avoid garbling output on unsupported systems
if showhelp:
print('Available colors are:')
for key in ansicolors.keys():
if key[:2] == 'bg':
darks = ['bgblack', 'bgred', 'bgblue', 'bgmagenta']
if key in darks: foreground = 'gray'
else: foreground = 'black'
helpcolor = [foreground, key]
else:
helpcolor = key
colorize(helpcolor, ' ' + key)
elif output:
return ansistring # Return the modified string
else:
try: print(ansistring) # Content, so print with newline
except: print(string) # If that fails, just go with plain version
return None
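# Usage examples for colorize() above (the signature is inferred from the calls
# in this file: first positional argument is the color, second is the string):
#   colorize('green', 'success')                       # prints "success" in green
#   colorize(['gray', 'bgred'], 'alert', output=True)  # returns the ANSI-wrapped string
#   colorize(showhelp=True)                            # lists the available color names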
def heading(string=None, *args, color=None, divider=None, spaces=None, minlength=None, maxlength=None, sep=' ', output=True, **kwargs):
'''
Create a colorful heading. If just supplied with a string (or list of inputs like print()),
create blue text with horizontal lines above and below and 3 spaces above. You
can customize the color, the divider character, how many spaces appear before
the heading, and the minimum length of the divider (otherwise will expand to
match the length of the string, up to a maximum length).
Args:
string (str): The string to print as the heading (or object to convert to a string)
args (list): Additional strings to print
color (str): The color to use for the heading (default blue)
divider (str): The symbol to use for the divider (default em dash)
spaces (int): The number of spaces to put before the heading
minlength (int): The minimum length of the divider
maxlength (int): The maximum length of the divider
sep (str): If multiple arguments are supplied, use this separator to join them
output (bool): Whether to return the string as output (else, print)
kwargs (dict): Arguments to pass to sc.colorize()
Returns:
String, unless output=False.
Examples
--------
>>> import sciris as sc
>>> sc.heading('This is a heading')
>>> sc.heading(string='This is also a heading', color='red', divider='*', spaces=0, minlength=50)
'''
if string is None: string = ''
if color is None: color = 'cyan' # Reasonable default for light and dark consoles
if divider is None: divider = '—' # Em dash for a continuous line
if spaces is None: spaces = 2
if minlength is None: minlength = 30
if maxlength is None: maxlength = 120
# Convert to single string
args = list(args)
if string is not None:
args = [string] + args
string = sep.join(str(item) for item in args)
# Add header and footer
length = int(np.median([minlength, len(string), maxlength]))
space = '\n'*spaces
if divider and length: fulldivider = '\n'+divider*length+'\n'
else: fulldivider = ''
fullstring = space + fulldivider + string + fulldivider
# Create output
outputstring = colorize(color=color, string=fullstring, **kwargs)
if output:
return outputstring
else:
print(outputstring)
return
def percentcomplete(step=None, maxsteps=None, stepsize=1, prefix=None):
'''
Display progress.
**Example**::
maxiters = 500
for i in range(maxiters):
sc.percentcomplete(i, maxiters) # will print on every 5th iteration
sc.percentcomplete(i, maxiters, stepsize=10) # will print on every 50th iteration
sc.percentcomplete(i, maxiters, prefix='Completeness: ') # will print e.g. 'Completeness: 1%'
'''
if prefix is None:
prefix = ' '
elif isnumber(prefix):
prefix = ' '*prefix
onepercent = max(stepsize, round(maxsteps/100*stepsize)) # Calculate how big a single step is -- not smaller than 1
if not step%onepercent: # Does this value lie on a percent
thispercent = round(step/maxsteps*100) # Calculate what percent it is
print(prefix + '%i%%'% thispercent) # Display the output
return None
def progressbar(i, maxiters, label='', length=30, empty='—', full='•', newline=False):
'''
Call in a loop to create terminal progress bar.
Args:
i (int): current iteration
maxiters (int): maximum number of iterations
label (str): initial label to print
length (int): length of progress bar
empty (str): character for empty steps
full (str): character for filled steps
**Example**::
import pylab as pl
for i in range(100):
progressbar(i+1, 100)
pl.pause(0.05)
Adapted from example by Greenstick (https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console)
'''
ending = None if newline else '\r'
pct = i/maxiters*100
percent = f'{pct:0.0f}%'
filled = int(length*i//maxiters)
bar = full*filled + empty*(length-filled)
print(f'\r{label} {bar} {percent}', end=ending)
if i == maxiters: print()
return
##############################################################################
#%% Type functions
##############################################################################
__all__ += ['flexstr', 'isiterable', 'checktype', 'isnumber', 'isstring', 'isarray',
'promotetoarray', 'promotetolist', 'toarray', 'tolist', 'transposelist',
'mergedicts', 'mergelists']
def flexstr(arg, force=True):
'''
Try converting any object to a "regular" string (i.e. ``str``), but proceed
if it fails. Note: this function calls ``repr()`` rather than ``str()`` to
ensure a more robust representation of objects.
'''
if isinstance(arg, str):
return arg
elif isinstance(arg, bytes):
try:
output = arg.decode() # If it's bytes, decode to unicode
except: # pragma: no cover
if force: output = repr(arg) # If that fails, just print its representation
else: output = arg
else: # pragma: no cover
if force: output = repr(arg)
else: output = arg # Optionally don't do anything for non-strings
return output
def isiterable(obj):
'''
Simply determine whether or not the input is iterable.
Works by trying to iterate via iter(), and if that raises an exception, it's
not iterable.
From http://stackoverflow.com/questions/1952464/in-python-how-do-i-determine-if-an-object-is-iterable
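    **Examples** (illustrative)::
        sc.isiterable([1, 2, 3]) # returns True
        sc.isiterable(42)        # returns False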
'''
try:
iter(obj)
return True
except:
return False
def checktype(obj=None, objtype=None, subtype=None, die=False):
'''
A convenience function for checking instances. If objtype is a type,
then this function works exactly like isinstance(). But, it can also
be one of the following strings:
- 'str', 'string': string or bytes object
- 'num', 'number': any kind of number
- 'arr', 'array': a Numpy array (equivalent to np.ndarray)
- 'listlike': a list, tuple, or array
- 'arraylike': a list, tuple, or array with numeric entries
If subtype is not None, then checktype will iterate over the object and check
recursively that each element matches the subtype.
Args:
obj (any): the object to check the type of
objtype (str or type): the type to confirm the object belongs to
subtype (str or type): optionally check the subtype if the object is iterable
die (bool): whether or not to raise an exception if the object is the wrong type
**Examples**::
sc.checktype(rand(10), 'array', 'number') # Returns True
sc.checktype(['a','b','c'], 'listlike') # Returns True
sc.checktype(['a','b','c'], 'arraylike') # Returns False
sc.checktype([{'a':3}], list, dict) # Returns True
'''
# Handle "objtype" input
if objtype in ['str','string']: objinstance = _stringtypes
elif objtype in ['num', 'number']: objinstance = _numtype
elif objtype in ['arr', 'array']: objinstance = np.ndarray
elif objtype in ['listlike', 'arraylike']: objinstance = (list, tuple, np.ndarray) # Anything suitable as a numerical array
elif type(objtype) == type: objinstance = objtype # Don't need to do anything
elif objtype is None: return None # If not supplied, exit
else: # pragma: no cover
errormsg = f'Could not understand what type you want to check: should be either a string or a type, not "{objtype}"'
raise ValueError(errormsg)
# Do first-round checking
result = isinstance(obj, objinstance)
# Do second round checking
if result and objtype in ['listlike', 'arraylike']: # Special case for handling arrays which may be multi-dimensional
obj = promotetoarray(obj).flatten() # Flatten all elements
if objtype == 'arraylike' and subtype is None: subtype = 'number'
if isiterable(obj) and subtype is not None:
for item in obj:
result = result and checktype(item, subtype)
    # Decide what to do with the result of the check: raise if requested, otherwise return it
    if die and not result:
        errormsg = f'Incorrect type: expected {objtype}, but got {type(obj)}'
        raise TypeError(errormsg)
    return result
import math
import sys
import os
import io
import re
import struct
import collections
import argparse
import textwrap
import crcmod
import usb1
from . import VID_CYPRESS, PID_FX2, FX2Config, FX2Device, FX2DeviceError
from .format import input_data, output_data, diff_data
class VID_PID(collections.namedtuple("VID_PID", "vid pid")):
@classmethod
def parse(cls, s):
match = re.match(r"^([0-9a-f]{1,4}):([0-9a-f]{1,4})$", s, re.I)
if not match:
raise ValueError("{} is not a VID:PID pair".format(s))
vid = int(match.group(1), 16)
pid = int(match.group(2), 16)
return cls(vid, pid)
def __str__(self):
return "{:04x}:{:04x}".format(self.vid, self.pid)
class TextHelpFormatter(argparse.HelpFormatter):
def _fill_text(self, text, width, indent):
def filler(match):
text = match[0]
list_match = re.match(r"(\s*)\*", text)
if list_match:
return text
text = textwrap.fill(text, width,
initial_indent=indent,
subsequent_indent=indent)
text = re.sub(r"(\w-) (\w)", r"\1\2", text)
text = text + (match[2] or "")
return text
text = textwrap.dedent(text).strip()
return re.sub(r"((?!\n\n)(?!\n\s+\*).)+(\n*)?", filler, text, flags=re.S)
def get_argparser():
def usb_id(arg):
if re.match(r"^([0-9a-f]{1,4})$", arg, re.I) and int(arg, 16) not in (0x0000, 0xffff):
return int(arg, 16)
else:
raise argparse.ArgumentTypeError("{} is not an USB ID".format(arg))
def vid_pid(arg):
try:
return VID_PID.parse(arg)
except ValueError:
raise argparse.ArgumentTypeError("{} is not a VID:PID pair".format(arg))
def int_with_base(arg):
try:
return int(arg, 0)
except ValueError:
raise argparse.ArgumentTypeError("{} is not an integer".format(arg))
def power_of_two(arg):
size = int_with_base(arg)
size_log = math.log2(size)
if int(size_log) != size_log:
raise argparse.ArgumentTypeError("{} is not a power of 2".format(size))
return int(size_log)
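    # e.g. power_of_two("256") returns 8: the user-supplied size is converted to its
    # log2 exponent, and values that are not a power of two raise ArgumentTypeError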
parser = argparse.ArgumentParser(
formatter_class=TextHelpFormatter,
description="""
Cypress FX2/FX2LP bootloader tool
This tool can read and write data in three formats:
* hex: contiguous hexadecimal dump with non-significant whitespace
* bin (.bin extension): contiguous binary
* ihex (.ihex, .ihx, .hex extensions): discontiguous Intel HEX records
By default, the format is automatically determined from the file extension,
and if reading from standard input or writing to standard output, whether
the stream is a terminal (human-readable hexadecimal is used) or not
(binary is used).
""")
parser.add_argument(
"-d", "--device", type=vid_pid, default=(VID_CYPRESS, PID_FX2),
help="device VID:PID pair")
parser.add_argument(
"-F", "--format", choices=["hex", "bin", "ihex", "auto"], default="auto",
help="data input/output format")
parser.add_argument(
"-S", "--stage2", metavar="FILENAME", type=argparse.FileType("rb"),
help="load the specified second stage bootloader before any further action")
parser.add_argument(
"-B", "--bootloader", action="store_true",
help="load the second stage bootloader provided with fx2tool")
subparsers = parser.add_subparsers(dest="action", metavar="COMMAND")
subparsers.required = True
p_load = subparsers.add_parser(
"load", formatter_class=TextHelpFormatter,
help="load and run firmware",
description="Loads firmware into on-chip code memory and runs it.")
p_load.add_argument(
"firmware", metavar="FIRMWARE", type=argparse.FileType("rb"),
help="read firmware from the specified file")
def add_read_args(parser):
parser.add_argument(
"-f", "--file", metavar="FILENAME", type=argparse.FileType("wb"), default="-",
help="write data to the specified file")
parser.add_argument(
"address", metavar="ADDRESS", type=int_with_base,
help="starting address")
parser.add_argument(
"length", metavar="LENGTH", type=int_with_base,
help="amount of bytes to read")
def add_write_args(parser):
parser.add_argument(
"-a", "--offset", metavar="ADDRESS", type=int_with_base, default=0,
help="starting address")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"-f", "--file", metavar="FILENAME", type=argparse.FileType("rb"),
help="read data from the specified file")
group.add_argument(
"-d", "--data", metavar="DATA", type=str,
help="hexadecimal bytes to write")
ram_note = textwrap.dedent("""
\"External\" RAM means on-chip RAM external to the 8051 core, i.e. RAM where
__code or __xdata objects are placed by the toolchain. Internal RAM of the 8051
is not accessible by this tool.
""")
p_read_ram = subparsers.add_parser("read_ram",
formatter_class=TextHelpFormatter,
help="read data from code RAM",
description="Reads data from on-chip code RAM.\n" + ram_note)
add_read_args(p_read_ram)
p_write_ram = subparsers.add_parser("write_ram",
formatter_class=TextHelpFormatter,
help="write data to code RAM",
description="Writes data to on-chip code RAM.\n" + ram_note)
add_write_args(p_write_ram)
p_read_xram = subparsers.add_parser("read_xram",
formatter_class=TextHelpFormatter,
help="read data from code RAM, external RAM, or registers",
description="Reads data from RAM using the `movx` instruction.\n" + ram_note)
add_read_args(p_read_xram)
p_write_xram = subparsers.add_parser("write_xram",
formatter_class=TextHelpFormatter,
help="write data to code RAM, external RAM, or registers",
description="Writes data to RAM using the `movx` instruction.\n" + ram_note)
add_write_args(p_write_xram)
def add_eeprom_args(parser):
parser.add_argument(
"-W", "--address-width", metavar="WIDTH", type=int, choices=[1, 2], default=2,
help="EEPROM address width in bytes")
def add_eeprom_write_args(parser):
parser.add_argument(
"-p", "--page-size", metavar="SIZE", type=power_of_two, default=1,
help="power-of-two EEPROM page size (default: %(default)d)")
bootloader_note = textwrap.dedent("""
An appropriate second stage bootloader must be loaded for this command to work,
see the --stage2 option. The format of the bootloader firmware file is auto-detected.
""")
p_read_eeprom = subparsers.add_parser("read_eeprom",
formatter_class=TextHelpFormatter,
help="read data from boot EEPROM",
description="Reads data from boot EEPROM.\n" + bootloader_note)
add_eeprom_args(p_read_eeprom)
add_read_args(p_read_eeprom)
p_write_eeprom = subparsers.add_parser("write_eeprom",
formatter_class=TextHelpFormatter,
help="write data to boot EEPROM",
description="Writes data to boot EEPROM.\n" + bootloader_note)
add_eeprom_args(p_write_eeprom)
add_write_args(p_write_eeprom)
add_eeprom_write_args(p_write_eeprom)
p_reenumerate = subparsers.add_parser("reenumerate",
formatter_class=TextHelpFormatter,
help="re-enumerate",
description="Simulates device disconnection and reconnection.\n" + bootloader_note)
def add_program_args(parser):
parser.add_argument(
"-V", "--vid", dest="vendor_id", metavar="ID", type=usb_id, default=VID_CYPRESS,
help="USB vendor ID (default: %(default)04x)")
parser.add_argument(
"-P", "--pid", dest="product_id", metavar="ID", type=usb_id, default=PID_FX2,
help="USB product ID (default: %(default)04x)")
parser.add_argument(
"-D", "--did", dest="device_id", metavar="ID", type=usb_id, default=0x0000,
help="USB device ID (default: %(default)04x)")
parser.add_argument(
"-N", "--disconnect", dest="disconnect", default=False, action="store_true",
help="do not automatically enumerate on startup")
parser.add_argument(
"-F", "--fast", dest="i2c_400khz", default=False, action="store_true",
help="use 400 kHz clock for loading firmware via I2C")
p_program = subparsers.add_parser("program",
formatter_class=TextHelpFormatter,
help="program USB IDs or firmware",
description="Writes USB VID, PID, and DID, and if specified, firmware, "
"into boot EEPROM.\n" + bootloader_note)
add_eeprom_args(p_program)
add_eeprom_write_args(p_program)
add_program_args(p_program)
p_program.add_argument(
"-f", "--firmware", metavar="FILENAME", type=argparse.FileType("rb"),
help="read firmware from the specified file")
p_update = subparsers.add_parser("update",
formatter_class=TextHelpFormatter,
help="update USB IDs or firmware",
description="Writes USB VID, PID, DID, boot options, and if specified, firmware, "
"into boot EEPROM, without changing any omitted parameters.\n" + bootloader_note)
add_eeprom_args(p_update)
add_eeprom_write_args(p_update)
p_update.add_argument(
"-V", "--vid", dest="vendor_id", metavar="ID", type=usb_id,
help="USB vendor ID")
p_update.add_argument(
"-P", "--pid", dest="product_id", metavar="ID", type=usb_id,
help="USB product ID")
p_update.add_argument(
"-D", "--did", dest="device_id", metavar="ID", type=usb_id,
help="USB device ID")
p_update.add_argument(
"-N", "--disconnect", default=None, dest="disconnect", action="store_true",
help="do not automatically enumerate on startup")
p_update.add_argument(
"-E", "--no-disconnect", default=None, dest="disconnect", action="store_false",
help="do automatically enumerate on startup")
p_update.add_argument(
"-F", "--fast", default=None, dest="i2c_400khz", action="store_true",
help="use 400 kHz clock for loading firmware via I2C")
p_update.add_argument(
"-S", "--slow", default=None, dest="i2c_400khz", action="store_false",
help="use 100 kHz clock for loading firmware via I2C")
g_update_firmware = p_update.add_mutually_exclusive_group()
g_update_firmware.add_argument(
"-f", "--firmware", metavar="FILENAME", type=argparse.FileType("rb"),
help="read firmware from the specified file")
g_update_firmware.add_argument(
"-n", "--no-firmware", default=False, action="store_true",
help="remove any firmware present")
p_dump = subparsers.add_parser("dump",
formatter_class=TextHelpFormatter,
help="read out USB IDs or firmware",
description="Reads USB VID, PID, DID, boot options, and if present, firmware, "
"from boot EEPROM.\n" + bootloader_note)
add_eeprom_args(p_dump)
p_dump.add_argument(
"-f", "--firmware", metavar="FILENAME", type=argparse.FileType("wb"),
help="write firmware to the specified file")
p_uf2 = subparsers.add_parser("uf2",
formatter_class=TextHelpFormatter,
help="prepare UF2 firmware update images",
description="Assembles USB VID, PID, DID, boot options and firmware "
"into an image that can be flashed into the boot EEPROM using "
"the UF2 firmware update protocol.")
add_program_args(p_uf2)
p_uf2.add_argument(
"firmware_file", metavar="FIRMWARE-FILE", type=argparse.FileType("rb"),
help="read firmware from the specified file")
p_uf2.add_argument(
"uf2_file", metavar="UF2-FILE", type=argparse.FileType("wb"),
help="write UF2 firmware update image to the specified file")
p_dfu = subparsers.add_parser("dfu",
formatter_class=TextHelpFormatter,
help="prepare DFU firmware update images",
description="Assembles USB VID, PID, DID, boot options and firmware "
"into an image that can be flashed into the boot EEPROM using "
"the standard Device Firmware Update protocol.")
add_program_args(p_dfu)
p_dfu.add_argument(
"--dfu-pid", dest="dfu_product_id", metavar="ID", type=usb_id,
help="DFU mode USB product ID (default: firmware product ID)")
p_dfu.add_argument(
"firmware_file", metavar="FIRMWARE-FILE", type=argparse.FileType("rb"),
help="read firmware from the specified file")
p_dfu.add_argument(
"dfu_file", metavar="UF2-FILE", type=argparse.FileType("wb"),
help="write DFU image to the specified file")
return parser
def read_entire_boot_eeprom(device, address_width):
    # We don't know how large the EEPROM is, so we use a heuristic tailored
    # for the C2 load: if we detect a chunk identical to the first chunk,
    # *or* a chunk consisting only of erased bytes, we stop.
addr = 0
data = bytearray()
while addr < 0x10000: # never larger than 64k
chunk = device.read_boot_eeprom(addr, 0x100, address_width)
if addr == 0:
first_chunk = chunk
elif chunk == first_chunk:
break
if re.match(rb"\xff{256}", chunk):
break
else:
data += chunk
addr += len(chunk)
return data
def main():
resource_dir = os.path.dirname(os.path.abspath(__file__))
args = get_argparser().parse_args()
if args.action in ("uf2", "dfu"):
device = None
else:
try:
vid, pid = args.device
device = FX2Device(vid, pid)
except FX2DeviceError as e:
raise SystemExit(e)
try:
if device is not None:
if args.bootloader:
bootloader_ihex = os.path.join(resource_dir, "boot-cypress.ihex")
device.load_ram(input_data(open(bootloader_ihex)))
elif args.stage2:
device.load_ram(input_data(args.stage2))
if args.action == "load":
device.load_ram(input_data(args.firmware, args.format))
elif args.action == "read_ram":
device.cpu_reset(True)
data = device.read_ram(args.address, args.length)
output_data(args.file, data, args.format, args.address)
elif args.action == "write_ram":
data = input_data(args.file or args.data, args.format, args.offset)
device.cpu_reset(True)
for address, chunk in data:
device.write_ram(address, chunk)
elif args.action == "read_xram":
device.cpu_reset(False)
data = device.read_ext_ram(args.address, args.length)
output_data(args.file, data, args.format, args.address)
elif args.action == "write_xram":
data = input_data(args.file or args.data, args.format, args.offset)
device.cpu_reset(False)
for address, chunk in data:
device.write_ext_ram(address, chunk)
elif args.action == "read_eeprom":
device.cpu_reset(False)
data = device.read_boot_eeprom(args.address, args.length, args.address_width)
output_data(args.file, data, args.format, args.address)
elif args.action == "write_eeprom":
            data = input_data(args.file or args.data, args.format, args.offset)
space.parse("if 3; end") == ast.Main(ast.Block([
ast.Statement(ast.If(ast.ConstantInt(3), ast.Nil(), ast.Nil()))
]))
r = space.parse("""
if 0
puts 2
puts 3
puts 4
end
""")
assert r == ast.Main(ast.Block([
ast.Statement(ast.If(ast.ConstantInt(0), ast.Block([
ast.Statement(ast.Send(ast.Self(3), "puts", [ast.ConstantInt(2)], None, 3)),
ast.Statement(ast.Send(ast.Self(4), "puts", [ast.ConstantInt(3)], None, 4)),
ast.Statement(ast.Send(ast.Self(5), "puts", [ast.ConstantInt(4)], None, 5)),
]), ast.Nil()))
]))
def test_else(self, space):
r = space.parse("""if 3 then 5 else 4 end""")
assert r == ast.Main(ast.Block([
ast.Statement(ast.If(ast.ConstantInt(3), ast.Block([
ast.Statement(ast.ConstantInt(5))
]), ast.Block([
ast.Statement(ast.ConstantInt(4))
])))
]))
assert space.parse("if nil; else; end") == ast.Main(ast.Block([
ast.Statement(ast.If(ast.Nil(), ast.Nil(), ast.Nil()))
]))
def test_elsif(self, space):
r = space.parse("""
if 3
5
elsif 4 == 2
3
elsif 3 == 1
2
end
""")
assert r == ast.Main(ast.Block([
ast.Statement(ast.If(ast.ConstantInt(3), ast.Block([
ast.Statement(ast.ConstantInt(5))
]), ast.If(ast.Send(ast.ConstantInt(4), "==", [ast.ConstantInt(2)], None, 4), ast.Block([
ast.Statement(ast.ConstantInt(3))
]), ast.If(ast.Send(ast.ConstantInt(3), "==", [ast.ConstantInt(1)], None, 6), ast.Block([
ast.Statement(ast.ConstantInt(2))
]), ast.Nil()))))
]))
def test_elsif_else(self, space):
r = space.parse("""
if nil
5
elsif nil
10
else
200
end
""")
assert r == ast.Main(ast.Block([
ast.Statement(ast.If(ast.Nil(), ast.Block([
ast.Statement(ast.ConstantInt(5))
]), ast.If(ast.Nil(), ast.Block([
ast.Statement(ast.ConstantInt(10)),
]), ast.Block([
ast.Statement(ast.ConstantInt(200))
]))))
]))
def test_comparison_ops(self, space):
assert space.parse("1 == 2; 1 < 2; 1 > 2; 1 != 2; 1 <= 2; 1 >= 2; 1 <=> 2") == ast.Main(ast.Block([
ast.Statement(ast.Send(ast.ConstantInt(1), "==", [ast.ConstantInt(2)], None, 1)),
ast.Statement(ast.Send(ast.ConstantInt(1), "<", [ast.ConstantInt(2)], None, 1)),
ast.Statement(ast.Send(ast.ConstantInt(1), ">", [ast.ConstantInt(2)], None, 1)),
ast.Statement(ast.Send(ast.ConstantInt(1), "!=", [ast.ConstantInt(2)], None, 1)),
ast.Statement(ast.Send(ast.ConstantInt(1), "<=", [ast.ConstantInt(2)], None, 1)),
ast.Statement(ast.Send(ast.ConstantInt(1), ">=", [ast.ConstantInt(2)], None, 1)),
ast.Statement(ast.Send(ast.ConstantInt(1), "<=>", [ast.ConstantInt(2)], None, 1)),
]))
def test_while(self, space):
expected = ast.Main(ast.Block([
ast.Statement(ast.While(ast.ConstantBool(True), ast.Block([
ast.Statement(ast.Send(ast.Self(1), "puts", [ast.ConstantInt(5)], None, 1))
])))
]))
assert space.parse("while true do puts 5 end") == expected
assert space.parse("while true do; puts 5 end") == expected
assert space.parse("while true; puts 5 end") == expected
assert space.parse("while true; end") == ast.Main(ast.Block([
ast.Statement(ast.While(ast.ConstantBool(True), ast.Nil()))
]))
res = space.parse("""
i = 0
while i < 10 do
puts i
puts 1
puts i
puts true
end
""")
assert res == ast.Main(ast.Block([
ast.Statement(ast.Assignment(ast.Variable("i", 2), ast.ConstantInt(0))),
ast.Statement(ast.While(ast.Send(ast.Variable("i", 3), "<", [ast.ConstantInt(10)], None, 3), ast.Block([
ast.Statement(ast.Send(ast.Self(4), "puts", [ast.Variable("i", 4)], None, 4)),
ast.Statement(ast.Send(ast.Self(5), "puts", [ast.ConstantInt(1)], None, 5)),
ast.Statement(ast.Send(ast.Self(6), "puts", [ast.Variable("i", 6)], None, 6)),
ast.Statement(ast.Send(ast.Self(7), "puts", [ast.ConstantBool(True)], None, 7)),
])))
]))
def test_until(self, space):
r = space.parse("""
until 3
5
end
""")
assert r == ast.Main(ast.Block([
ast.Statement(ast.Until(ast.ConstantInt(3), ast.Block([
ast.Statement(ast.ConstantInt(5))
])))
]))
def test_for(self, space):
expected = ast.Main(ast.Block([
ast.Statement(ast.Send(ast.Array([]), "each", [], ast.SendBlock(
[ast.Argument("0")], None, None, ast.Block([
ast.Statement(ast.Assignment(ast.Variable("i", 1), ast.Variable("0", 1))),
ast.Statement(ast.Send(ast.Self(1), "puts", [ast.Variable("i", 1)], None, 1))
])
), 1))
]))
assert space.parse("for i in [] do puts i end") == expected
assert space.parse("for i in [] do; puts i end") == expected
assert space.parse("for i in []; puts i end") == expected
assert space.parse("for i, in []; end") == ast.Main(ast.Block([
ast.Statement(ast.Send(ast.Array([]), "each", [], ast.SendBlock(
[ast.Argument("0")], None, None, ast.Block([
ast.Statement(ast.MultiAssignment(
ast.MultiAssignable([ast.Variable("i", 1)]),
ast.Variable("0", 1)
))
])
), 1))
]))
res = space.parse("""
a = [0]
for i in a
puts i
puts 1
puts i
end
""")
assert res == ast.Main(ast.Block([
ast.Statement(ast.Assignment(ast.Variable("a", 2), ast.Array([ast.ConstantInt(0)]))),
ast.Statement(ast.Send(ast.Variable("a", 3), "each", [], ast.SendBlock(
[ast.Argument("0")], None, None, ast.Block([
ast.Statement(ast.Assignment(ast.Variable("i", 3), ast.Variable("0", 3))),
ast.Statement(ast.Send(ast.Self(4), "puts", [ast.Variable("i", 4)], None, 4)),
ast.Statement(ast.Send(ast.Self(5), "puts", [ast.ConstantInt(1)], None, 5)),
ast.Statement(ast.Send(ast.Self(6), "puts", [ast.Variable("i", 6)], None, 6)),
])
), 3))
]))
res = space.parse("""
for @a, *b, $c in []
end
""")
assert res == ast.Main(ast.Block([
ast.Statement(ast.Send(
ast.Array([]),
"each",
[],
ast.SendBlock(
[ast.Argument("0")], None, None, ast.Block([
ast.Statement(ast.MultiAssignment(
ast.MultiAssignable([
ast.InstanceVariable("@a"),
ast.Splat(ast.Variable("b", 2)),
ast.Global("$c")
]),
ast.Variable("0", 2)
))
])
),
2
))
]))
def test_return(self, space):
assert space.parse("return 4") == ast.Main(ast.Block([
ast.Return(ast.ConstantInt(4))
]))
assert space.parse("return") == ast.Main(ast.Block([
ast.Return(ast.Nil())
]))
assert space.parse("return 3, 4, 5") == ast.Main(ast.Block([
ast.Return(ast.Array([
ast.ConstantInt(3),
ast.ConstantInt(4),
ast.ConstantInt(5),
]))
]))
assert space.parse("return *3") == ast.Main(ast.Block([
ast.Return(ast.Splat(ast.ConstantInt(3)))
]))
assert space.parse("return f 1, 2") == ast.Main(ast.Block([
ast.Return(ast.Send(ast.Self(1), "f", [ast.ConstantInt(1), ast.ConstantInt(2)], None, 1))
]))
def test_array(self, space):
assert space.parse("[]") == ast.Main(ast.Block([
ast.Statement(ast.Array([]))
]))
assert space.parse("[1, 2, 3]") == ast.Main(ast.Block([
ast.Statement(ast.Array([
ast.ConstantInt(1),
ast.ConstantInt(2),
ast.ConstantInt(3),
]))
]))
assert space.parse("[[1], [2], [3]]") == ast.Main(ast.Block([
ast.Statement(ast.Array([
ast.Array([ast.ConstantInt(1)]),
ast.Array([ast.ConstantInt(2)]),
ast.Array([ast.ConstantInt(3)]),
]))
]))
assert space.parse("[1, 2,]") == ast.Main(ast.Block([
ast.Statement(ast.Array([
ast.ConstantInt(1),
ast.ConstantInt(2),
]))
]))
r = space.parse("""
[
f()
]
""")
assert r == ast.Main(ast.Block([
ast.Statement(ast.Array([
ast.Send(ast.Self(3), "f", [], None, 3),
])),
]))
assert space.parse("[1, *2, *3]") == ast.Main(ast.Block([
ast.Statement(ast.Array([
ast.ConstantInt(1),
ast.Splat(ast.ConstantInt(2)),
ast.Splat(ast.ConstantInt(3)),
]))
]))
assert space.parse("[:abc => 3]") == ast.Main(ast.Block([
ast.Statement(ast.Array([
ast.Hash([(ast.ConstantSymbol("abc"), ast.ConstantInt(3))])
]))
]))
assert space.parse("[1, :abc => 3]") == ast.Main(ast.Block([
ast.Statement(ast.Array([
ast.ConstantInt(1),
ast.Hash([(ast.ConstantSymbol("abc"), ast.ConstantInt(3))])
]))
]))
def test_subscript(self, space):
assert space.parse("[1][0]") == ast.Main(ast.Block([
ast.Statement(ast.Send(ast.Array([ast.ConstantInt(1)]), "[]", [ast.ConstantInt(0)], None, 1))
]))
assert space.parse("self[i]") == ast.Main(ast.Block([
ast.Statement(ast.Send(ast.Self(1), "[]", [ast.Send(ast.Self(1), "i", [], None, 1)], None, 1))
]))
assert space.parse("self[i].to_s") == ast.Main(ast.Block([
ast.Statement(ast.Send(ast.Send(ast.Self(1), "[]", [ast.Send(ast.Self(1), "i", [], None, 1)], None, 1), "to_s", [], None, 1))
]))
assert space.parse("a[:a][:a]") == ast.Main(ast.Block([
ast.Statement(ast.Send(
ast.Send(
ast.Send(ast.Self(1), "a", [], None, 1),
"[]",
[ast.ConstantSymbol("a")],
None,
1
),
"[]",
[ast.ConstantSymbol("a")],
None,
1,
))
]))
assert space.parse("x.y[0]") == ast.Main(ast.Block([
ast.Statement(ast.Send(
ast.Send(ast.Send(ast.Self(1), "x", [], None, 1), "y", [], None, 1),
"[]",
[ast.ConstantInt(0)],
None,
1,
))
]))
assert space.parse("r[0, 0]") == ast.Main(ast.Block([
ast.Statement(ast.Send(
ast.Send(ast.Self(1), "r", [], None, 1),
"[]",
[ast.ConstantInt(0), ast.ConstantInt(0)],
None,
1,
))
]))
assert space.parse("r[]") == ast.Main(ast.Block([
ast.Statement(ast.Send(ast.Send(ast.Self(1), "r", [], None, 1), "[]", [], None, 1))
]))
assert space.parse("f()[]") == ast.Main(ast.Block([
ast.Statement(ast.Send(ast.Send(ast.Self(1), "f", [], None, 1), "[]", [], None, 1))
]))
    def test_subscript_assignment(self, space):
assert space.parse("x[0] = 5") == ast.Main(ast.Block([
ast.Statement(ast.Assignment(ast.Subscript(ast.Send(ast.Self(1), "x", [], None, 1), [ast.ConstantInt(0)], 1), ast.ConstantInt(5)))
]))
assert space.parse("x[] = 5") == ast.Main(ast.Block([
ast.Statement(ast.Assignment(ast.Subscript(ast.Send(ast.Self(1), "x", [], None, 1), [], 1), ast.ConstantInt(5)))
]))
def test_subscript_augmented_assignment(self, space):
assert space.parse("x[] += 5") == ast.Main(ast.Block([
ast.Statement(ast.AugmentedAssignment("+", ast.Subscript(ast.Send(ast.Self(1), "x", [], None, 1), [], 1), ast.ConstantInt(5)))
]))
def test_def(self, space):
assert space.parse("def f() end") == ast.Main(ast.Block([
ast.Statement(ast.Function(None, "f", [], None, None, ast.Nil()))
]))
r = space.parse("""
def
f
end
""")
assert r == ast.Main(ast.Block([
ast.Statement(ast.Function(None, "f", [], None, None, ast.Nil()))
]))
assert space.parse("def []; end") == ast.Main(ast.Block([
ast.Statement(ast.Function(None, "[]", [], None, None, ast.Nil()))
]))
assert space.parse("def []=; end") == ast.Main(ast.Block([
ast.Statement(ast.Function(None, "[]=", [], None, None, ast.Nil()))
]))
assert space.parse("def f(a, b) a + b end") == ast.Main(ast.Block([
ast.Statement(ast.Function(None, "f", [ast.Argument("a"), ast.Argument("b")], None, None, ast.Block([
ast.Statement(ast.Send(ast.Variable("a", 1), "+", [ast.Variable("b", 1)], None, 1))
])))
]))
r = space.parse("""
def f(a)
puts a
puts a
puts a
end
""")
assert r == ast.Main(ast.Block([
ast.Statement(ast.Function(None, "f", [ast.Argument("a")], None, None, ast.Block([
ast.Statement(ast.Send(ast.Self(3), "puts", [ast.Variable("a", 3)], None, 3)),
ast.Statement(ast.Send(ast.Self(4), "puts", [ast.Variable("a", 4)], None, 4)),
ast.Statement(ast.Send(ast.Self(5), "puts", [ast.Variable("a", 5)], None, 5)),
])))
]))
assert space.parse("x = def f() end") == ast.Main(ast.Block([
ast.Statement(ast.Assignment(ast.Variable("x", 1), ast.Function(None, "f", [], None, None, ast.Nil())))
]))
r = space.parse("""
def f a, b
a + b
end
""")
assert r == ast.Main(ast.Block([
ast.Statement(ast.Function(None, "f", [ast.Argument("a"), ast.Argument("b")], None, None, ast.Block([
ast.Statement(ast.Send(ast.Variable("a", 3), "+", [ast.Variable("b", 3)], None, 3))
])))
]))
r = space.parse("""
def f(&b)
b
end
""")
assert r == ast.Main(ast.Block([
ast.Statement(ast.Function(None, "f", [], None, "b", ast.Block([
ast.Statement(ast.Variable("b", 3))
])))
]))
r = space.parse("""
def f(a=nil, *b)
end
""")
assert r == ast.Main(ast.Block([
ast.Statement(ast.Function(None, "f", [ast.Argument("a", ast.Nil())], "b", None, ast.Nil()))
]))
r = space.parse("""
def f(a, b=nil, *c)
end
""")
assert r == ast.Main(ast.Block([
ast.Statement(ast.Function(None, "f", [ast.Argument("a"), ast.Argument("b", ast.Nil())], "c", None, ast.Nil()))
]))
with self.raises(space, "SyntaxError"):
space.parse("""
def f(&b, a)
b
end
""")
with self.raises(space, "SyntaxError"):
space.parse("""
def f(&b, &c)
b
end
""")
assert space.parse("def f(*a,b,&blk); end") == ast.Main(ast.Block([
ast.Statement(ast.Function(
None,
"f",
[],
"2",
"blk",
ast.Block([ast.Statement(
ast.MultiAssignment(
ast.MultiAssignable([
ast.Splat(ast.Variable("a", -1)),
ast.Variable("b", -1),
]),
ast.Variable("2", -1)
)
)])
))
]))
def test_def_names(self, space):
def test_name(s):
r = space.parse("""
def %s
end
""" % s)
assert r == ast.Main(ast.Block([
ast.Statement(ast.Function(None, s, [], None, None, ast.Nil()))
]))
test_name("abc")
test_name("<=>")
test_name("foo=")
test_name("===")
test_name(">")
test_name("<")
test_name(">=")
test_name("<=")
test_name("==")
test_name("=~")
test_name("<<")
test_name("-")
def test_string(self, space):
assert space.parse('"abc"') == ast.Main(ast.Block([
ast.Statement(ast.ConstantString("abc"))
]))
assert space.parse('"abc".size') == ast.Main(ast.Block([
ast.Statement(ast.Send(ast.ConstantString("abc"), "size", [], None, 1))
]))
assert space.parse("'abc'") == ast.Main(ast.Block([
ast.Statement(ast.ConstantString("abc"))
]))
assert space.parse('"\n"') == ast.Main(ast.Block([
ast.Statement(ast.ConstantString("\n"))
]))
assert space.parse('"\\n"') == ast.Main(ast.Block([
ast.Statement(ast.ConstantString("\n"))
]))
assert space.parse("'\\n'") == ast.Main(ast.Block([
ast.Statement(ast.ConstantString("\\n"))
]))
assert space.parse("?-") == ast.Main(ast.Block([
ast.Statement(ast.ConstantString("-"))
]))
assert space.parse('""') == ast.Main(ast.Block([
ast.Statement(ast.ConstantString(""))
]))
assert space.parse("'\\'<>'") == ast.Main(ast.Block([
ast.Statement(ast.ConstantString("'<>"))
]))
assert space.parse('"\\"<>"') == ast.Main(ast.Block([
ast.Statement(ast.ConstantString('"<>'))
]))
def test_escape_character(self, space):
string = lambda content: ast.Main(ast.Block([
ast.Statement(ast.ConstantString(content))
]))
assert space.parse('?\\\\') == string("\\")
assert space.parse('?\\n') == string("\n")
    # That gives us a hash of the latest checked in version of the files that
# the user will actually be executing. Specifically, GetRevisionId()
# doesn't appear to change even if a user checks out a different version
# of the hooks repo (via git checkout) nor if a user commits their own revs.
#
# NOTE: Local (non-committed) changes will not be factored into this hash.
# I think this is OK, since we're really only worried about warning the user
# about upstream changes.
return self._hooks_project.work_git.rev_parse('HEAD')
def _GetMustVerb(self):
"""Return 'must' if the hook is required; 'should' if not."""
if self._abort_if_user_denies:
return 'must'
else:
return 'should'
def _CheckForHookApproval(self):
"""Check to see whether this hook has been approved.
We'll accept approval of manifest URLs if they're using secure transports.
This way the user can say they trust the manifest hoster. For insecure
hosts, we fall back to checking the hash of the hooks repo.
Note that we ask permission for each individual hook even though we use
the hash of all hooks when detecting changes. We'd like the user to be
able to approve / deny each hook individually. We only use the hash of all
hooks because there is no other easy way to detect changes to local imports.
Returns:
True if this hook is approved to run; False otherwise.
Raises:
HookError: Raised if the user doesn't approve and abort_if_user_denies
          was passed to the constructor.
"""
if self._ManifestUrlHasSecureScheme():
return self._CheckForHookApprovalManifest()
else:
return self._CheckForHookApprovalHash()
def _CheckForHookApprovalHelper(self, subkey, new_val, main_prompt,
changed_prompt):
"""Check for approval for a particular attribute and hook.
Args:
subkey: The git config key under [repo.hooks.<hook_type>] to store the
last approved string.
new_val: The new value to compare against the last approved one.
main_prompt: Message to display to the user to ask for approval.
changed_prompt: Message explaining why we're re-asking for approval.
Returns:
True if this hook is approved to run; False otherwise.
Raises:
HookError: Raised if the user doesn't approve and abort_if_user_denies
          was passed to the constructor.
"""
hooks_config = self._hooks_project.config
git_approval_key = 'repo.hooks.%s.%s' % (self._hook_type, subkey)
# Get the last value that the user approved for this hook; may be None.
old_val = hooks_config.GetString(git_approval_key)
if old_val is not None:
# User previously approved hook and asked not to be prompted again.
if new_val == old_val:
# Approval matched. We're done.
return True
else:
# Give the user a reason why we're prompting, since they last told
# us to "never ask again".
prompt = 'WARNING: %s\n\n' % (changed_prompt,)
else:
prompt = ''
    # Prompt the user if we're on a tty; if we're not on a tty, assume "no".
if sys.stdout.isatty():
prompt += main_prompt + ' (yes/always/NO)? '
response = input(prompt).lower()
print()
# User is doing a one-time approval.
if response in ('y', 'yes'):
return True
elif response == 'always':
hooks_config.SetString(git_approval_key, new_val)
return True
# For anything else, we'll assume no approval.
if self._abort_if_user_denies:
raise HookError('You must allow the %s hook or use --no-verify.' %
self._hook_type)
return False
def _ManifestUrlHasSecureScheme(self):
"""Check if the URI for the manifest is a secure transport."""
secure_schemes = ('file', 'https', 'ssh', 'persistent-https', 'sso', 'rpc')
parse_results = urllib.parse.urlparse(self._manifest_url)
return parse_results.scheme in secure_schemes
def _CheckForHookApprovalManifest(self):
"""Check whether the user has approved this manifest host.
Returns:
True if this hook is approved to run; False otherwise.
"""
return self._CheckForHookApprovalHelper(
'approvedmanifest',
self._manifest_url,
'Run hook scripts from %s' % (self._manifest_url,),
'Manifest URL has changed since %s was allowed.' % (self._hook_type,))
def _CheckForHookApprovalHash(self):
"""Check whether the user has approved the hooks repo.
Returns:
True if this hook is approved to run; False otherwise.
"""
prompt = ('Repo %s run the script:\n'
' %s\n'
'\n'
'Do you want to allow this script to run')
return self._CheckForHookApprovalHelper(
'approvedhash',
self._GetHash(),
prompt % (self._GetMustVerb(), self._script_fullpath),
'Scripts have changed since %s was allowed.' % (self._hook_type,))
def _ExecuteHook(self, **kwargs):
"""Actually execute the given hook.
This will run the hook's 'main' function in our python interpreter.
Args:
kwargs: Keyword arguments to pass to the hook. These are often specific
to the hook type. For instance, pre-upload hooks will contain
a project_list.
"""
# Keep sys.path and CWD stashed away so that we can always restore them
# upon function exit.
orig_path = os.getcwd()
orig_syspath = sys.path
try:
# Always run hooks with CWD as topdir.
os.chdir(self._topdir)
# Put the hook dir as the first item of sys.path so hooks can do
# relative imports. We want to replace the repo dir as [0] so
# hooks can't import repo files.
sys.path = [os.path.dirname(self._script_fullpath)] + sys.path[1:]
# Exec, storing global context in the context dict. We catch exceptions
# and convert to a HookError w/ just the failing traceback.
context = {'__file__': self._script_fullpath}
try:
exec(compile(open(self._script_fullpath).read(),
self._script_fullpath, 'exec'), context)
except Exception:
raise HookError('%s\nFailed to import %s hook; see traceback above.' %
(traceback.format_exc(), self._hook_type))
# Running the script should have defined a main() function.
if 'main' not in context:
raise HookError('Missing main() in: "%s"' % self._script_fullpath)
# Add 'hook_should_take_kwargs' to the arguments to be passed to main.
# We don't actually want hooks to define their main with this argument--
# it's there to remind them that their hook should always take **kwargs.
# For instance, a pre-upload hook should be defined like:
# def main(project_list, **kwargs):
#
# This allows us to later expand the API without breaking old hooks.
kwargs = kwargs.copy()
kwargs['hook_should_take_kwargs'] = True
# Call the main function in the hook. If the hook should cause the
      # build to fail, it will raise an Exception. We'll catch that and convert
# to a HookError w/ just the failing traceback.
try:
context['main'](**kwargs)
except Exception:
raise HookError('%s\nFailed to run main() for %s hook; see traceback '
'above.' % (traceback.format_exc(),
self._hook_type))
finally:
# Restore sys.path and CWD.
sys.path = orig_syspath
os.chdir(orig_path)
def Run(self, user_allows_all_hooks, **kwargs):
"""Run the hook.
If the hook doesn't exist (because there is no hooks project or because
this particular hook is not enabled), this is a no-op.
Args:
user_allows_all_hooks: If True, we will never prompt about running the
hook--we'll just assume it's OK to run it.
kwargs: Keyword arguments to pass to the hook. These are often specific
to the hook type. For instance, pre-upload hooks will contain
a project_list.
Raises:
HookError: If there was a problem finding the hook or the user declined
to run a required hook (from _CheckForHookApproval).
"""
# No-op if there is no hooks project or if hook is disabled.
if ((not self._hooks_project) or (self._hook_type not in
self._hooks_project.enabled_repo_hooks)):
return
# Bail with a nice error if we can't find the hook.
if not os.path.isfile(self._script_fullpath):
raise HookError('Couldn\'t find repo hook: "%s"' % self._script_fullpath)
# Make sure the user is OK with running the hook.
if (not user_allows_all_hooks) and (not self._CheckForHookApproval()):
return
# Run the hook with the same version of python we're using.
self._ExecuteHook(**kwargs)
class Project(object):
# These objects can be shared between several working trees.
shareable_files = ['description', 'info']
shareable_dirs = ['hooks', 'objects', 'rr-cache', 'svn']
# These objects can only be used by a single working tree.
working_tree_files = ['config', 'packed-refs', 'shallow']
working_tree_dirs = ['logs', 'refs']
def __init__(self,
manifest,
name,
remote,
gitdir,
objdir,
worktree,
relpath,
revisionExpr,
revisionId,
rebase=True,
groups=None,
sync_c=False,
sync_s=False,
sync_tags=True,
clone_depth=None,
upstream=None,
parent=None,
is_derived=False,
dest_branch=None,
optimized_fetch=False,
old_revision=None):
"""Init a Project object.
Args:
manifest: The XmlManifest object.
name: The `name` attribute of manifest.xml's project element.
remote: RemoteSpec object specifying its remote's properties.
gitdir: Absolute path of git directory.
objdir: Absolute path of directory to store git objects.
worktree: Absolute path of git working tree.
relpath: Relative path of git working tree to repo's top directory.
revisionExpr: The `revision` attribute of manifest.xml's project element.
revisionId: git commit id for checking out.
rebase: The `rebase` attribute of manifest.xml's project element.
groups: The `groups` attribute of manifest.xml's project element.
sync_c: The `sync-c` attribute of manifest.xml's project element.
sync_s: The `sync-s` attribute of manifest.xml's project element.
      sync_tags: The `sync-tags` attribute of manifest.xml's project element.
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <node> element')
self.node.append(obj_)
elif nodeName_ == 'rootedge':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <rootedge> element')
self.set_rootedge(obj_)
elif nodeName_ == 'edge':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <edge> element')
self.edge.append(obj_)
elif nodeName_ == 'set':
obj_ = NodeAndRootEdgeAndEdgeSet.factory()
obj_.build(child_)
self.set.append(obj_)
super(AbstractTree, self).buildChildren(child_, node, nodeName_, True)
# end class AbstractTree
class AbstractRootEdge(IDTagged):
"""The AbstractRootEdge complex type is a superclass for the edge that
leads into a root, i.e. an edge with only a target attribute,
but no source attribute. This type of edge is used for
coalescent trees, where the initial lineage has a certain length
before things start splitting up."""
subclass = None
superclass = IDTagged
def __init__(self, about=None, meta=None, label=None, id=None, length=None, target=None, valueOf_=None):
super(AbstractRootEdge, self).__init__(about, meta, label, id, )
self.length = _cast(None, length)
self.target = _cast(None, target)
pass
def factory(*args_, **kwargs_):
if AbstractRootEdge.subclass:
return AbstractRootEdge.subclass(*args_, **kwargs_)
else:
return AbstractRootEdge(*args_, **kwargs_)
factory = staticmethod(factory)
def get_length(self): return self.length
def set_length(self, length): self.length = length
def get_target(self): return self.target
def set_target(self, target): self.target = target
def export(self, outfile, level, namespace_='', name_='AbstractRootEdge', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractRootEdge')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractRootEdge"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractRootEdge'):
super(AbstractRootEdge, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractRootEdge')
if self.length is not None and 'length' not in already_processed:
already_processed.append('length')
outfile.write(' length=%s' % (quote_attrib(self.length), ))
if self.target is not None and 'target' not in already_processed:
already_processed.append('target')
outfile.write(' target=%s' % (self.gds_format_string(quote_attrib(self.target).encode(ExternalEncoding), input_name='target'), ))
def exportChildren(self, outfile, level, namespace_='', name_='AbstractRootEdge', fromsubclass_=False):
super(AbstractRootEdge, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(AbstractRootEdge, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractRootEdge'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.length is not None and 'length' not in already_processed:
already_processed.append('length')
showIndent(outfile, level)
outfile.write('length = %s,\n' % (self.length,))
if self.target is not None and 'target' not in already_processed:
already_processed.append('target')
showIndent(outfile, level)
outfile.write('target = "%s",\n' % (self.target,))
super(AbstractRootEdge, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractRootEdge, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('length', node)
if value is not None and 'length' not in already_processed:
already_processed.append('length')
self.length = value
value = find_attr_value_('target', node)
if value is not None and 'target' not in already_processed:
already_processed.append('target')
self.target = value
super(AbstractRootEdge, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(AbstractRootEdge, self).buildChildren(child_, node, nodeName_, True)
pass
# end class AbstractRootEdge
class AbstractEdge(IDTagged):
"""The AbstractEdge superclass is what concrete edges inherit from by
restriction. It represents an edge element much like that of
GraphML, i.e. an element that connects node elements."""
subclass = None
superclass = IDTagged
def __init__(self, about=None, meta=None, label=None, id=None, source=None, length=None, target=None, valueOf_=None):
super(AbstractEdge, self).__init__(about, meta, label, id, )
self.source = _cast(None, source)
self.length = _cast(None, length)
self.target = _cast(None, target)
pass
def factory(*args_, **kwargs_):
if AbstractEdge.subclass:
return AbstractEdge.subclass(*args_, **kwargs_)
else:
return AbstractEdge(*args_, **kwargs_)
factory = staticmethod(factory)
def get_source(self): return self.source
def set_source(self, source): self.source = source
def get_length(self): return self.length
def set_length(self, length): self.length = length
def get_target(self): return self.target
def set_target(self, target): self.target = target
def export(self, outfile, level, namespace_='', name_='AbstractEdge', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractEdge')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractEdge"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractEdge'):
super(AbstractEdge, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractEdge')
if self.source is not None and 'source' not in already_processed:
already_processed.append('source')
outfile.write(' source=%s' % (self.gds_format_string(quote_attrib(self.source).encode(ExternalEncoding), input_name='source'), ))
if self.length is not None and 'length' not in already_processed:
already_processed.append('length')
outfile.write(' length=%s' % (quote_attrib(self.length), ))
if self.target is not None and 'target' not in already_processed:
already_processed.append('target')
outfile.write(' target=%s' % (self.gds_format_string(quote_attrib(self.target).encode(ExternalEncoding), input_name='target'), ))
def exportChildren(self, outfile, level, namespace_='', name_='AbstractEdge', fromsubclass_=False):
super(AbstractEdge, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(AbstractEdge, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractEdge'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.source is not None and 'source' not in already_processed:
already_processed.append('source')
showIndent(outfile, level)
outfile.write('source = "%s",\n' % (self.source,))
if self.length is not None and 'length' not in already_processed:
already_processed.append('length')
showIndent(outfile, level)
outfile.write('length = %s,\n' % (self.length,))
if self.target is not None and 'target' not in already_processed:
already_processed.append('target')
showIndent(outfile, level)
outfile.write('target = "%s",\n' % (self.target,))
super(AbstractEdge, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractEdge, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('source', node)
if value is not None and 'source' not in already_processed:
already_processed.append('source')
self.source = value
value = find_attr_value_('length', node)
if value is not None and 'length' not in already_processed:
already_processed.append('length')
self.length = value
value = find_attr_value_('target', node)
if value is not None and 'target' not in already_processed:
already_processed.append('target')
self.target = value
super(AbstractEdge, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(AbstractEdge, self).buildChildren(child_, node, nodeName_, True)
pass
# end class AbstractEdge
class IntTree(AbstractTree):
"""A concrete tree implementation, with integer edge lengths."""
subclass = None
superclass = AbstractTree
def __init__(self, about=None, meta=None, label=None, id=None, node=None, rootedge=None, edge=None, set=None, valueOf_=None):
super(IntTree, self).__init__(about, meta, label, id, node, rootedge, edge, set, )
if meta is None:
self.meta = []
else:
self.meta = meta
if node is None:
self.node = []
else:
self.node = node
self.rootedge = rootedge
if edge is None:
self.edge = []
else:
self.edge = edge
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if IntTree.subclass:
return IntTree.subclass(*args_, **kwargs_)
else:
return IntTree(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_node(self): return self.node
def set_node(self, node): self.node = node
def add_node(self, value): self.node.append(value)
def insert_node(self, index, value): self.node[index] = value
def get_rootedge(self): return self.rootedge
def set_rootedge(self, rootedge): self.rootedge = rootedge
def get_edge(self): return self.edge
def set_edge(self, edge): self.edge = edge
def add_edge(self, value): self.edge.append(value)
def insert_edge(self, index, value): self.edge[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='IntTree', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='IntTree')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='IntTree'):
super(IntTree, self).exportAttributes(outfile, level, already_processed, namespace_, name_='IntTree')
def exportChildren(self, outfile, level, namespace_='', name_='IntTree', fromsubclass_=False):
for meta_ in self.get_meta():
meta_.export(outfile, level, namespace_, name_='meta')
for node_ in self.node:
node_.export(outfile, level, namespace_, name_='node')
if self.rootedge:
self.rootedge.export(outfile, level, namespace_, name_='rootedge')
for edge_ in self.edge:
edge_.export(outfile, level, namespace_, name_='edge')
        for set_ in self.set:
            set_.export(outfile, level, namespace_, name_='set')
# -*- coding:utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django import forms
from CadVlan.messages import error_messages
from CadVlan.Net.business import is_valid_ipv4
from CadVlan.Net.business import is_valid_ipv6
from CadVlan.Util.forms import fields
from CadVlan.Util.forms.decorators import autostrip
@autostrip
class GenerateTokenForm(forms.Form):
user = forms.CharField(required=True)
requestVip = forms.CharField(required=False)
p = forms.CharField(required=False)
is_ldap_user = forms.BooleanField(required=False)
@autostrip
class PoolForm(forms.Form):
def __init__(self, enviroments_choices, optionsvips_choices,
servicedownaction_choices, *args, **kwargs):
super(PoolForm, self).__init__(*args, **kwargs)
self.fields['environment'].choices = enviroments_choices
self.fields['balancing'].choices = optionsvips_choices
self.fields['servicedownaction'].choices = servicedownaction_choices
identifier = forms.CharField(
label=u'Identifier',
min_length=3,
max_length=200,
required=True,
error_messages=error_messages,
widget=forms.TextInput(
attrs={
"style": "width: 300px"}
)
)
default_port = forms.IntegerField(
label=u'Default Port',
min_value=1,
max_value=65535,
required=True,
error_messages=error_messages,
widget=forms.TextInput(
attrs={
"style": "width: 100px"}
)
)
environment = forms.ChoiceField(
label=u'Environment',
choices=[],
required=True,
error_messages=error_messages,
widget=forms.Select(attrs={
"style": "",
"style": "width: 310px",
'class': 'select2'}
)
)
balancing = forms.ChoiceField(
label=u'Balanceamento',
choices=[],
required=True,
error_messages=error_messages,
widget=forms.Select(attrs={
"style": "width: 310px",
'class': 'select2'}
)
)
servicedownaction = forms.ChoiceField(
label=u'Action on ServiceDown',
choices=[],
required=True,
error_messages=error_messages,
widget=forms.Select(attrs={
"style": "width: 310px",
'class': 'select2'}
)
)
maxcon = forms.IntegerField(
label=u'Número máximo de conexões (maxconn)',
required=True,
error_messages=error_messages,
widget=forms.TextInput(
attrs={
"style": "width: 100px"}
)
)
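# Illustrative usage sketch (the choice lists and field values below are made-up
# examples; in the real views these lists are fetched from the backend API):
#
#   form = PoolForm(
#       enviroments_choices=[('1', 'PROD')],
#       optionsvips_choices=[('1', 'round-robin')],
#       servicedownaction_choices=[('1', 'none')],
#       data={'identifier': 'pool_web_8080', 'default_port': '8080',
#             'environment': '1', 'balancing': '1',
#             'servicedownaction': '1', 'maxcon': '100'})
#   form.is_valid()  # expected to validate for this data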
@autostrip
class PoolHealthcheckForm(forms.Form):
def __init__(self, healthcheck_choices=[], *args, **kwargs):
super(PoolHealthcheckForm, self).__init__(*args, **kwargs)
self.fields['healthcheck'].choices = healthcheck_choices
healthcheck = forms.ChoiceField(
label=u'HealthCheck',
choices=[],
required=False,
error_messages=error_messages,
widget=forms.Select(attrs={
"style": "width: 310px",
'class': 'select2'}
)
)
healthcheck_request = forms.CharField(
label=u'Healthcheck Request',
required=False,
error_messages=error_messages,
widget=forms.TextInput(
attrs={
"style": "width: 310px"}
)
)
healthcheck_expect = forms.CharField(
label=u'HTTP Expect String',
required=False,
error_messages=error_messages,
widget=forms.TextInput(
attrs={
"style": "width: 310px"}
)
)
healthcheck_destination = forms.IntegerField(
label=u'Porta',
min_value=1,
max_value=65535,
required=False,
error_messages=error_messages,
widget=forms.TextInput(
attrs={
"style": "width: 100px"}
)
)
@autostrip
class RequestVipBasicForm(forms.Form):
def __init__(self, forms_aux, *args, **kwargs):
super(RequestVipBasicForm, self).__init__(*args, **kwargs)
business = forms.CharField(
label=u'Área de negócio',
min_length=3,
max_length=100,
required=True,
error_messages=error_messages,
widget=forms.TextInput(attrs={'style': "width: 300px"}))
service = forms.CharField(
label=u'Nome do serviço',
min_length=3,
max_length=100,
required=True,
error_messages=error_messages,
widget=forms.TextInput(attrs={'style': "width: 300px"}))
name = forms.CharField(
label=u'Nome do VIP (Host FQDN)',
min_length=3,
max_length=100,
required=True,
error_messages=error_messages,
widget=forms.TextInput(attrs={'style': "width: 300px"}))
created = forms.BooleanField(
label="",
required=False,
widget=forms.HiddenInput(),
error_messages=error_messages)
@autostrip
class RequestVipGroupUsersForm(forms.Form):
def __init__(self, forms_aux, edit, *args, **kwargs):
super(RequestVipGroupUsersForm, self).__init__(*args, **kwargs)
self.fields['group_users'].choices = [(gu["id"], gu["nome"]) for gu in forms_aux["group_users"]["user_group"]]
if not edit:
del self.fields['overwrite']
else:
self.fields['overwrite'].check_test = False
group_users = forms.MultipleChoiceField(
label=u'Grupo de usuários',
required=False,
error_messages=error_messages,
widget=forms.SelectMultiple(attrs={'style': "width: 310px"})
)
overwrite = forms.BooleanField(
label='Sobrescrever permissões?',
required=False,
error_messages=error_messages,
widget=forms.CheckboxInput()
)
@autostrip
class PoolModalGroupUsersForm(forms.Form):
def __init__(self, forms_aux, edit, *args, **kwargs):
super(PoolModalGroupUsersForm, self).__init__(*args, **kwargs)
self.fields['group_users_modal'].choices = [(gu["id"], gu["nome"]) for gu in forms_aux["group_users"]["user_group"]]
if not edit:
del self.fields['overwrite']
else:
self.fields['overwrite'].check_test = False
group_users_modal = forms.MultipleChoiceField(
label=u'Grupo de usuários',
required=False,
error_messages=error_messages,
widget=forms.SelectMultiple(attrs={'style': "width: 310px"})
)
overwrite = forms.BooleanField(
label='Sobrescrever permissões?',
required=False,
error_messages=error_messages,
widget=forms.CheckboxInput()
)
@autostrip
class RequestVipEnvironmentVipForm(forms.Form):
def __init__(self, forms_aux, *args, **kwargs):
super(RequestVipEnvironmentVipForm, self).__init__(*args, **kwargs)
if forms_aux.get('finalities'):
self.fields['step_finality'].choices = \
[(env['finalidade_txt'], env["finalidade_txt"]) for env in forms_aux["finalities"]]
self.fields['step_finality'].choices.insert(0, ('', ''))
if forms_aux.get('clients'):
self.fields['step_client'].choices = \
[(env['cliente_txt'], env["cliente_txt"]) for env in forms_aux["clients"]]
self.fields['step_client'].choices.insert(0, ('', ''))
if forms_aux.get('environments'):
self.fields['step_environment'].choices = \
[(env['ambiente_p44_txt'], {
'label': env["ambiente_p44_txt"],
'attrs':{'attr': env["id"]}
}) for env in forms_aux["environments"]]
self.fields['step_environment'].choices.insert(0, ('', ''))
step_finality = forms.ChoiceField(
label=u'Finalidade',
required=True,
error_messages=error_messages,
widget=forms.Select(attrs={
"style": "width: 300px",
'class': 'select2'}))
step_client = forms.ChoiceField(
label=u'Cliente',
required=True,
error_messages=error_messages,
widget=forms.Select(attrs={
"style": "width: 300px",
'class': 'select2'}))
step_environment = forms.ChoiceField(
label=u'Ambiente',
required=True,
error_messages=error_messages,
widget=fields.SelectWithAttr(attrs={
"style": "width: 300px",
'class': 'select2'}))
environment_vip = forms.IntegerField(
label="",
required=False,
widget=forms.HiddenInput(),
error_messages=error_messages)
@autostrip
class RequestVipOptionVipForm(forms.Form):
logger = logging.getLogger(__name__)
def __init__(self, forms_aux, *args, **kwargs):
super(RequestVipOptionVipForm, self).__init__(*args, **kwargs)
if forms_aux.get('timeout'):
self.fields['timeout'].choices = \
[(env['id'], env["nome_opcao_txt"]) for env in forms_aux["timeout"]]
if forms_aux.get('caches'):
self.fields['caches'].choices = \
[(env['id'], env["nome_opcao_txt"]) for env in forms_aux["caches"]]
if forms_aux.get('persistence'):
self.fields['persistence'].choices = \
[(env['id'], env["nome_opcao_txt"]) for env in forms_aux["persistence"]]
if forms_aux.get('trafficreturn'):
self.fields['trafficreturn'].choices = \
[(env['id'], env["nome_opcao_txt"]) for env in forms_aux["trafficreturn"]]
timeout = forms.ChoiceField(
label="Timeout",
required=True,
error_messages=error_messages,
widget=forms.Select(attrs={
"style": "width: 300px",
'class': 'select2'}))
caches = forms.ChoiceField(
label="Grupos de caches",
required=True,
error_messages=error_messages,
widget=forms.Select(attrs={
"style": "width: 300px",
'class': 'select2'}))
persistence = forms.ChoiceField(
label="Persistência",
required=True,
error_messages=error_messages,
widget=forms.Select(attrs={
"style": "width: 300px",
'class': 'select2'}))
trafficreturn = forms.ChoiceField(
label="Traffic Return",
required=True,
error_messages=error_messages,
widget=forms.Select(attrs={
"style": "width: 300px",
'class': 'select2'}))
@autostrip
class RequestVipOptionVipEditForm(forms.Form):
logger = logging.getLogger(__name__)
def __init__(self, forms_aux, *args, **kwargs):
super(RequestVipOptionVipEditForm, self).__init__(*args, **kwargs)
if forms_aux.get('timeout'):
self.fields['timeout'].choices = \
[(env['id'], env["nome_opcao_txt"])
for env in forms_aux["timeout"] if env]
if forms_aux.get('caches'):
self.fields['caches'].choices = \
[(env['id'], env["nome_opcao_txt"])
for env in forms_aux["caches"] if env]
if forms_aux.get('persistence'):
self.fields['persistence'].choices = \
[(env['id'], env["nome_opcao_txt"])
for env in forms_aux["persistence"] if env]
if forms_aux.get('trafficreturn'):
self.fields['trafficreturn'].choices = \
[(env['id'], env["nome_opcao_txt"])
for env in forms_aux["trafficreturn"] if env]
environment_vip = forms.IntegerField(
label="",
required=False,
widget=forms.HiddenInput(),
error_messages=error_messages)
timeout = forms.ChoiceField(
label="Timeout",
required=True,
error_messages=error_messages,
widget=forms.Select(attrs={
"style": "width: 300px",
'class': 'select2'}))
caches = forms.ChoiceField(
label="Grupos de caches",
required=False,
error_messages=error_messages,
widget=forms.Select(attrs={
"style": "width: 300px",
'class': 'select2',
"readonly": "true"}))
persistence = forms.ChoiceField(
label="Persistência",
required=True,
error_messages=error_messages,
widget=forms.Select(attrs={
"style": "width: 300px",
'class': 'select2'}))
trafficreturn = forms.ChoiceField(
label="Traffic Return",
required=False,
error_messages=error_messages,
widget=forms.Select(attrs={
"style": "width: 300px",
'class': 'select2',
"readonly": "true"}))
class RequestVipPortOptionVipForm(forms.Form):
logger = logging.getLogger(__name__)
def __init__(self, forms_aux, *args, **kwargs):
super(RequestVipPortOptionVipForm, self).__init__(*args, **kwargs)
if forms_aux.get('l4_protocol'):
self.fields['l4_protocol'].choices = [(env['id'], env["nome_opcao_txt"]) for env in forms_aux["l4_protocol"]]
self.fields['l4_protocol'].choices.insert(0, ('', ''))
if forms_aux.get('l7_protocol'):
self.fields['l7_protocol'].choices = [(env['id'], env["nome_opcao_txt"]) for env in forms_aux["l7_protocol"]]
self.fields['l7_protocol'].choices.insert(0, ('', ''))
if forms_aux.get('l7_rule'):
self.fields['l7_rule'].choices = [(env['id'], env["nome_opcao_txt"]) for env in forms_aux["l7_rule"]]
self.fields['l7_rule'].choices.insert(0, ('', ''))
if forms_aux.get('pools'):
self.fields['pools'].choices = [(env['id'], env["identifier"]) for env in forms_aux["pools"]['server_pools']]
self.fields['pools'].choices.insert(0, ('', ''))
port_vip = forms.ChoiceField(
label="Porta Vip",
required=False,
error_messages=error_messages,
widget=forms.TextInput(attrs={
"style": "width: 50px"}))
l4_protocol = forms.ChoiceField(
label="Protocolo L4",
required=False,
error_messages=error_messages,
widget=forms.Select(attrs={
"style": "width: 150px",
'class': 'select2'}))
l7_protocol = forms.ChoiceField(
label="Protocolo L7",
required=False,
error_messages=error_messages,
widget=forms.Select(attrs={
"style": "width: 150px",
'class': 'select2'}))
l7_rule_check = forms.BooleanField(
label=u'Tem regra de L7?', required=False, error_messages=error_messages)
order = forms.ChoiceField(
label="Posição do L7",
required=False,
error_messages=error_messages,
widget=forms.TextInput(attrs={
"style": "width: 50px"}))
l7_rule = forms.ChoiceField(
label="Tipo Regra de L7",
required=False,
error_messages=error_messages,
widget=forms.Select(attrs={
"style": "width: 150px",
'class': 'select2'}))
l7_value = forms.ChoiceField(
label="Valor de L7",
required=False,
error_messages=error_messages,
widget=forms.TextInput(attrs={
"style": "width: 50px"}))
pools = forms.ChoiceField(
label=u'Pools',
required=False,
error_messages=error_messages,
widget=forms.Select(attrs={
"style": "width: 300px",
'class': 'select2'}))
@autostrip
class RequestVipIPForm(forms.Form):
def __init__(self, forms_aux, *args, **kwargs):
super(RequestVipIPForm, self).__init__(*args, **kwargs)
CHOICES = (('0', 'Alocar automaticamente'), ('1', 'Especificar IP'))
ipv4_check = forms.BooleanField(
label=u'IPv4',
required=False,
error_messages=error_messages)
ipv4_type = forms.ChoiceField(
label=u'',
required=False,
error_messages=error_messages,
choices=CHOICES,
widget=forms.RadioSelect())
ipv4_specific = forms.CharField(
label=u'',
required=False,
error_messages=error_messages,
widget=forms.TextInput(
attrs={'style': "width: 231px"}))
ipv6_check = forms.BooleanField(
label=u'IPv6',
required=False,
error_messages=error_messages)
ipv6_type = forms.ChoiceField(
label=u'',
required=False,
error_messages=error_messages,
choices=CHOICES,
widget=forms.RadioSelect())
ipv6_specific = forms.CharField(
label=u'',
required=False,
error_messages=error_messages,
widget=forms.TextInput(
attrs={'style': "width: 231px"}))
def clean(self):
cleaned_data = self.cleaned_data
ipv4_check = cleaned_data.get("ipv4_check")
ipv6_check = cleaned_data.get("ipv6_check")
if not ipv4_check and not ipv6_check:
self._errors["ipv4_check"] = self.error_class(
["Pelo menos uma opção de IP tem que ser selecionada"])
else:
if ipv4_check:
ipv4_type = cleaned_data.get("ipv4_type")
ipv4_specific = cleaned_data.get("ipv4_specific")
if ipv4_type == '1' and ipv4_specific is None:
self._errors["ipv4_specific"] = self.error_class(
["Este campo é obrigatório com a opção Especificar IP selecionada."])
elif ipv4_type == '1' and ipv4_specific is not None:
if not is_valid_ipv4(ipv4_specific):
self._errors["ipv4_specific"] = self.error_class(
["Ip não informado ou informado de forma incorreta. \
IPv4 deve ser informado no formato xxx.xxx.xxx.xxx"])
if ipv6_check:
ipv6_type = cleaned_data.get("ipv6_type")
ipv6_specific = cleaned_data.get("ipv6_specific")
if ipv6_type == '1' and ipv6_specific is None:
self._errors["ipv6_specific"] = self.error_class(
["Este campo é obrigatório com a opção Especificar IP selecionada."])
elif ipv6_type == '1' and ipv6_specific is not None:
if not is_valid_ipv6(ipv6_specific):
self._errors["ipv6_specific"] = self.error_class(
["Ip não informado ou informado de forma incorreta. \
IPv6 deve ser informado no formato xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx"])
return cleaned_data
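# A minimal usage sketch (an illustrative assumption, not part of the original
# module): with "Especificar IP" selected (ipv4_type == '1') and a malformed
# address, clean() above attaches an error to ipv4_specific and validation fails.
def _request_vip_ip_form_example():
    post_data = {'ipv4_check': 'on', 'ipv4_type': '1', 'ipv4_specific': '10.0.0'}
    form = RequestVipIPForm({}, data=post_data)
    return form.is_valid(), form.errors  # expected: (False, errors containing 'ipv4_specific')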
class SearchVipRequestForm(forms.Form):
id_request = forms.IntegerField(
label="Id Requisição",
required=False,
error_messages=error_messages,
widget=forms.TextInput(
attrs={"style": "width: 40px"}))
ipv4 = forms.CharField(
label="IPv4",
required=False,
min_length=1,
max_length=15,
error_messages=error_messages,
widget=forms.HiddenInput())
ipv6 = forms.CharField(
label="IPv6",
required=False,
min_length=1,
max_length=39,
error_messages=error_messages,
widget=forms.HiddenInput())
hostname = forms.CharField(
label="Host",
required=False,
error_messages=error_messages,
widget=forms.TextInput(
attrs={"style": "width: 240px"})
)
vip_created = forms.BooleanField(
label="Buscar apenas vips criados",
required=False,
error_messages=error_messages)
vip_with_onwer = forms.BooleanField(
label="Buscar apenas \"Meus\" vips",
required=False,
error_messages=error_messages)
def clean(self):
cleaned_data = super(SearchVipRequestForm, self).clean()
ipv4 = cleaned_data.get("ipv4")
ipv6 = cleaned_data.get("ipv6")
if ipv4 is not None and ipv6 is not None:
# coding=utf-8
# @Author : zhzhx2008
# @Time : 19-7-11
import os
import warnings
import jieba
import numpy as np
from keras import Model
from keras import backend as K
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras.engine.topology import Layer, Input
from keras.layers import Embedding, SpatialDropout1D, GlobalAveragePooling1D, Dropout, Dense, initializers, regularizers, constraints
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
# from:https://github.com/CyberZHG/keras-pos-embd/blob/master/keras_pos_embd/pos_embd.py
class PositionEmbedding(Layer):
"""Turn integers (positions) into dense vectors of fixed size.
e.g. [[-4], [10]] -> [[0.25, 0.1], [0.6, -0.2]]
Expand mode: negative integers (relative position) could be used in this mode.
# Input shape
2D tensor with shape: `(batch_size, sequence_length)`.
# Output shape
3D tensor with shape: `(batch_size, sequence_length, output_dim)`.
Add mode:
# Input shape
3D tensor with shape: `(batch_size, sequence_length, feature_dim)`.
# Output shape
3D tensor with shape: `(batch_size, sequence_length, feature_dim)`.
Concat mode:
# Input shape
3D tensor with shape: `(batch_size, sequence_length, feature_dim)`.
# Output shape
3D tensor with shape: `(batch_size, sequence_length, feature_dim + output_dim)`.
"""
MODE_EXPAND = 'expand'
MODE_ADD = 'add'
MODE_CONCAT = 'concat'
def __init__(self,
input_dim,
output_dim,
mode=MODE_EXPAND,
embeddings_initializer='uniform',
embeddings_regularizer=None,
activity_regularizer=None,
embeddings_constraint=None,
mask_zero=False,
**kwargs):
"""
:param input_dim: The maximum absolute value of positions.
:param output_dim: The embedding dimension.
:param embeddings_initializer:
:param embeddings_regularizer:
:param activity_regularizer:
:param embeddings_constraint:
:param mask_zero: The index that represents padding. Only works in `expand` mode.
:param kwargs:
"""
self.input_dim = input_dim
self.output_dim = output_dim
self.mode = mode
self.embeddings_initializer = initializers.get(embeddings_initializer)
self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.embeddings_constraint = constraints.get(embeddings_constraint)
self.mask_zero = mask_zero
self.supports_masking = mask_zero is not False
self.embeddings = None
super(PositionEmbedding, self).__init__(**kwargs)
def get_config(self):
config = {'input_dim': self.input_dim,
'output_dim': self.output_dim,
'mode': self.mode,
'embeddings_initializer': initializers.serialize(self.embeddings_initializer),
'embeddings_regularizer': regularizers.serialize(self.embeddings_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'embeddings_constraint': constraints.serialize(self.embeddings_constraint),
'mask_zero': self.mask_zero}
base_config = super(PositionEmbedding, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
if self.mode == self.MODE_EXPAND:
self.embeddings = self.add_weight(
shape=(self.input_dim * 2 + 1, self.output_dim),
initializer=self.embeddings_initializer,
name='embeddings',
regularizer=self.embeddings_regularizer,
constraint=self.embeddings_constraint,
)
else:
self.embeddings = self.add_weight(
shape=(self.input_dim, self.output_dim),
initializer=self.embeddings_initializer,
name='embeddings',
regularizer=self.embeddings_regularizer,
constraint=self.embeddings_constraint,
)
super(PositionEmbedding, self).build(input_shape)
def compute_mask(self, inputs, mask=None):
if self.mode == self.MODE_EXPAND:
if self.mask_zero:
output_mask = K.not_equal(inputs, self.mask_zero)
else:
output_mask = None
else:
output_mask = mask
return output_mask
def compute_output_shape(self, input_shape):
if self.mode == self.MODE_EXPAND:
return input_shape + (self.output_dim,)
if self.mode == self.MODE_CONCAT:
return input_shape[:-1] + (input_shape[-1] + self.output_dim,)
return input_shape
def call(self, inputs, **kwargs):
if self.mode == self.MODE_EXPAND:
if K.dtype(inputs) != 'int32':
inputs = K.cast(inputs, 'int32')
return K.gather(
self.embeddings,
K.minimum(K.maximum(inputs, -self.input_dim), self.input_dim) + self.input_dim,
)
input_shape = K.shape(inputs)
if self.mode == self.MODE_ADD:
batch_size, seq_len, output_dim = input_shape[0], input_shape[1], input_shape[2]
else:
batch_size, seq_len, output_dim = input_shape[0], input_shape[1], self.output_dim
pos_embeddings = K.tile(
K.expand_dims(self.embeddings[:seq_len, :self.output_dim], axis=0),
[batch_size, 1, 1],
)
if self.mode == self.MODE_ADD:
return inputs + pos_embeddings
return K.concatenate([inputs, pos_embeddings], axis=-1)
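# A minimal shape-check sketch (an illustrative assumption, not part of the original
# script): 'add' mode keeps the feature dimension, while 'concat' mode appends
# output_dim extra features. The sizes used here (50 positions, 64/16 features) are arbitrary.
def _position_embedding_shape_demo():
    demo_input = Input(shape=(50,))
    demo_features = Embedding(input_dim=1000, output_dim=64)(demo_input)
    added = PositionEmbedding(input_dim=50, output_dim=64,
                              mode=PositionEmbedding.MODE_ADD)(demo_features)  # (None, 50, 64)
    concatenated = PositionEmbedding(input_dim=50, output_dim=16,
                                     mode=PositionEmbedding.MODE_CONCAT)(demo_features)  # (None, 50, 80)
    return Model(inputs=demo_input, outputs=[added, concatenated])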
# from:https://github.com/CyberZHG/keras-pos-embd/blob/master/keras_pos_embd/trig_pos_embd.py
class TrigPosEmbedding(Layer):
"""Position embedding use sine and cosine functions.
See: https://arxiv.org/pdf/1706.03762
Expand mode:
# Input shape
2D tensor with shape: `(batch_size, sequence_length)`.
# Output shape
3D tensor with shape: `(batch_size, sequence_length, output_dim)`.
Add mode:
# Input shape
3D tensor with shape: `(batch_size, sequence_length, feature_dim)`.
# Output shape
3D tensor with shape: `(batch_size, sequence_length, feature_dim)`.
Concat mode:
# Input shape
3D tensor with shape: `(batch_size, sequence_length, feature_dim)`.
# Output shape
3D tensor with shape: `(batch_size, sequence_length, feature_dim + output_dim)`.
"""
MODE_EXPAND = 'expand'
MODE_ADD = 'add'
MODE_CONCAT = 'concat'
def __init__(self,
mode=MODE_ADD,
output_dim=None,
**kwargs):
"""
:param output_dim: The embedding dimension.
:param kwargs:
"""
if mode in [self.MODE_EXPAND, self.MODE_CONCAT]:
if output_dim is None:
raise NotImplementedError('`output_dim` is required in `%s` mode' % mode)
if output_dim % 2 != 0:
raise NotImplementedError('It does not make sense to use an odd output dimension: %d' % output_dim)
self.mode = mode
self.output_dim = output_dim
self.supports_masking = True
super(TrigPosEmbedding, self).__init__(**kwargs)
def get_config(self):
config = {
'mode': self.mode,
'output_dim': self.output_dim,
}
base_config = super(TrigPosEmbedding, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_mask(self, inputs, mask=None):
return mask
def compute_output_shape(self, input_shape):
if self.mode == self.MODE_EXPAND:
return input_shape + (self.output_dim,)
if self.mode == self.MODE_CONCAT:
return input_shape[:-1] + (input_shape[-1] + self.output_dim,)
return input_shape
def call(self, inputs, mask=None):
input_shape = K.shape(inputs)
if self.mode == self.MODE_ADD:
batch_size, seq_len, output_dim = input_shape[0], input_shape[1], input_shape[2]
pos_input = K.tile(K.expand_dims(K.arange(0, seq_len), axis=0), [batch_size, 1])
elif self.mode == self.MODE_CONCAT:
batch_size, seq_len, output_dim = input_shape[0], input_shape[1], self.output_dim
pos_input = K.tile(K.expand_dims(K.arange(0, seq_len), axis=0), [batch_size, 1])
else:
output_dim = self.output_dim
pos_input = inputs
if K.dtype(pos_input) != K.floatx():
pos_input = K.cast(pos_input, K.floatx())
evens = K.arange(0, output_dim // 2) * 2
odds = K.arange(0, output_dim // 2) * 2 + 1
even_embd = K.sin(
K.dot(
K.expand_dims(pos_input, -1),
K.expand_dims(1.0 / K.pow(
10000.0,
K.cast(evens, K.floatx()) / K.cast(output_dim, K.floatx())
), 0)
)
)
odd_embd = K.cos(
K.dot(
K.expand_dims(pos_input, -1),
K.expand_dims(1.0 / K.pow(
10000.0, K.cast((odds - 1), K.floatx()) / K.cast(output_dim, K.floatx())
), 0)
)
)
embd = K.stack([even_embd, odd_embd], axis=-1)
output = K.reshape(embd, [-1, K.shape(inputs)[1], output_dim])
if self.mode == self.MODE_CONCAT:
output = K.concatenate([inputs, output], axis=-1)
if self.mode == self.MODE_ADD:
output += inputs
return output
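# A small NumPy reference (an illustrative assumption, not part of the original
# script) for the encoding built above: even feature indices hold
# sin(pos / 10000**(2i/d)) and odd indices hold the matching cosine, following
# the Transformer paper cited in the docstring.
def _trig_positional_encoding_reference(seq_len, output_dim):
    positions = np.arange(seq_len, dtype='float64')[:, None]
    i = np.arange(output_dim // 2, dtype='float64')[None, :]
    angles = positions / np.power(10000.0, (2.0 * i) / output_dim)
    encoding = np.zeros((seq_len, output_dim))
    encoding[:, 0::2] = np.sin(angles)  # even indices: sine
    encoding[:, 1::2] = np.cos(angles)  # odd indices: cosine
    return encoding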
# from:https://github.com/bojone/attention/blob/master/attention_keras.py
class Attention(Layer):
def __init__(self, nb_head, size_per_head, mask_right=False, **kwargs):
self.nb_head = nb_head
self.size_per_head = size_per_head
self.output_dim = nb_head * size_per_head
self.mask_right = mask_right
super(Attention, self).__init__(**kwargs)
def build(self, input_shape):
self.WQ = self.add_weight(name='WQ',
shape=(input_shape[0][-1], self.output_dim),
initializer='glorot_uniform',
trainable=True)
self.WK = self.add_weight(name='WK',
shape=(input_shape[1][-1], self.output_dim),
initializer='glorot_uniform',
trainable=True)
self.WV = self.add_weight(name='WV',
shape=(input_shape[2][-1], self.output_dim),
initializer='glorot_uniform',
trainable=True)
super(Attention, self).build(input_shape)
def Mask(self, inputs, seq_len, mode='mul'):
if seq_len is None:
return inputs
else:
mask = K.one_hot(seq_len[:, 0], K.shape(inputs)[1])
mask = 1 - K.cumsum(mask, 1)
for _ in range(len(inputs.shape) - 2):
mask = K.expand_dims(mask, 2)
if mode == 'mul':
return inputs * mask
if mode == 'add':
return inputs - (1 - mask) * 1e12
def call(self, x, **kwargs):
# If only Q_seq, K_seq, V_seq are passed in, no mask is applied
# If Q_seq, K_seq, V_seq, Q_len, V_len are all passed in, the parts beyond the given lengths are masked
Q_seq, K_seq, V_seq, Q_len, V_len = None, None, None, None, None
if len(x) == 3:
Q_seq, K_seq, V_seq = x
Q_len, V_len = None, None
elif len(x) == 5:
Q_seq, K_seq, V_seq, Q_len, V_len = x
# Apply linear transformations to Q, K, V
Q_seq = K.dot(Q_seq, self.WQ)
Q_seq = K.reshape(Q_seq, (-1, K.shape(Q_seq)[1], self.nb_head, self.size_per_head))
Q_seq = K.permute_dimensions(Q_seq, (0, 2, 1, 3))
K_seq = K.dot(K_seq, self.WK)
K_seq = K.reshape(K_seq, (-1, K.shape(K_seq)[1], self.nb_head, self.size_per_head))
K_seq = K.permute_dimensions(K_seq, (0, 2, 1, 3))
V_seq = K.dot(V_seq, self.WV)
V_seq = K.reshape(V_seq, (-1, K.shape(V_seq)[1], self.nb_head, self.size_per_head))
V_seq = K.permute_dimensions(V_seq, (0, 2, 1, 3))
# Compute the scaled dot products, then mask, then softmax
A = K.batch_dot(Q_seq, K_seq, axes=[3, 3]) / self.size_per_head ** 0.5
A = K.permute_dimensions(A, (0, 3, 2, 1))
A = self.Mask(A, V_len, 'add')
A = K.permute_dimensions(A, (0, 3, 2, 1))
if self.mask_right:
ones = K.ones_like(A[:1, :1])
mask = (ones - K.tf.matrix_band_part(ones, -1, 0)) * 1e12
A = A - mask
A = K.softmax(A)
# Compute the output, then mask
O_seq = K.batch_dot(A, V_seq, axes=[3, 2])
O_seq = K.permute_dimensions(O_seq, (0, 2, 1, 3))
O_seq = K.reshape(O_seq, (-1, K.shape(O_seq)[1], self.output_dim))
O_seq = self.Mask(O_seq, Q_len, 'mul')
return O_seq
def compute_output_shape(self, input_shape):
return (input_shape[0][0], input_shape[0][1], self.output_dim)
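# A minimal self-attention sketch (an illustrative assumption, not part of the
# original script): passing the same tensor as Q, K and V gives multi-head
# self-attention whose last dimension is nb_head * size_per_head (4 * 8 = 32 here).
def _self_attention_shape_demo():
    demo_input = Input(shape=(50,))
    demo_features = Embedding(input_dim=1000, output_dim=64)(demo_input)
    attended = Attention(4, 8)([demo_features, demo_features, demo_features])  # (None, 50, 32)
    return Model(inputs=demo_input, outputs=attended)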
warnings.filterwarnings("ignore")
seed = 2019
np.random.seed(seed)
def get_labels_datas(input_dir):
datas_word = []
datas_char = []
labels = []
label_dirs = os.listdir(input_dir)
for label_dir in label_dirs:
txt_names = os.listdir(os.path.join(input_dir, label_dir))
for txt_name in txt_names:
with open(os.path.join(input_dir, label_dir, txt_name), 'r') as fin:
content = fin.readline()  # only use the first line
content = content.strip().replace(' ', '')
datas_word.append(' '.join(jieba.cut(content)))
datas_char.append(' '.join(list(content)))
labels.append(label_dir)
return labels, datas_word, datas_char
def get_label_id_map(labels):
labels = set(labels)
id_label_map = {}
label_id_map = {}
for index, label in enumerate(labels):
id_label_map[index] = label
label_id_map[label] = index
return id_label_map, label_id_map
input_dir = './data/THUCNews'
labels, datas_word, datas_char = get_labels_datas(input_dir)
id_label_map, label_id_map = get_label_id_map(labels)
labels, labels_test, datas_word, datas_word_test, datas_char, datas_char_test = train_test_split(labels, datas_word, datas_char, test_size=0.3, shuffle=True, stratify=labels)
labels_train, labels_dev, datas_word_train, datas_word_dev, datas_char_train, datas_char_dev = train_test_split(labels, datas_word, datas_char, test_size=0.1, shuffle=True, stratify=labels)
y_train = [label_id_map.get(x) for x in labels_train]
y_dev = [label_id_map.get(x) for x in labels_dev]
y_test = [label_id_map.get(x) for x in labels_test]
num_classes = len(set(y_train))
y_train_index = to_categorical(y_train, num_classes)
y_dev_index = to_categorical(y_dev, num_classes)
y_test_index = to_categorical(y_test, num_classes)
# keras extract feature
tokenizer = Tokenizer()
tokenizer.fit_on_texts(datas_word_train)
# feature5: word index for deep learning
x_train_word_index = tokenizer.texts_to_sequences(datas_word_train)
x_dev_word_index = tokenizer.texts_to_sequences(datas_word_dev)
x_test_word_index = tokenizer.texts_to_sequences(datas_word_test)
max_word_length = max([len(x) for x in x_train_word_index])
x_train_word_index = pad_sequences(x_train_word_index, maxlen=max_word_length)
x_dev_word_index = pad_sequences(x_dev_word_index, maxlen=max_word_length)
x_test_word_index = pad_sequences(x_test_word_index, maxlen=max_word_length)
input = Input(shape=(max_word_length,))
embedding = Embedding(len(tokenizer.word_index) + 1, 128)(input)
# embedding = TrigPosEmbedding(mode='add', output_dim=128)(embedding)
embedding = PositionEmbedding(input_dim=128, output_dim=128, mode='add')(embedding)
embedding = SpatialDropout1D(0.2)(embedding)
att = Attention(8, 16)([embedding, embedding, embedding])
att = GlobalAveragePooling1D()(att)
att = Dropout(0.2)(att)
output = Dense(num_classes, activation='softmax')(att)
model = Model(inputs=input, outputs=output)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
model_weight_file = './model_multi_head_attention.h5'
model_file = './model_multi_head_attention.model'
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
model_checkpoint = ModelCheckpoint(model_weight_file, save_best_only=True, save_weights_only=True)
model.fit(x_train_word_index,
y_train_index,
batch_size=8,
epochs=1000,
verbose=2,
callbacks=[early_stopping, model_checkpoint],
validation_data=(x_dev_word_index, y_dev_index))
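# A hedged continuation sketch (an assumption, not taken from the original script):
# reload the best checkpoint written by ModelCheckpoint and score the held-out test split.
def _evaluate_best_weights():
    model.load_weights(model_weight_file)
    test_loss, test_acc = model.evaluate(x_test_word_index, y_test_index,
                                         batch_size=8, verbose=0)
    print('test loss = %.4f, test accuracy = %.4f' % (test_loss, test_acc))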
row, col, ocol)
col += gap
def moveBeyond(tok):
nonlocal orow, ocol
nrow, ncol = tok[3]
if nrow > orow or (nrow == orow and ncol > ocol):
orow = nrow
ocol = ncol
def advance(skip=True):
nextToken = next(tokens)
if skip:
moveBeyond(nextToken)
else:
injectToken(nextToken)
return nextToken
def callFunction(function, argument=None, implementation=None):
nonlocal skip, matched, functionStack
functionStack.append((function, parenLevel))
implementation = function if implementation is None else implementation
injectToken((NAME, implementation))
injectToken((LPAR, '('))
if argument is not None:
injectToken(argument)
injectToken((COMMA, ','))
skip = True
matched = True
def popFunction():
nonlocal context, startLevel
functionStack.pop()
injectToken((RPAR, ')'))
context, startLevel = (None, 0) if len(functionStack) == 0 else functionStack[-1]
def wrapStatementCall():
injectToken((NAME, 'raise'), spaceAfter=1)
injectToken((NAME, statementRaiseMarker), spaceAfter=1)
injectToken((NAME, 'from'), spaceAfter=1)
# Catch Python operators that can't be used in Scenic
if ttype in illegalTokens:
self.parseError(token, f'illegal operator "{tstring}"')
# Determine which operators are allowed in current context
allowedPrefixOps = prefixOperators
allowedInfixOps = dict()
allowedModifiers = dict()
allowedTerminators = set()
inConstructorContext = False
context, startLevel = functionStack[-1] if functionStack else (None, None)
if parenLevel == startLevel:
if context in constructors:
inConstructorContext = True
allowedPrefixOps = self.specifiersForConstructor(context)
else:
for opTokens, op in infixTokens.items():
if not op.contexts or context in op.contexts:
allowedInfixOps[opTokens] = op.token
for name, mod in modifierNames.items():
if not mod.contexts or context in mod.contexts:
allowedModifiers[name] = mod.name
if context in modifierNames:
allowedTerminators = modifierNames[context].terminators
else:
allowedInfixOps = generalInfixOps
# Parse next token
if ttype == LPAR or ttype == LSQB: # keep track of nesting level
parenLevel += 1
elif ttype == RPAR or ttype == RSQB: # ditto
parenLevel -= 1
elif ttype == STRING:
# special case for global parameters with quoted names:
# transform "name"=value into "name", value
if (len(functionStack) > 0 and functionStack[-1][0] == paramStatement
and peek(tokens).string == '='):
next(tokens) # consume '='
injectToken(token)
injectToken((COMMA, ','))
skip = True
elif ttype == NAME: # the interesting case: almost all new syntax falls in here
# try to match 2-word language constructs
matched = False
nextToken = peek(tokens) # lookahead so we can give 2-word ops precedence
if nextToken is not None:
endToken = nextToken # tentatively; will be overridden if no match
nextString = nextToken.string
twoWords = (tstring, nextString)
if startOfLine and tstring == 'for': # TODO improve hack?
matched = True
endToken = token
elif startOfLine and tstring in constructorStatements: # class definition
if nextToken.type != NAME or nextString in keywords:
self.parseError(nextToken, f'invalid class name "{nextString}"')
nextToken = next(tokens) # consume name
bases, scenicParents = [], []
if peek(tokens).exact_type == LPAR: # superclass specification
next(tokens)
nextToken = next(tokens)
while nextToken.exact_type != RPAR:
base = nextToken.string
if nextToken.exact_type != NAME:
self.parseError(nextToken, f'invalid superclass "{base}"')
bases.append(base)
if base in self.constructors:
scenicParents.append(base)
if peek(tokens).exact_type == COMMA:
next(tokens)
nextToken = next(tokens)
if not scenicParents and tstring != 'class':
self.parseError(nextToken,
f'Scenic class definition with no Scenic superclasses')
if peek(tokens).exact_type != COLON:
self.parseError(peek(tokens), 'malformed class definition')
if not bases:
bases = scenicParents = ('Object',) # default superclass
if scenicParents:
self.createConstructor(nextString, scenicParents)
injectToken((NAME, 'class'), spaceAfter=1)
injectToken((NAME, nextString))
injectToken((LPAR, '('))
injectToken((NAME, bases[0]))
for base in bases[1:]:
injectToken((COMMA, ','), spaceAfter=1)
injectToken((NAME, base))
injectToken((RPAR, ')'))
skip = True
matched = True
endToken = nextToken
elif startOfLine and tstring == 'scenario': # scenario definition
if nextToken.type != NAME:
self.parseError(nextToken, f'invalid scenario name "{nextString}"')
className = scenarioMarker + nextString
injectToken((NAME, 'async'), spaceAfter=1)
injectToken((NAME, 'def'), spaceAfter=1)
injectToken((NAME, className))
advance() # consume name
skip = True
matched = True
elif startOfLine and tstring == behaviorStatement: # behavior definition
if nextToken.type != NAME:
self.parseError(nextToken, f'invalid behavior name "{nextString}"')
injectToken((NAME, 'async'), spaceAfter=1)
injectToken((NAME, 'def'), spaceAfter=1)
skip = True
matched = True
endToken = token
elif startOfLine and tstring == monitorStatement: # monitor definition
if nextToken.type != NAME:
self.parseError(nextToken, f'invalid monitor name "{nextString}"')
injectToken((NAME, 'async'), spaceAfter=1)
injectToken((NAME, 'def'), spaceAfter=1)
injectToken((NAME, veneer.functionForMonitor(nextString)))
injectToken((LPAR, '('))
injectToken((RPAR, ')'))
advance() # consume name
if peek(tokens).exact_type != COLON:
self.parseError(nextToken, 'malformed monitor definition')
skip = True
matched = True
elif twoWords in allowedPrefixOps: # 2-word prefix operator
callFunction(allowedPrefixOps[twoWords])
advance() # consume second word
elif not startOfStatement and twoWords in allowedInfixOps: # 2-word infix operator
injectToken(allowedInfixOps[twoWords])
advance() # consume second word
skip = True
matched = True
elif startOfLine and twoWords == interruptWhenStatement:
# special case for interrupt when
injectToken((NAME, 'except'), spaceAfter=1)
callFunction(interruptExceptMarker)
advance() # consume second word
matched = True
elif startOfStatement and twoWords in threeWordIncipits: # 3-word statement
advance() # consume second word
endToken = advance() # consume third word
thirdWord = endToken.string
expected = threeWordIncipits[twoWords]
if thirdWord != expected:
self.parseError(endToken,
f'expected "{expected}", got "{thirdWord}"')
wrapStatementCall()
function = functionForStatement(twoWords + (thirdWord,))
callFunction(function)
matched = True
elif startOfStatement and twoWords in twoWordStatements: # 2-word statement
wrapStatementCall()
function = functionForStatement(twoWords)
callFunction(function)
advance() # consume second word
matched = True
elif inConstructorContext and tstring == 'with': # special case for 'with' specifier
callFunction('With', argument=(STRING, f'"{nextString}"'))
advance() # consume property name
elif startOfStatement and tstring == requireStatement and nextString == '[':
# special case for require[p]
next(tokens) # consume '['
nextToken = next(tokens)
if nextToken.exact_type != NUMBER:
self.parseError(nextToken,
'soft requirement must have constant probability')
prob = nextToken.string
if not 0 <= float(prob) <= 1:
self.parseError(nextToken, 'probability must be between 0 and 1')
nextToken = next(tokens)
if nextToken.exact_type != RSQB:
self.parseError(nextToken, 'malformed soft requirement')
wrapStatementCall()
callFunction(softRequirement, argument=(NUMBER, prob))
endToken = nextToken
elif twoWords in twoWordReplacements: # 2-word direct replacement
for tok in twoWordReplacements[twoWords]:
injectToken(tok, spaceAfter=1)
advance() # consume second word
skip = True
elif twoWords in illegalConstructs:
construct = ' '.join(twoWords)
self.parseError(token,
f'Python construct "{construct}" not allowed in Scenic')
if not matched:
# 2-word constructs don't match; try 1-word
endToken = token
oneWord = (tstring,)
if oneWord in allowedPrefixOps: # 1-word prefix operator
callFunction(allowedPrefixOps[oneWord])
elif not startOfStatement and oneWord in allowedInfixOps: # 1-word infix operator
injectToken(allowedInfixOps[oneWord])
skip = True
elif inConstructorContext: # couldn't match any 1- or 2-word specifier
self.parseError(token, f'unknown specifier "{tstring}"')
elif not startOfStatement and tstring in allowedModifiers:
injectToken((COMMA, ','))
callFunction(tstring, argument=(STRING, f'"{tstring}"'),
implementation='Modifier')
skip = True
elif not startOfStatement and tstring in allowedTerminators:
injectToken((COMMA, ','))
injectToken((STRING, f'"{tstring}"'))
popFunction()
skip = True
elif startOfStatement and tstring in oneWordStatements: # 1-word statement
wrapStatementCall()
callFunction(tstring)
elif token.start[1] == 0 and tstring == modelStatement: # model statement
components = []
while peek(tokens).exact_type not in (COMMENT, NEWLINE):
nextToken = next(tokens)
if nextToken.exact_type != NAME and nextToken.string != '.':
self.parseError(nextToken, 'invalid module name')
components.append(nextToken.string)
if not components:
self.parseError(token, 'model statement is missing module name')
components.append("'")
literal = "'" + ''.join(components)
callFunction(modelStatement, argument=(NAME, namespaceReference))
injectToken((STRING, literal))
skip = True
elif startOfLine and tstring in scenarioBlocks: # named block of scenario
if peek(tokens).exact_type != COLON:
self.parseError(peek(tokens), f'malformed "{tstring}" block')
injectToken((NAME, 'async'), spaceAfter=1)
injectToken((NAME, 'def'), spaceAfter=1)
injectToken((NAME, tstring))
injectToken((LPAR, '('))
injectToken((RPAR, ')'))
skip = True
elif (tstring in self.constructors
and peek(tokens).exact_type not in (RPAR, RSQB, RBRACE, COMMA)):
# instance definition
callFunction(tstring)
elif tstring in replacements: # direct replacement
for tok in replacements[tstring]:
injectToken(tok, spaceAfter=1)
skip = True
elif startOfLine and tstring == 'from': # special case to allow 'from X import Y'
pass
elif tstring in keywords: # some malformed usage
self.parseError(token, f'unexpected keyword "{tstring}"')
elif tstring in illegalConstructs:
self.parseError(token,
f'Python construct "{tstring}" not allowed in Scenic')
else:
pass # nothing matched; pass through unchanged to Python
# Detect the end of function argument lists
if len(functionStack) > 0:
context, startLevel = functionStack[-1]
while parenLevel < startLevel: # we've closed all parens for the current function
popFunction()
inConstructor = any(context in constructors for context, sl in functionStack)
if inConstructor and parenLevel == startLevel and ttype == COMMA: # starting a new specifier
while functionStack and context not in constructors:
popFunction()
# allow the next specifier to be on the next line, if indented
injectToken(token) # emit comma immediately
skip = True
nextToken = peek(tokens)
specOnNewLine = False
while nextToken.exact_type in (NEWLINE, NL, COMMENT, ENDMARKER):
specOnNewLine = True
if nextToken.exact_type == COMMENT:
advance(skip=False) # preserve comment
nextToken = peek(tokens)
if nextToken.exact_type not in (NEWLINE, NL):
self.parseError(nextToken, 'comma with no specifier following')
advance(skip=False) # preserve newline
nextToken = peek(tokens)
if specOnNewLine and not specifiersIndented:
nextToken = next(tokens) # consume indent
if nextToken.exact_type != INDENT:
self.parseError(nextToken,
'expected indented specifier (extra comma on previous line?)')
injectToken(nextToken)
specifiersIndented = True
elif ttype in (NEWLINE, ENDMARKER, COMMENT, SEMI): # end of line or statement
if parenLevel != 0:
self.parseError(token, 'unmatched parens/brackets')
interrupt = False
if functionStack and functionStack[0][0] == interruptExceptMarker:
lastToken = newTokens[-1]
if lastToken[1] != ':':
self.parseError(nextToken, 'expected colon for interrupt')
newTokens.pop() # remove colon for now
interrupt = True
while len(functionStack) > 0:
functionStack.pop()
injectToken((RPAR, ')'))
if interrupt:
injectToken((COLON, ':'))
# Output token unchanged, unless handled above
if not skip:
injectToken(token)
else:
moveBeyond(endToken)
startOfLine = (ttype in (ENCODING, NEWLINE, NL, INDENT, DEDENT))
startOfStatement = startOfLine or (ttype == SEMI)
rewrittenSource = tokenize.untokenize(newTokens)
if not isinstance(rewrittenSource, str): # TODO improve?
rewrittenSource = str(rewrittenSource, encoding='utf-8')
return rewrittenSource, self.constructors
### TRANSLATION PHASE THREE: parsing of Python resulting from token translation
def parseTranslatedSource(source, filename):
try:
tree = parse(source, filename=filename)
return tree
except SyntaxError as e:
raise PythonParseError(e) from None
### TRANSLATION PHASE FOUR: modifying the parse tree
temporaryName = '_Scenic_temporary_name'
behaviorArgName = '_Scenic_current_behavior'
checkInvariantsName = '_Scenic_check_invariants'
interruptPrefix = '_Scenic_interrupt'
abortFlag = Attribute(Name('BlockConclusion', Load()), 'ABORT', Load())
breakFlag = Attribute(Name('BlockConclusion', Load()), 'BREAK', Load())
continueFlag = Attribute(Name('BlockConclusion', Load()), 'CONTINUE', Load())
returnFlag = Attribute(Name('BlockConclusion', Load()), 'RETURN', Load())
finishedFlag = Attribute(Name('BlockConclusion', Load()), 'FINISHED', Load())
noArgs = ast.arguments(
posonlyargs=[],
args=[], vararg=None,
kwonlyargs=[], kw_defaults=[],
kwarg=None, defaults=[])
selfArg = ast.arguments(
posonlyargs=[],
args=[ast.arg(arg='self', annotation=None)], vararg=None,
kwonlyargs=[], kw_defaults=[],
kwarg=None, defaults=[])
tempArg = ast.arguments(
posonlyargs=[],
args=[ast.arg(arg=temporaryName, annotation=None)], vararg=None,
kwonlyargs=[], kw_defaults=[],
kwarg=None, defaults=[Constant(None)])
initialBehaviorArgs = [
ast.arg(arg=behaviorArgName, annotation=None),
ast.arg(arg='self', annotation=None)
]
onlyBehaviorArgs = ast.arguments(
posonlyargs=[],
args=initialBehaviorArgs, vararg=None,
kwonlyargs=[], kw_defaults=[],
kwarg=None, defaults=[])
class AttributeFinder(NodeVisitor):
"""Utility class for finding all referenced attributes of a given name."""
@staticmethod
def find(target, node):
af = AttributeFinder(target)
af.visit(node)
return af.attributes
def __init__(self, target):
super().__init__()
self.target = target
self.attributes = set()
def visit_Attribute(self, node):
val = node.value
if isinstance(val, Name) and val.id == self.target:
self.attributes.add(node.attr)
self.visit(val)
class LocalFinder(NodeVisitor):
"""Utility class for finding all local variables of a code block."""
@staticmethod
def findIn(block, ignoreTemporaries=True):
lf = LocalFinder()
for statement in block:
lf.visit(statement)
if ignoreTemporaries:
names = set(name for name in lf.names if not name.startswith(temporaryName))
else:
names = lf.names
return names - lf.globals - lf.nonlocals
def __init__(self):
self.names = set()
self.globals = set()
self.nonlocals = set()
def visit_Global(self, node):
self.globals |= node.names
def visit_Nonlocal(self, node):
self.nonlocals |= node.names
def visit_FunctionDef(self, node):
self.names.add(node.name)
self.visit(node.args)
for decorator in node.decorator_list:
self.visit(decorator)
if node.returns is not None:
self.visit(node.returns)
# do not visit body; it's another block
def visit_Lambda(self, node):
self.visit(node.args)
# do not visit body; it's another block
def visit_ClassDef(self, node):
self.names.add(node.name)
for child in itertools.chain(node.bases, node.keywords, node.decorator_list):
self.visit(child)
# do not visit body; it's another block
def visit_Import(self, node):
for alias in node.names:
bound = alias.asname
if bound is None:
bound = alias.name
self.names.add(bound)
def visit_ImportFrom(self, node):
self.visit_Import(node)
def visit_Name(self, node):
if isinstance(node.ctx, Store):
self.names.add(node.id)
def visit_ExceptHandler(self, node):
if node.name is not None:
self.names.add(node.name)
self.generic_visit(node)
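# A minimal usage sketch (an illustrative assumption, not taken from the Scenic
# sources) of the two visitors above: AttributeFinder collects the attributes
# accessed on a given name, LocalFinder collects the names bound in a block.
def _finder_demo():  # pragma: no cover
    import ast as _ast
    func = _ast.parse("def f(self):\n    x = self.position\n    return self.heading").body[0]
    accessed = AttributeFinder.find('self', func)   # {'position', 'heading'}
    bound = LocalFinder.findIn(func.body)           # {'x'}
    return accessed, bound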
class ASTSurgeon(NodeTransformer):
def
from __future__ import absolute_import, print_function, with_statement
import logging
import os
import shutil
import sys
import torch
from .deployer_utils import save_python_function, serialize_object
from ..clipper_admin import ClipperException
from ..version import __registry__, __version__
logger = logging.getLogger(__name__)
PYTORCH_WEIGHTS_RELATIVE_PATH = "pytorch_weights.pkl"
PYTORCH_MODEL_RELATIVE_PATH = "pytorch_model.pkl"
def create_endpoint(clipper_conn,
name,
input_type,
func,
pytorch_model,
default_output="None",
version=1,
slo_micros=3000000,
labels=None,
registry=None,
base_image="default",
num_replicas=1,
batch_size=-1,
pkgs_to_install=None):
"""Registers an app and deploys the provided predict function with PyTorch model as
a Clipper model.
Parameters
----------
clipper_conn : :py:meth:`clipper_admin.ClipperConnection`
A ``ClipperConnection`` object connected to a running Clipper cluster.
name : str
The name to be assigned to both the registered application and deployed model.
input_type : str
The input_type to be associated with the registered app and deployed model.
One of "integers", "floats", "doubles", "bytes", or "strings".
func : function
The prediction function. Any state associated with the function will be
captured via closure capture and pickled with Cloudpickle.
pytorch_model : pytorch model object
The PyTorch model to save.
default_output : str, optional
The default output for the application. The default output will be returned whenever
an application is unable to receive a response from a model within the specified
query latency SLO (service level objective). The reason the default output was returned
is always provided as part of the prediction response object. Defaults to "None".
version : str, optional
The version to assign this model. Versions must be unique on a per-model
basis, but may be re-used across different models.
slo_micros : int, optional
The query latency objective for the application in microseconds.
This is the processing latency between Clipper receiving a request
and sending a response. It does not account for network latencies
before a request is received or after a response is sent.
If Clipper cannot process a query within the latency objective,
the default output is returned. Therefore, it is recommended that
the SLO not be set aggressively low unless absolutely necessary.
100000 (100ms) is a good starting value, but the optimal latency objective
will vary depending on the application.
labels : list(str), optional
A list of strings annotating the model. These are ignored by Clipper
and used purely for user annotations.
registry : str, optional
The Docker container registry to push the freshly built model to. Note
that if you are running Clipper on Kubernetes, this registry must be accessible
to the Kubernetes cluster in order to fetch the container from the registry.
base_image : str, optional
The base Docker image to build the new model image from. This
image should contain all code necessary to run a Clipper model
container RPC client.
num_replicas : int, optional
The number of replicas of the model to create. The number of replicas
for a model can be changed at any time with
:py:meth:`clipper.ClipperConnection.set_num_replicas`.
batch_size : int, optional
The user-defined query batch size for the model. Replicas of the model will attempt
to process at most `batch_size` queries simultaneously. They may process smaller
batches if `batch_size` queries are not immediately available.
If the default value of -1 is used, Clipper will adaptively calculate the batch size for individual
replicas of this model.
pkgs_to_install : list (of strings), optional
A list of the names of packages to install, using pip, in the container.
The names must be strings.
"""
clipper_conn.register_application(name, input_type, default_output,
slo_micros)
deploy_pytorch_model(clipper_conn, name, version, input_type, func,
pytorch_model, base_image, labels, registry,
num_replicas, batch_size, pkgs_to_install)
clipper_conn.link_model_to_app(name, name)
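# A minimal usage sketch (an illustrative assumption mirroring the docstring example
# further below); the connection, model and predict function are placeholders rather
# than values defined in this module:
#
#     from clipper_admin import ClipperConnection, DockerContainerManager
#     from torch import nn
#
#     clipper_conn = ClipperConnection(DockerContainerManager())
#     clipper_conn.connect()
#     model = nn.Linear(1, 1)
#
#     def predict(model, inputs):
#         pred = model(inputs)
#         return [str(x) for x in pred.data.numpy()]
#
#     create_endpoint(clipper_conn, name="example", input_type="doubles",
#                     func=predict, pytorch_model=model)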
def build_pytorch_model(clipper_conn,
name,
version,
input_type,
func,
pytorch_model,
base_image="default",
registry=None,
pkgs_to_install=None,
gpu=False):
"""Deploy a Python function with a PyTorch model.
Parameters
----------
clipper_conn : :py:meth:`clipper_admin.ClipperConnection`
A ``ClipperConnection`` object connected to a running Clipper cluster.
name : str
The name to be assigned to both the registered application and deployed model.
version : str
The version to assign this model. Versions must be unique on a per-model
basis, but may be re-used across different models.
input_type : str
The input_type to be associated with the registered app and deployed model.
One of "integers", "floats", "doubles", "bytes", or "strings".
func : function
The prediction function. Any state associated with the function will be
captured via closure capture and pickled with Cloudpickle.
pytorch_model : pytorch model object
The PyTorch model to save.
base_image : str, optional
The base Docker image to build the new model image from. This
image should contain all code necessary to run a Clipper model
container RPC client.
registry : str, optional
The Docker container registry to push the freshly built model to. Note
that if you are running Clipper on Kubernetes, this registry must be accessible
to the Kubernetes cluster in order to fetch the container from the registry.
pkgs_to_install : list (of strings), optional
A list of the names of packages to install, using pip, in the container.
The names must be strings.
gpu : bool, optional
A boolean flag that indicates if the model will be run on a CUDA enabled GPU.
Example
-------
Define a pytorch nn module and save the model::
from clipper_admin import ClipperConnection, DockerContainerManager
from clipper_admin.deployers.pytorch import deploy_pytorch_model
from torch import nn
clipper_conn = ClipperConnection(DockerContainerManager())
# Connect to an already-running Clipper cluster
clipper_conn.connect()
model = nn.Linear(1, 1)
# Define a shift function to normalize prediction inputs
def predict(model, inputs):
pred = model(shift(inputs))
pred = pred.data.numpy()
return [str(x) for x in pred]
deploy_pytorch_model(
clipper_conn,
name="example",
version=1,
input_type="doubles",
func=predict,
pytorch_model=model)
"""
serialization_dir = save_python_function(name, func)
# save Torch model
torch_weights_save_loc = os.path.join(serialization_dir,
PYTORCH_WEIGHTS_RELATIVE_PATH)
torch_model_save_loc = os.path.join(serialization_dir,
PYTORCH_MODEL_RELATIVE_PATH)
try:
torch.save(pytorch_model.state_dict(), torch_weights_save_loc)
serialized_model = serialize_object(pytorch_model)
with open(torch_model_save_loc, "wb") as serialized_model_file:
serialized_model_file.write(serialized_model)
logger.info("Torch model saved")
py_minor_version = (sys.version_info.major, sys.version_info.minor)
# Check if Python 2 or Python 3 image
if base_image == "default":
if gpu:
if py_minor_version < (3, 0):
logger.info("Using Python 2 CUDA 10 base image")
base_image = "{}/cuda10-pytorch27-container:{}".format(
__registry__, __version__)
elif py_minor_version == (3, 6):
logger.info("Using Python 3.6 CUDA 10 base image")
base_image = "{}/cuda10-pytorch36-container:{}".format(
__registry__, __version__)
else:
msg = (
"CUDA 10 PyTorch deployer only supports Python 2.7 and 3.6. "
"Detected {major}.{minor}").format(
major=sys.version_info.major,
minor=sys.version_info.minor)
logger.error(msg)
# Remove temp files
shutil.rmtree(serialization_dir)
raise ClipperException(msg)
else:
if py_minor_version < (3, 0):
logger.info("Using Python 2 base image")
base_image = "{}/pytorch-container:{}".format(
__registry__, __version__)
elif py_minor_version == (3, 5):
logger.info("Using Python 3.5 base image")
base_image = "{}/pytorch35-container:{}".format(
__registry__, __version__)
elif py_minor_version == (3, 6):
logger.info("Using Python 3.6 base image")
base_image = "{}/pytorch36-container:{}".format(
__registry__, __version__)
else:
msg = (
"PyTorch deployer only supports Python 2.7, 3.5, and 3.6. "
"Detected {major}.{minor}").format(
major=sys.version_info.major,
minor=sys.version_info.minor)
logger.error(msg)
# Remove temp files
shutil.rmtree(serialization_dir)
raise ClipperException(msg)
# Build the model image
return clipper_conn.build_model(
name=name,
version=version,
model_data_path=serialization_dir,
base_image=base_image,
container_registry=registry,
pkgs_to_install=pkgs_to_install)
except Exception as e:
raise ClipperException("Error saving torch model: %s" % e)
finally:
# Remove temp files
shutil.rmtree(serialization_dir)
def deploy_pytorch_model(clipper_conn,
name,
version,
input_type,
func,
pytorch_model,
base_image="default",
labels=None,
registry=None,
num_replicas=1,
batch_size=-1,
pkgs_to_install=None,
gpu=False):
"""Deploy a Python function with a PyTorch model.
Parameters
----------
clipper_conn : :py:meth:`clipper_admin.ClipperConnection`
A ``ClipperConnection`` object connected to a running Clipper cluster.
name : str
The name to be assigned to both the registered application and deployed model.
version : str
The version to assign this model. Versions must be unique on a per-model
basis, but may be re-used across different models.
input_type : str
The input_type to be associated with the registered app and deployed model.
One of "integers", "floats", "doubles", "bytes", or "strings".
func : function
The prediction function. Any state associated with the function will be
captured via closure capture and pickled with Cloudpickle.
pytorch_model : pytorch model object
The PyTorch model to save.
base_image : str, optional
The base Docker image to build the new model image from. This
image should contain all code necessary to run a Clipper model
container RPC client.
labels : list(str), optional
A list of strings annotating the model. These are ignored by Clipper
and used purely for user annotations.
registry : str, optional
The Docker container registry to push the freshly built model to. Note
that if you are running Clipper on Kubernetes, this registry must be accessible
| |
"""
defines:
bdf merge (IN_BDF_FILENAMES)... [-o OUT_BDF_FILENAME]\n'
bdf equivalence IN_BDF_FILENAME EQ_TOL\n'
bdf renumber IN_BDF_FILENAME [-o OUT_BDF_FILENAME]\n'
bdf mirror IN_BDF_FILENAME [-o OUT_BDF_FILENAME] [--plane PLANE] [--tol TOL]\n'
bdf export_mcids IN_BDF_FILENAME [-o OUT_GEOM_FILENAME]\n'
bdf split_cbars_by_pin_flags IN_BDF_FILENAME [-o OUT_BDF_FILENAME]\n'
"""
import os
import sys
from io import StringIO
from typing import List
from cpylog import SimpleLogger
import pyNastran
from pyNastran.bdf.mesh_utils.bdf_renumber import bdf_renumber, superelement_renumber
from pyNastran.bdf.mesh_utils.bdf_merge import bdf_merge
from pyNastran.bdf.mesh_utils.export_mcids import export_mcids
from pyNastran.bdf.mesh_utils.pierce_shells import pierce_shell_model
# these imports exist only to check that they stay up to date;
# if something is imported and tested elsewhere, it should be removed from here
from pyNastran.bdf.mesh_utils.shift import update_nodes
from pyNastran.bdf.mesh_utils.mirror_mesh import write_bdf_symmetric
from pyNastran.bdf.mesh_utils.collapse_bad_quads import convert_bad_quads_to_tris
from pyNastran.bdf.mesh_utils.delete_bad_elements import delete_bad_shells, get_bad_shells
from pyNastran.bdf.mesh_utils.split_cbars_by_pin_flag import split_cbars_by_pin_flag
from pyNastran.bdf.mesh_utils.dev.create_vectorized_numbered import create_vectorized_numbered
from pyNastran.bdf.mesh_utils.remove_unused import remove_unused
from pyNastran.bdf.mesh_utils.free_faces import write_skin_solid_faces
from pyNastran.bdf.mesh_utils.get_oml import get_oml_eids
def cmd_line_create_vectorized_numbered(argv=None, quiet=False): # pragma: no cover
if argv is None:
argv = sys.argv
msg = (
'Usage:\n'
' bdf create_vectorized_numbered IN_BDF_FILENAME [OUT_BDF_FILENAME]\n'
' bdf create_vectorized_numbered -h | --help\n'
' bdf create_vectorized_numbered -v | --version\n'
'\n'
'Positional Arguments:\n'
' IN_BDF_FILENAME the model to convert\n'
" OUT_BDF_FILENAME the converted model name (default=IN_BDF_FILENAME + '_convert.bdf')"
'\n'
'Info:\n'
' -h, --help show this help message and exit\n'
" -v, --version show program's version number and exit\n"
)
if len(argv) == 1:
sys.exit(msg)
from docopt import docopt
ver = str(pyNastran.__version__)
data = docopt(msg, version=ver, argv=argv[1:])
if not quiet: # pragma: no cover
print(data)
bdf_filename_in = data['IN_BDF_FILENAME']
if data['OUT_BDF_FILENAME']:
bdf_filename_out = data['OUT_BDF_FILENAME']
else:
base, ext = os.path.splitext(bdf_filename_in)
bdf_filename_out = base + '_convert' + ext
create_vectorized_numbered(bdf_filename_in, bdf_filename_out)
def cmd_line_equivalence(argv=None, quiet=False):
"""command line interface to bdf_equivalence_nodes"""
if argv is None:
argv = sys.argv
from docopt import docopt
msg = (
'Usage:\n'
' bdf equivalence IN_BDF_FILENAME EQ_TOL [-o OUT_BDF_FILENAME]\n'
' bdf equivalence -h | --help\n'
' bdf equivalence -v | --version\n'
'\n'
"Positional Arguments:\n"
" IN_BDF_FILENAME path to input BDF/DAT/NAS file\n"
" EQ_TOL the spherical equivalence tolerance\n"
#" OUT_BDF_FILENAME path to output BDF/DAT/NAS file\n"
'\n'
'Options:\n'
" -o OUT, --output OUT_BDF_FILENAME path to output BDF/DAT/NAS file\n\n"
'Info:\n'
' -h, --help show this help message and exit\n'
" -v, --version show program's version number and exit\n"
)
if len(argv) == 1:
sys.exit(msg)
ver = str(pyNastran.__version__)
#type_defaults = {
# '--nerrors' : [int, 100],
#}
data = docopt(msg, version=ver, argv=argv[1:])
if not quiet: # pragma: no cover
print(data)
bdf_filename = data['IN_BDF_FILENAME']
bdf_filename_out = data['--output']
if bdf_filename_out is None:
bdf_filename_out = 'merged.bdf'
tol = float(data['EQ_TOL'])
size = 16
from pyNastran.bdf.mesh_utils.bdf_equivalence import bdf_equivalence_nodes
level = 'debug' if not quiet else 'warning'
log = SimpleLogger(level=level, encoding='utf-8', log_func=None)
bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
renumber_nodes=False,
neq_max=10, xref=True,
node_set=None, size=size,
is_double=False,
remove_collapsed_elements=False,
avoid_collapsed_elements=False,
crash_on_collapse=False,
log=log, debug=True)
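# A minimal invocation sketch (an illustrative assumption, not part of the original
# module); the filenames are placeholders. It is equivalent to running
# "bdf equivalence model.bdf 0.01 -o model_eq.bdf" from the shell.
def _example_cmd_line_equivalence():  # pragma: no cover
    cmd_line_equivalence(argv=['bdf', 'equivalence', 'model.bdf', '0.01',
                               '-o', 'model_eq.bdf'], quiet=True)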
def cmd_line_bin(argv=None, quiet=False): # pragma: no cover
"""bins the model into nbins"""
if argv is None:
argv = sys.argv
from docopt import docopt
msg = (
"Usage:\n"
#" bdf bin IN_BDF_FILENAME AXIS1 AXIS2 [--cid CID] [--step SIZE]\n"
" bdf bin IN_BDF_FILENAME AXIS1 AXIS2 [--cid CID] [--nbins NBINS]\n"
' bdf bin -h | --help\n'
' bdf bin -v | --version\n'
'\n'
"Positional Arguments:\n"
" IN_BDF_FILENAME path to input BDF/DAT/NAS file\n"
" AXIS1 axis to loop over\n"
" AXIS2 axis to bin\n"
'\n'
'Options:\n'
" --cid CID the coordinate system to bin (default:0)\n"
" --step SIZE the step size for binning\n\n"
" --nbins NBINS the number of bins\n\n"
'Info:\n'
' -h, --help show this help message and exit\n'
" -v, --version show program's version number and exit\n\n"
'Plot z (2) as a function of y (1) in y-stepsizes of 0.1:\n'
' bdf bin fem.bdf 1 2 --cid 0 --step 0.1\n\n'
'Plot z (2) as a function of y (1) with 50 bins:\n'
' bdf bin fem.bdf 1 2 --cid 0 --nbins 50'
)
if len(argv) == 1:
sys.exit(msg)
ver = str(pyNastran.__version__)
#type_defaults = {
# '--nerrors' : [int, 100],
#}
data = docopt(msg, version=ver, argv=argv[1:])
bdf_filename = data['IN_BDF_FILENAME']
axis1 = int(data['AXIS1'])
axis2 = int(data['AXIS2'])
cid = 0
if data['--cid']:
cid = int(data['--cid'])
#stepsize = 0.1
#if data['--step']:
#stepsize = float(data['--step'])
nbins = 10
if data['--nbins']:
nbins = int(data['--nbins'])
assert nbins >= 2, nbins
if not quiet: # pragma: no cover
print(data)
import numpy as np
import matplotlib.pyplot as plt
from pyNastran.bdf.bdf import read_bdf
level = 'debug' if not quiet else 'warning'
log = SimpleLogger(level=level, encoding='utf-8', log_func=None)
model = read_bdf(bdf_filename, log=log)
xyz_cid = model.get_xyz_in_coord(cid=cid, fdtype='float64')
y = xyz_cid[:, axis1]
z = xyz_cid[:, axis2]
plt.figure(1)
#n, bins, patches = plt.hist( [x0,x1,x2], 10, weights=[w0, w1, w2], histtype='bar')
ys = []
#zs = []
zs_min = []
zs_max = []
y0 = y.min()
y1 = y.max()
dy = (y1 - y0) / nbins
y0i = y0
y1i = y0 + dy
for unused_i in range(nbins):
j = np.where((y0i <= y) & (y <= y1i))[0]
if not len(j):
continue
ys.append(y[j].mean())
zs_min.append(z[j].min())
zs_max.append(z[j].max())
y0i += dy
y1i += dy
zs_max = np.array(zs_max)
zs_min = np.array(zs_min)
if not quiet: # pragma: no cover
print('ys = %s' % ys)
print('zs_max = %s' % zs_max)
print('zs_min = %s' % zs_min)
plt.plot(ys, zs_max, 'r-o', label='max')
plt.plot(ys, zs_min, 'b-o', label='min')
plt.plot(ys, zs_max - zs_min, 'g-o', label='delta')
#plt.xlim([y0, y1])
plt.xlabel('Axis %s' % axis1)
plt.ylabel('Axis %s' % axis2)
plt.grid(True)
plt.legend()
plt.show()
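# Hypothetical invocation (filename made up), mirroring the example in the help text above:
#   cmd_line_bin(['bdf', 'bin', 'fem.bdf', '1', '2', '--cid', '0', '--nbins', '50'], quiet=True)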
def cmd_line_renumber(argv=None, quiet=False):
"""command line interface to bdf_renumber"""
if argv is None:
argv = sys.argv
from docopt import docopt
msg = (
"Usage:\n"
' bdf renumber IN_BDF_FILENAME OUT_BDF_FILENAME [--superelement] [--size SIZE]\n'
' bdf renumber IN_BDF_FILENAME [--superelement] [--size SIZE]\n'
' bdf renumber -h | --help\n'
' bdf renumber -v | --version\n'
'\n'
'Positional Arguments:\n'
' IN_BDF_FILENAME path to input BDF/DAT/NAS file\n'
' OUT_BDF_FILENAME path to output BDF/DAT/NAS file\n'
'\n'
'Options:\n'
' --superelement calls superelement_renumber\n'
' --size SIZE set the field size (default=16)\n\n'
'Info:\n'
' -h, --help show this help message and exit\n'
" -v, --version show program's version number and exit\n"
)
if len(argv) == 1:
sys.exit(msg)
ver = str(pyNastran.__version__)
#type_defaults = {
# '--nerrors' : [int, 100],
#}
data = docopt(msg, version=ver, argv=argv[1:])
if not quiet: # pragma: no cover
print(data)
bdf_filename = data['IN_BDF_FILENAME']
bdf_filename_out = data['OUT_BDF_FILENAME']
if bdf_filename_out is None:
bdf_filename_out = 'renumber.bdf'
size = 16
if data['--size']:
size = int(data['--size'])
assert size in [8, 16], f'size={size} args={argv}'
#cards_to_skip = [
#'AEFACT', 'CAERO1', 'CAERO2', 'SPLINE1', 'SPLINE2',
#'AERO', 'AEROS', 'PAERO1', 'PAERO2', 'MKAERO1']
cards_to_skip = []
level = 'debug' if not quiet else 'warning'
log = SimpleLogger(level=level, encoding='utf-8', log_func=None)
if data['--superelement']:
superelement_renumber(bdf_filename, bdf_filename_out, size=size, is_double=False,
starting_id_dict=None, #round_ids=False,
cards_to_skip=cards_to_skip, log=log)
else:
bdf_renumber(bdf_filename, bdf_filename_out, size=size, is_double=False,
starting_id_dict=None, round_ids=False,
cards_to_skip=cards_to_skip, log=log)
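# Hypothetical invocation (filenames made up): renumber into 8-character fields:
#   cmd_line_renumber(['bdf', 'renumber', 'model.bdf', 'model_renumbered.bdf', '--size', '8'], quiet=True)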
def cmd_line_mirror(argv=None, quiet=False):
"""command line interface to write_bdf_symmetric"""
if argv is None:
argv = sys.argv
from docopt import docopt
import pyNastran
msg = (
"Usage:\n"
" bdf mirror IN_BDF_FILENAME [-o OUT_BDF_FILENAME] [--plane PLANE] [--tol TOL]\n"
" bdf mirror IN_BDF_FILENAME [-o OUT_BDF_FILENAME] [--plane PLANE] [--noeq]\n"
' bdf mirror -h | --help\n'
' bdf mirror -v | --version\n'
'\n'
"Positional Arguments:\n"
" IN_BDF_FILENAME path to input BDF/DAT/NAS file\n"
#" OUT_BDF_FILENAME path to output BDF/DAT/NAS file\n"
'\n'
'Options:\n'
" -o OUT, --output OUT_BDF_FILENAME path to output BDF/DAT/NAS file\n"
" --plane PLANE the symmetry plane (xz, yz, xy); default=xz\n"
' --tol TOL the spherical equivalence tolerance; default=1e-6\n'
' --noeq disable equivalencing\n'
"\n" # (default=0.000001)
'Info:\n'
' -h, --help show this help message and exit\n'
" -v, --version show program's version number and exit\n"
)
if len(argv) == 1:
sys.exit(msg)
ver = str(pyNastran.__version__)
#type_defaults = {
# '--nerrors' : [int, 100],
#}
data = docopt(msg, version=ver, argv=argv[1:])
if data['--tol'] is None:
data['--tol'] = 0.000001
if isinstance(data['--tol'], str):
data['--tol'] = float(data['--tol'])
tol = data['--tol']
assert data['--noeq'] in [True, False]
if data['--noeq']:
tol = -1.
plane = 'xz'
if data['--plane'] is not None: # None or str
plane = data['--plane']
if not quiet: # pragma: no cover
print(data)
size = 16
bdf_filename = data['IN_BDF_FILENAME']
bdf_filename_out = data['--output']
if bdf_filename_out is None:
bdf_filename_out = 'mirrored.bdf'
from io import StringIO
from pyNastran.bdf.bdf import read_bdf
from pyNastran.bdf.mesh_utils.bdf_equivalence import bdf_equivalence_nodes
level = 'debug' if not quiet else 'warning'
log = SimpleLogger(level=level, encoding='utf-8', log_func=None)
model = read_bdf(bdf_filename, log=log)
bdf_filename_stringio = StringIO()
write_bdf_symmetric(model, bdf_filename_stringio, encoding=None, size=size,
is_double=False,
enddata=None, close=False,
plane=plane, log=log)
bdf_filename_stringio.seek(0)
if tol >= 0.0:
bdf_equivalence_nodes(bdf_filename_stringio, bdf_filename_out, tol,
renumber_nodes=False,
neq_max=10, xref=True,
node_set=None, | |
return ticket
if not is_multiple_copy:
try:
os.rename(a_path, bad_file)
except:
msg = "failed to rename %s to %s"%(a_path, bad_file)
ticket = {'work': 'unmark_bad', 'bfid': bfid, 'path': bad_file};
ticket = self.send(ticket,rcv_timeout=timeout, tries=retry)
if ticket['status'][0] != e_errors.OK:
msg += '(Failed to unmark the file bad: '+ticket['status'][1]+')'
return {'status': (e_errors.FILE_CLERK_ERROR, msg) }
print bfid, a_path, "->", bad_file
return ticket
def unmark_bad(self, path, specified_bfid = None, timeout=RCV_TIMEOUT, retry=RCV_TRIES):
# get the full absolute path
a_path = os.path.abspath(path)
dirname, filename = os.path.split(a_path)
# does it exist?
if not os.access(path, os.F_OK):
msg = "%s does not exist!" % (path)
return {'status': (e_errors.FILE_CLERK_ERROR, msg)}
# check permission
if not os.access(dirname, os.W_OK):
msg = "not enough privilege to rename %s" % (path)
return {'status': (e_errors.FILE_CLERK_ERROR, msg)}
# get bfid
bfid_file = os.path.join(dirname, '.(use)(1)(%s)' % (filename))
f = open(bfid_file)
bfid = string.strip(f.readline())
f.close()
if len(bfid) < 12:
msg = "can not find bfid for %s"%(path)
return {'status': (e_errors.FILE_CLERK_ERROR, msg)}
#Detect if the supplied bfid is a multiple copy of the primary bfid.
is_multiple_copy = False
if specified_bfid:
copy_dict = self.find_all_copies(bfid,timeout,retry)
if e_errors.is_ok(copy_dict):
copy_bfids = copy_dict['copies']
else:
return copy_dict
try:
#Remove the primary bfid from the list. file_copies()
# can miss copies of copies, so we don't want to use that.
del copy_bfids[copy_bfids.index(bfid)]
except ValueError:  # list.index raises ValueError, not IndexError, when bfid is absent
pass
if specified_bfid in copy_bfids:
bfid = specified_bfid
is_multiple_copy = True
else:
msg = "%s bfid is not a copy of %s" % (specified_bfid, path)
return {'status': (e_errors.FILE_CLERK_ERROR, msg)}
# is it a "bad" file?
if filename[:5] != ".bad." and not is_multiple_copy:
msg = "%s is not officially a bad file"%(path)
return {'status': (e_errors.FILE_CLERK_ERROR, msg)}
record = self.bfid_info(bfid,timeout,retry)
if record['status'][0] != e_errors.OK:
return record
if is_multiple_copy:
good_file = path
else:
good_file = os.path.join(dirname, filename[5:])
# rename it
try:
os.rename(a_path, good_file)
except:
msg = "failed to rename %s to %s"%(a_path, good_file)
return {'status': (e_errors.FILE_CLERK_ERROR, msg)}
# log it
ticket = {'work': 'unmark_bad', 'bfid': bfid}
ticket = self.send(ticket,rcv_timeout=timeout, tries=retry)
if ticket['status'][0] == e_errors.OK:
print bfid, a_path, "->", good_file
return ticket
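# Hypothetical usage, assuming 'fcc' is an instance of this client class and the path exists:
#   ticket = fcc.unmark_bad('/pnfs/data/.bad.myfile')
#   if ticket['status'][0] != e_errors.OK:
#       print ticket['status'][1]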
def bfid_info(self, bfid = None, timeout=0, retry=0):
if not bfid:
bfid = self.bfid
r = self.send({"work" : "bfid_info",
"bfid" : bfid }, rcv_timeout=timeout, tries=retry)
if r.has_key("work"):
del r['work']
return r
# This is only to be used internally
def exist_bfids(self, bfids = [], timeout=RCV_TIMEOUT, retry=RCV_TRIES):
if bfids == None:
bfids = self.bfid
r = self.send({"work" : "exist_bfids",
"bfids": bfids}, rcv_timeout=timeout, tries=retry)
return r['result']
# This is a retrofit for bfid
def set_deleted(self, deleted, restore_dir="no", bfid = None, timeout=RCV_TIMEOUT, retry=RCV_TRIES):
deleted = string.lower(deleted)
if deleted not in enstore_constants.FILE_DELETED_FLAGS:
message="Unsupported delete flag \"%s\", supported flags are "%(deleted,)
for f in enstore_constants.FILE_DELETED_FLAGS:
message=message+"\""+f+"\","
message=message[:-1]
return {'status': (e_errors.FILE_CLERK_ERROR, message)}
if bfid == None:
bfid = self.bfid
r = self.send({"work" : "set_deleted",
"bfid" : bfid,
"deleted" : deleted,
"restore_dir" : restore_dir }, rcv_timeout=timeout, tries=retry)
return r
def get_crcs(self, bfid, timeout=RCV_TIMEOUT, retry=RCV_TRIES):
r = self.send({"work" : "get_crcs",
"bfid" : bfid}, rcv_timeout=timeout, tries=retry)
return r
def set_crcs(self, bfid, sanity_cookie, complete_crc, timeout=RCV_TIMEOUT, retry=RCV_TRIES):
r = self.send({"work" : "set_crcs",
"bfid" : bfid,
"sanity_cookie": sanity_cookie,
"complete_crc": complete_crc}, rcv_timeout=timeout, tries=retry)
return r
# delete a volume
def delete_volume(self, vol, timeout=RCV_TIMEOUT, retry=RCV_TRIES):
r = self.send({"work" : "delete_volume",
"external_label" : vol }, rcv_timeout=timeout, tries=retry)
return r
# erase a volume
def erase_volume(self, vol, timeout=RCV_TIMEOUT, retry=RCV_TRIES):
r = self.send({"work" : "erase_volume",
"external_label" : vol }, rcv_timeout=timeout, tries=retry)
return r
# does the volume contain any undeleted file?
def has_undeleted_file(self, vol, timeout=RCV_TIMEOUT, retry=RCV_TRIES):
r = self.send({"work" : "has_undeleted_file",
"external_label" : vol }, rcv_timeout=timeout, tries=retry)
return r
def restore(self, bfid, uid = None, gid = None, force = None, timeout=0, retry=0):
# get the file information from the file clerk
bit_file = self.bfid_info(bfid,timeout,retry)
if bit_file['status'][0] != e_errors.OK:
return bit_file
del bit_file['status']
# take care of uid and gid
if not uid:
uid = bit_file['uid']
if not gid:
gid = bit_file['gid']
"""
# try its best to set uid and gid
try:
os.setregid(gid, gid)
os.setreuid(uid, uid)
except:
pass
"""
# check if the volume is deleted
if bit_file["external_label"][-8:] == '.deleted':
message = "volume %s is deleted" % (bit_file["external_label"],)
return {'status': (e_errors.FILE_CLERK_ERROR, message)}
# make sure the file has to be deleted (if --force was specified,
# allow for the restore to update the file)
if bit_file['deleted'] != 'yes' and force == None:
message = "%s is not deleted" % (bfid,)
return {'status': (e_errors.FILE_CLERK_ERROR, message)}
# find out file_family
vcc = volume_clerk_client.VolumeClerkClient(self.csc)
vol = vcc.inquire_vol(bit_file['external_label'])
if vol['status'][0] != e_errors.OK:
return vol
file_family = volume_family.extract_file_family(vol['volume_family'])
del vcc
# check if the path is a valid pnfs path
if not bit_file['pnfs_name0']:
#We get here if there is no path information.
message = "no path information found for %s" % (bfid,)
return {'status': (e_errors.FILE_CLERK_ERROR, message)}
if not chimera.is_chimera_path(bit_file['pnfs_name0'], check_name_only = 1):
message = "%s is not a valid chimera/pnfs path" % (bit_file['pnfs_name0'],)
return {'status': (e_errors.FILE_CLERK_ERROR, message)}
# its directory has to exist
p_p, p_f = os.path.split(bit_file['pnfs_name0'])
p_stat = file_utils.get_stat(p_p)
rtn_code2 = file_utils.e_access_cmp(p_stat, os.F_OK)
if not rtn_code2:
message = "can not write in directory %s" % (p_p,)
return {'status': (e_errors.FILE_CLERK_ERROR, message)}
# check if the file has already existed (if --force was specified,
# allow for the restore to update the file)
rtn_code = file_utils.e_access(bit_file['pnfs_name0'], os.F_OK)
if rtn_code and force == None: # file exists
message = "%s exists" % (bit_file['pnfs_name0'],)
return {'status': (e_errors.FILE_CLERK_ERROR, message)}
if rtn_code and force != None:
#check if any file has the same pnfs_id
pnfs_id=""
if chimera.is_chimera_path(bit_file['pnfs_name0'], check_name_only = 1):
pnfs_id = chimera.get_pnfsid(bit_file['pnfs_name0'])
else:
message = "file %s is not chimera nor pnfs"\
% (bit_file['pnfs_name0'])
return {'status': (e_errors.FILE_CLERK_ERROR, message)}
if pnfs_id != bit_file['pnfsid']:
message = "file pnfs id (%s) does not match database pnfs id (%s)"\
% (bit_file['pnfs_name0'], pnfs_id)
return {'status': (e_errors.FILE_CLERK_ERROR, message)}
#Setup the File class to do the update.
bit_file['file_family'] = file_family
pf=None
if chimera.is_chimera_path(bit_file['pnfs_name0'], check_name_only = 1):
pf = chimera.File(bit_file)
else:
message = "%s is not chimera not pnfs file" % (bit_file['pnfs_name0'],)
return {'status': (e_errors.FILE_CLERK_ERROR, message)}
# Now create/update it; catch any error
if not rtn_code: #DOES NOT EXIST
# Has it already existed?
if pf.exists() and force == None:
message = "%s already exists" % (bit_file['pnfs_name0'],)
return {'status': (e_errors.FILE_CLERK_ERROR, message)}
if not pf.exists():
#We need to wrap this code (when uid == 0) to set the euid and
# egid to the owner of the directory. This will allow root
# to create files in non-admin and non-trusted filesystems.
#print "os.geteuid():", os.geteuid(), p_p
file_utils.match_euid_egid(p_p)
#print "os.geteuid():", os.geteuid(), p_p
try:
pf.create()
except (OSError, IOError), msg:
message = "can not create: %s" % (str(msg),)
return {'status': (e_errors.PNFS_ERROR, message)}
except:
message = "can not create: %s: %s" % (str(sys.exc_info()[0]),
str(sys.exc_info()[1]))
return {'status' : (e_errors.PNFS_ERROR, message)}
file_utils.set_euid_egid(0, 0)
file_utils.release_lock_euid_egid()
#Now that we are back to root, we can change the ownership
# of the file.
file_utils.chown(bit_file['pnfs_name0'], uid, gid)
else: #DOES EXIST
file_utils.match_euid_egid(bit_file['pnfs_name0'])
try:
pf.update()
message = ""
except:
message = "can not update %s: %s" % (pf.path,
sys.exc_info()[1])
file_utils.set_euid_egid(0, 0)
file_utils.release_lock_euid_egid()
if message:
return {'status': (e_errors.FILE_CLERK_ERROR, message)}
pnfs_id = pf.get_pnfs_id()
if pnfs_id != pf.pnfs_id or bit_file['deleted'] != "no":
# update file record
return self.modify({'bfid': bfid, 'pnfsid':pnfs_id,
'deleted':'no'})
return {'status':(e_errors.OK, None)}
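# Hypothetical usage (the bfid is made up); restore() returns a ticket-style dict with a
# 'status' tuple like the other client calls above:
#   ticket = fcc.restore('CDMS123456789000000', force=1)
#   if ticket['status'][0] != e_errors.OK:
#       print ticket['status'][1]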
# rebuild pnfs file entry
def rebuild_pnfs_file(self, bfid, file_family = None, timeout=RCV_TIMEOUT, retry=RCV_TRIES):
ticket = {"work": "restore_file2",
"bfid": bfid,
"check": 0}
if file_family:
ticket['file_family'] = file_family
return self.send(ticket, rcv_timeout=timeout, tries=retry)
# get volume map name for given bfid
def get_volmap_name(self, bfid = None, timeout=RCV_TIMEOUT, retry=RCV_TRIES):
if not bfid:
bfid = self.bfid
r = self.send({"work" : "get_volmap_name",
"bfid" : bfid}, rcv_timeout=timeout, tries=retry)
return r
# delete bitfile
def del_bfid(self, bfid = None, timeout=RCV_TIMEOUT, retry=RCV_TRIES):
if not bfid:
bfid = self.bfid
r = self.send({"work" : "del_bfid",
"bfid" : bfid}, rcv_timeout=timeout, tries=retry)
return r
# create file record
def add(self, ticket, timeout=0, retry=0):
ticket['work'] = 'add_file_record'
return self.send(ticket,rcv_timeout=timeout, tries=retry)
# modify file record
def modify(self, ticket, timeout=0, retry=0):
if type(ticket) == types.DictType :
ticket['work'] = 'modify_file_record'
return self.send(ticket,rcv_timeout=timeout, tries=retry)
elif type(ticket) == types.ListType:
rticket = {}
rticket["work"] = 'modify_file_records'
rticket["list"] = ticket
return self.send(rticket)
else:
raise | |
import numpy as np
class Reserve:
def __init__(self, name, amount, crr, verbose=0):
assert(amount > 0)
assert(crr > 0.0 and crr <= 1.0)
self.name = name
self.amount = amount
self.crr = crr
self.verbose = verbose
def __str__(self):
if self.verbose > 0:
return '[{}'.format(self.name) \
+ ' R={}'.format(np.round(self.amount, 3)) \
+ ' F={}]'.format(np.round(self.crr, 3))
else:
return ''
def log(self):
print(self)
class TokenChanger:
def __init__(self, name, supply, reserve_tokens=None, verbose=0, precision=4):
self.name = name
self.reserve_tokens = reserve_tokens if reserve_tokens is not None else {}
self.supply = supply
self.verbose=verbose
self.precision=precision
if self.verbose > 0:
self.log()
def log(self):
if self.verbose > 0:
print('[{}][LOG]'.format(self.name)
+ ' S={s} CRR={crr}:'.format(crr=np.round(self.crr, self.precision), s=np.round(self.supply, self.precision)) \
+ ':'.join([str(r) for _, r in self.reserve_tokens.items()])
)
def add_reserve(self, reserve):
assert(type(reserve) is Reserve)
assert(reserve.name not in self.reserve_tokens.keys())
self.reserve_tokens[reserve.name] = reserve
crr = self.crr
assert(crr >= 0 and crr <= 1.0)
@property
def crr(self):
return np.sum([r.crr for _, r in self.reserve_tokens.items()])
@property
def reserve(self):
reserve_tot = np.sum([r.amount for _,r in self.reserve_tokens.items()])
assert(reserve_tot >= 0)
return reserve_tot
@property
def price(self):
return self.reserve/(self.crr * self.supply)
def partial_prices(self):
return dict(zip(self.reserve_tokens.keys(),
[self.__partial_price(tkn) for tkn in self.reserve_tokens.keys()]))
def __partial_price(self, token, reserve_amount=None, supply=None):
"""
Implied price of the SmartToken if one buys it using the given token
:param token:
:param reserve_amount: if unset, uses the actual reserve amount; otherwise the price is simulated with the supplied amount
:return:
"""
assert(token in self.reserve_tokens.keys())
reserve = self.reserve_tokens[token]
assert (type(reserve) is Reserve)
if reserve_amount is None:
reserve_amount = reserve.amount
if supply is None:
supply = self.supply
return reserve_amount / (reserve.crr * supply)
def buy(self, amount, from_token, const=False):
"""
Exchange amount of reserve token for SmartToken
:param amount: amount of reserve token to be sold
:param from_token: name of token being sold
:param const: set to True to get the answer without modifying the object
:return:
"""
assert(from_token in self.reserve_tokens.keys())
assert(amount >= 0)
reserve = self.reserve_tokens[from_token]
P0 = self.__partial_price(from_token)
R0 = reserve.amount
S0 = self.supply
R1 = R0 + amount
if not const:
self.reserve_tokens[from_token].amount = R1
S1 = S0 * np.power(R1/R0, reserve.crr) #update supply while maintaining crr
dS = S1 - S0
assert(dS >= 0)
if not const:
self.supply = S1
if self.verbose > 1:
self.log()
if self.verbose > 0:
px_eff = amount / dS
P1 = self.__partial_price(from_token, R1, S1)
slippage = 10000 * (px_eff / P0 - 1)
print('[{n}][BUY] {amt}[{ctr}] => {dS}[{n}] ; P0={P0} P1={P1} PAY={px} SLIP={slip}(bps)'.format(
amt=np.round(amount, self.precision), ctr=from_token,
dS=np.round(dS, self.precision), n=self.name,
P0=np.round(P0, self.precision), P1=np.round(P1, self.precision),
px=np.round(px_eff, self.precision), slip=np.round(slippage, self.precision)))
return dS
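# The supply update above is the Bancor constant-reserve-ratio relation
#   S1 = S0 * (R1 / R0) ** crr, so dS = S0 * ((1 + amount / R0) ** crr - 1).
# Worked (hypothetical) numbers: R0 = 100, S0 = 100, crr = 0.2, amount = 100 -> dS ~= 14.87.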
def sell(self, amount, to_token, const=False):
"""
Exchange SmartToken for a certain amount of reserve token
:param amount: amount of SmartToken to be sold
:param to_token: name of reserve token to be bought
:param const: set to True to get the answer without modifying the object
:return:
"""
assert(to_token in self.reserve_tokens.keys())
assert(amount >= 0 and amount <= self.supply)
reserve = self.reserve_tokens[to_token]
P0 = self.__partial_price(to_token)
R0 = reserve.amount
S0 = self.supply
S1 = S0 - amount
if not const:
self.supply = S1
R1 = R0 * np.power(S1/S0, 1/reserve.crr) #update reserve while maintaining crr
if not const:
self.reserve_tokens[to_token].amount = R1
dR = R0 - R1
assert(dR >= 0)
if self.verbose > 1:
self.log()
if self.verbose > 0:
px_eff = dR/amount
slippage = 10000*(1-px_eff/P0)
P1 = self.__partial_price(to_token, R1, S1)
print('[{n}][SELL] {amt}[{n}] => {dR}[{ctr}] ; P0={P0} P1={P1} REC={px} SLIP={slip}(bps)'.format(
amt=np.round(amount, self.precision), n=self.name,
ctr=to_token, dR=np.round(dR, self.precision),
P0=np.round(P0, self.precision), P1=np.round(P1, self.precision),
px=np.round(px_eff, self.precision), slip=np.round(slippage, self.precision)
))
return dR
def exchange(self, amount, from_token, to_token, const=False):
"""
Exchange between 2 reserve tokens using the SmartToken
:param amount: amount of base reserve token to be sold
:param from_token: name of base reserve token
:param to_token: name of target reserve token
:param const: set to True to get the answer without modifying the object
:return:
"""
if self.verbose > 0:
self.log()
P0 = self.exchange_price(from_token,to_token)
st_amount = self.buy(amount, from_token, const)
to_amount = self.sell(st_amount, to_token, const)
px_eff = amount/to_amount
slip = 10000*(px_eff/P0-1)
if self.verbose > 1:
self.log()
if self.verbose > 0:
print('[{n}][EXCH] {from_amt}[{from_token}] => {ctr_amt}[{ctr_token}] PAY={px} SLIP={slip}(bps)'.format(
n=self.name, from_amt=np.round(amount, self.precision),from_token=from_token,
ctr_amt=np.round(to_amount, self.precision),ctr_token=to_token,
px=np.round(px_eff, self.precision), slip=np.round(slip, self.precision)))
return to_amount
def exchange_price(self, from_token, to_token):
"""
Return the mid price for exchanging from_token => to_token
:param from_token:
:param to_token:
:return:
"""
if from_token == self.name:
return self.__partial_price(to_token)
elif to_token == self.name:
return 1/self.__partial_price(from_token)
assert (from_token in self.reserve_tokens.keys())
assert (to_token in self.reserve_tokens.keys())
base = self.reserve_tokens[from_token]
ctr = self.reserve_tokens[to_token]
px_imp = (base.amount*ctr.crr)/(ctr.amount*base.crr)
if self.verbose > 1:
self.log()
if self.verbose > 0:
print('[{n}][IMP] {ctr}/{base}={px}'.format(
n=self.name, ctr=to_token, base=from_token, px=np.round(px_imp, self.precision)
))
return px_imp
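# 'dround' is referenced by SmartETF below but is not defined in this file; the helper sketched
# here is an assumption that simply rounds every value of a dict for logging.
def dround(d, precision=4):
    return {k: np.round(v, precision) for k, v in d.items()}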
class SmartETF(TokenChanger):
def __init__(self, name, supply, reserve_tokens=None, verbose=0, precision=4):
TokenChanger.__init__(self, name, supply, reserve_tokens, verbose, precision)
@property
def index(self):
return dict(zip(self.reserve_tokens.keys(),[r.crr for r in list(self.reserve_tokens.values())]))
def creation(self, basket):
"""
convert a basket of reserve currencies into units of the SmartETF
:param basket:
:return: shares of the SmartETF
"""
assert(type(basket) is list)
bad_currencies = set(dict(basket).keys()).difference(self.reserve_tokens.keys())
if len(bad_currencies) != 0:
raise Exception('bad currencies in creation basket: {}'.format(bad_currencies))
shares = 0
for cmp in basket:
token = cmp[0]
amount = cmp[1]
# we do not allow const=True here because when sending a perfect basket
# it is crucial to modify the partial prices at each iteration in order to
# achieve the min slippage
shares += self.buy(amount=amount, from_token=token, const=False)
if self.verbose > 1:
print('[{n}][PARTIAL] {ppx}'.format(
n=self.name,
ppx=dround(self.partial_prices(), self.precision)
))
return shares
def redemption(self, shares):
"""
convert shares of the SmartETF into a basket of reserve currencies
:param shares:
:return:
"""
basket = dict()
ratio = (1-shares/self.supply)
for token in self.index.keys():
amount = self.supply*(1.0-np.power(ratio,self.index[token]))
basket[token] = self.sell(amount, token, const=False)
if self.verbose > 1:
print('[{n}][PARTIAL] {ppx}'.format(
n=self.name,
ppx=dround(self.partial_prices(), self.precision)
))
return basket
class SmartToken:
def __init__(self,name,R,S,F,verbose=0):
"""
Notations follow the document
Formulas for Bancor System by <NAME>
"""
self.name = name
self.R = R
self.S = S
self.F = F
self.P = R/(S*F)
self.verbose=verbose
@property
def price(self):
return self.P
@property
def supply(self):
return self.S
@property
def reserve(self):
return self.R
def log(self):
if self.verbose > 0:
print('[{}]'.format(self.name)
+ ' R={}'.format(np.round(self.R,2))
+ ' S={}'.format(np.round(self.S,2))
+ ' F={}'.format(np.round(self.F,2))
+ ' P={}'.format(np.round(self.P,2)))
def exchange_cash(self,dR):
R0 = self.R
S0 = self.S
P0 = self.P
self.R += dR
scale = self.R/R0
self.S = S0*np.power(scale, self.F)
self.P = P0*np.power(scale, 1-self.F)
dS = self.S - S0
if self.verbose > 1:
print('[exchange_cash] dR={dR} scale={s} P0={P0} P1={P1}'.format(
dR=np.round(dR,2),
s=np.round(scale,2),
P0=np.round(P0,2),
P1=np.round(self.P,2)
))
self.log()
return dS
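# exchange_cash applies the constant-reserve-ratio relations: with scale = R1 / R0,
#   S1 = S0 * scale ** F   and   P1 = P0 * scale ** (1 - F).
# Hypothetical check: R0 = S0 = 100, F = 0.2, dR = 100 -> scale = 2, S1 ~= 114.87, P1 ~= 8.71,
# consistent with P1 = R1 / (S1 * F).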
def exchange_shares(self,dS):
R0 = self.R
S0 = self.S
P0 = self.P
self.S += dS
scale = self.S/S0
self.R = R0*np.power(scale, 1/self.F)
self.P = P0*np.power(scale, 1/self.F-1)
dR = self.R - R0
self.log()
return -dR
def mkt_move(self,ret):
scale = 1 + ret
self.R *= scale
self.P *= scale
self.log()
class Portfolio:
def __init__(self,cash,token,verbose=0):
assert(cash >= 0)
self.shares = 0
self.cash = cash
self.token = token
self.verbose=verbose
self.log()
token.log()
def log(self):
if self.verbose > 0:
print('[portfolio] cash={}'.format(np.round(self.cash,2))
+ ' shares={}'.format(np.round(self.shares,2))
+ ' wealth={}'.format(np.round(self.wealth))
)
@property
def wealth(self):
return self.cash + self.shares*self.token.price
def buy(self,amount):
assert(amount >= 0)
assert(amount <= self.cash)
self.cash -= amount
self.shares += self.token.exchange_cash(amount)
self.log()
def sell(self,qty):
assert(qty >= 0)
assert(qty <= self.shares)
self.shares -= qty
self.cash += self.token.exchange_shares(-qty)
self.log()
def example1(verbose):
print('[EXAMPLE 1] buy then sell returns the wealth to initial level, so our intermediate wealth was "illusory"')
BNT = SmartToken('BNT',100.0,100.0,0.2,verbose)
pi = Portfolio(100.0,BNT,verbose)
pi.buy(100)
pi.sell(pi.shares)
return BNT
def example2(verbose):
print('[EXAMPLE 2] (consistency): we check that multiple small buys are equivalent to one large one')
BNT = SmartToken('BNT',100.0,100.0,0.2,verbose)
pi = Portfolio(100.0,BNT,verbose)
pi.buy(50)
pi.buy(50)
pi.sell(pi.shares)
return BNT
def example3(verbose):
print('[EXAMPLE 3] (consistency): we check that if we buy and the market moves meanwhile the result is consistent')
BNT = SmartToken('BNT',100.0,100.0,0.2,verbose)
pi = Portfolio(100.0,BNT,verbose)
pi.buy(50)
BNT.mkt_move(0.05) #market moves 5% up
pi.buy(50)
pi.sell(pi.shares)
np.testing.assert_almost_equal(pi.cash,102.5)
np.testing.assert_almost_equal(BNT.price,5.25)
return BNT
def example4(verbose):
print('[EXAMPLE 4] (exchange token): we show the slippage incurred by using an exchange token')
GNO = Reserve('GNO',amount=25000,crr=0.2,verbose=1)
GNO.log()
ETH = Reserve('ETH',amount=100000,crr=0.8,verbose=1)
ETH.log()
GNOETH=TokenChanger('GNOETH', reserve_tokens=dict(), supply=1000, verbose=1)
GNOETH.log()
GNOETH.add_reserve(GNO)
GNOETH.log()
GNOETH.add_reserve(ETH)
GNOETH.log()
P0 = GNOETH.exchange_price('GNO', 'ETH')
amt = GNOETH.exchange(10,'GNO', 'ETH')
P1 = GNOETH.exchange_price('GNO', 'ETH')
return GNOETH
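# The examples can be run directly, e.g. (verbosity level is a free choice):
#   example1(1); example2(1); example3(1); example4(1)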
def example5(verbose):
print('[EXAMPLE 5] where we show that for a basket token there is a | |
print("right right")
angle = 1
green_var = True
elif contour_green is None and green_left %2 == 0:
print("left left")
angle = -1
green_var = True
else:
green_var = False
if green_left == 7:
counter_train += rc.get_delta_time()
if counter_train < 0.5:
angle = -1
print("turning left")
elif counter_train < 1.45:
angle = 0
print("going straight")
elif counter_train < 3.1:
angle = 1.5
print("turning right")
elif counter_train < 6.6:
green_var = False
green_left = 2
allow = True
allow_2 = False
allow_3 = True
#CONE SLALOMING
if curr_state == State.cone_slaloming:
if var is False:
speed = 0.07
if counter_final < 3.6:
angle = 0
elif counter_final < 7.6:
angle = 2.5
else:
var = True
else:
if counter_final > 37 and contours_green is not None:
curr_state = State.green_line
camefromcone = True
#allow_2 = True
speed = 0.07
if color_img is not None:
cone_distance = 40
contours_red = rc_utils.find_contours(color_img, RED[0], RED[1])
contour_red = rc_utils.get_largest_contour(contours_red, 100)
if contour_red is not None:
contour_center_red = rc_utils.get_contour_center(contour_red)
rc_utils.draw_contour(color_img, contour_red, rc_utils.ColorBGR.red.value)
rc_utils.draw_circle(color_img, contour_center_red,rc_utils.ColorBGR.yellow.value)
if contour_red is None:
contour_center_red = None
contours_blue = rc_utils.find_contours(color_img, BLUE[0], BLUE[1])
contour_blue = rc_utils.get_largest_contour(contours_blue, 100)
if contour_blue is not None:
contour_center_blue = rc_utils.get_contour_center(contour_blue)
rc_utils.draw_contour(color_img, contour_blue, rc_utils.ColorBGR.red.value)
rc_utils.draw_circle(color_img, contour_center_blue,rc_utils.ColorBGR.yellow.value)
color_img_temp = rc_utils.crop(color_img, (2* height//5, 0), (height, width))
rc.display.show_color_image(color_img_temp)
if curr_state_COLOR == State_COLOR.align_blue:
angle = rc_utils.remap_range(contour_center_blue[1], 0, width, -1, 1)
if depth_image_new[contour_center_blue[0]][contour_center_blue[1]] < 50:
counter_COLOR = 0
curr_state_COLOR = State_COLOR.avoid_blue
if curr_state_COLOR == State_COLOR.avoid_blue:
# if contour_green is not None:
# curr_state = State.green_line
if counter_COLOR < 0.6:
print("turn 1")
angle = -2
elif counter_COLOR < 1:
print("turn 2")
angle = 0
elif counter_COLOR < 2.2:
print("turn 3")
angle = 2
else:
print("else - align red")
curr_state_COLOR = State_COLOR.align_red
cone_counter += 1
if cone_counter == 7:
curr_state = State.green_line
counter_COLOR += rc.get_delta_time()
if curr_state_COLOR == State_COLOR.align_red:
speed = 0.07
if contour_center_red is not None:
angle = rc_utils.remap_range(contour_center_red[1], 0, width, -1, 1)
#print(depth_image_new[contour_center_red[0]][contour_center_red[1]])
if depth_image_new is not None and contour_center_red is not None:
if depth_image_new[contour_center_red[0]][contour_center_red[1]] < 50:
counter_COLOR = 0
curr_state_COLOR = State_COLOR.avoid_red
if curr_state_COLOR == State_COLOR.avoid_red:
speed = 0.07
# if depth_image_new[contour_center_red[0]][contour_center_red[1]] > cone_distance and counter == 0:
# curr_state_COLOR = State_COLOR.align_red
# else:
# if contour_green is not None:
# curr_state = State.green_line
if True:
if counter_COLOR < 0.6:
angle = 2
elif counter_COLOR < 1:
angle = 0
elif counter_COLOR < 2.2:
angle = -2
else:
curr_state_COLOR = State_COLOR.align_blue
cone_counter += 1
counter_COLOR += rc.get_delta_time()
counter_final += rc.get_delta_time()
#WALL FOLLOWING
if curr_state == State.wall_following:
speed = 0.15
if counter_wall == 1:
#curr_state = State.ar_tag_slaloming
curr_state = State.stop
if counter < 1:
# speed = 0.1
angle = 0
counter += rc.get_delta_time()
else:
diff = right_front - left_front
mult = 0.1
if diff > 10:
mult = 2
if diff > 40:
# speed = 0
pass
angle = rc_utils.remap_range(diff, -20, 20, -1, 1) * mult
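# remap_range maps the right/left distance difference from [-20, 20] onto a steering angle in
# [-1, 1]; 'mult' then amplifies the correction when the walls are strongly unbalanced.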
if forward_distance > 500:
just_finished_wall_following = True
counter = 0
counter_wall += 1
curr_state = State.green_line
#PURPLE LINE FOLLOWING
if curr_state == State.purple_line_following:
speed = 0.16
camefrompurple = True
if len(markers) > 0 and ar_marker_near and marker.get_id() != 1:
curr_state = State.ar_tag
img_left = rc_utils.crop(color_img, (4* height//5, 0), (height, width//3))
img_right = rc_utils.crop(color_img, (4* height//5, 2 * width//3), (height, width))
potential_colors = [PURPLE, ORANGE]
detected_color = None
greatest_area = 0
for (hsv_lower, hsv_upper, color_name) in potential_colors:
contours = rc_utils.find_contours(color_img, hsv_lower, hsv_upper)
largest_contour = rc_utils.get_largest_contour(contours)
if largest_contour is not None:
contour_area = rc_utils.get_contour_area(largest_contour)
if contour_area > greatest_area:
greatest_area = contour_area
detected_color = color_name
if detected_color is None:
curr_state = State.green_line
if detected_color == "orange":
contours_left_ORANGE = rc_utils.find_contours(img_left, ORANGE[0], ORANGE[1])
contours_right_ORANGE = rc_utils.find_contours(img_right, ORANGE[0], ORANGE[1])
elif detected_color == "purple":
contours_left_PURPLE = rc_utils.find_contours(img_left, PURPLE[0], PURPLE[1])
contours_right_PURPLE = rc_utils.find_contours(img_right, PURPLE[0], PURPLE[1])
contours_left_PURPLE = rc_utils.find_contours(img_left, PURPLE[0], PURPLE[1])
contours_right_PURPLE = rc_utils.find_contours(img_right, PURPLE[0], PURPLE[1])
contour_left_PURPLE = rc_utils.get_largest_contour(contours_left_PURPLE, MIN_CONTOUR_AREA)
contour_right_PURPLE = rc_utils.get_largest_contour(contours_right_PURPLE, MIN_CONTOUR_AREA)
contour_center_left_PURPLE = rc_utils.get_contour_center(contour_left_PURPLE)
contour_center_right_PURPLE = rc_utils.get_contour_center(contour_right_PURPLE)
contours_left_ORANGE = rc_utils.find_contours(img_left, ORANGE[0], ORANGE[1])
contours_right_ORANGE = rc_utils.find_contours(img_right, ORANGE[0], ORANGE[1])
contour_left_ORANGE = rc_utils.get_largest_contour(contours_left_ORANGE, MIN_CONTOUR_AREA)
contour_right_ORANGE = rc_utils.get_largest_contour(contours_right_ORANGE, MIN_CONTOUR_AREA)
contour_center_left_ORANGE = rc_utils.get_contour_center(contour_left_ORANGE)
contour_center_right_ORANGE = rc_utils.get_contour_center(contour_right_ORANGE)
kp = 0.02
if contour_center_right_ORANGE is not None:
midpoint = contour_center_right_ORANGE[1] - 200
angle = kp * midpoint
print("right found")
if contour_center_right_PURPLE is not None:
midpoint = contour_center_right_PURPLE[1] - 200
angle = kp * midpoint
print("right found")
if contour_center_right_PURPLE is None and contour_center_right_ORANGE is None:
angle = 0.7
contours_green_ar = rc_utils.find_contours(color_img, GREEN[0], GREEN[1])
if contours_green_ar:
cropped = rc_utils.crop(color_img, (rc.camera.get_height() // 4 * 3, 0),
(rc.camera.get_height(), rc.camera.get_width()))
contours = rc_utils.find_contours(cropped, GREEN[0], GREEN[1])
# contour_green = rc_utils.get_largest_contour(cropped, 190)
contour_green = rc_utils.get_largest_contour(contours_green, 100) # changed from MIN_CONTOUR_AREA
#AR TAG SLALOMING
if curr_state == State.ar_tag_slaloming:
camefrompurple = False
speed = 0.125
# if counter_AR < 0.02:
# speed = 0.06
# counter_AR += rc.get_delta_time()
contours_BLACK = rc_utils.find_contours(color_img, BLACK[0], BLACK[1])
if contours_BLACK:
contour_BLACK = rc_utils.get_largest_contour(contours_BLACK, MIN_CONTOUR_AREA)
contour_center_BLACK = rc_utils.get_contour_center(contour_BLACK)
else:
contour_BLACK = None
if curr_state_ARS == State_ARS.search:
if contours_green is not None and contour_BLACK is None:
contours = rc_utils.find_contours(cropped, GREEN[0], GREEN[1])
contour_green = rc_utils.get_largest_contour(contours_green, 100)
if contour_green is not None:
print("here")
curr_state = State.green_line
if last_direction_left == True:
angle = 1
elif last_direction_right == True:
angle = -1
if contour_BLACK is not None and len(markers) > 0: #sees ar tag
if marker.get_orientation() == Orientation.LEFT:
orientation = "left"
curr_state_ARS = State_ARS.approach
elif marker.get_orientation() == Orientation.RIGHT:
orientation = "right"
curr_state_ARS = State_ARS.approach
else:
#speed = 0.05
pass
pillar_distance = 65
if curr_state_ARS == State_ARS.approach:
last_direction_left = False
last_direction_right = False
counter_AR = 0
if depth_img[contour_center_BLACK[0]][contour_center_BLACK[1]] < pillar_distance:
if orientation == "right":
curr_state_ARS = State_ARS.avoid_right
if orientation == "left":
curr_state_ARS = State_ARS.avoid_left
else:
if contour_BLACK is not None:
if last_direction_left is False and last_direction_right is False:
kP = 1
angle = rc_utils.remap_range(contour_center_BLACK[1], 0, width, -1, 1) *kP
if curr_state_ARS == State_ARS.avoid_left:
speed = 0.05
if counter_AR < 0.7:
print("angle .7")
angle = -1.3
elif counter_AR < 1.8:
print("angle 0")
angle = 0
elif counter_AR < 3.2:
angle = 0.8
print("angle -0.7")
else:
print("end of avoid state")
curr_state_ARS = State_ARS.search
counter_AR += rc.get_delta_time()
last_direction_left = True
if curr_state_ARS == State_ARS.avoid_right:
speed = 0.05
if counter_AR < 0.7:
print("angle .7")
angle = 1.3
elif counter_AR < 1.8:
print("angle 0")
angle = 0
elif counter_AR < 3.2:
angle = -0.8
print("angle -0.7")
else:
print("end of avoid state")
curr_state_ARS = State_ARS.search
counter_AR += rc.get_delta_time()
last_direction_right = True
print(curr_state_ARS)
#Elevator
if curr_state == State.elevator:
contour_center_blue = width//2
counter_final = 0
potential_colors = [RED, BLUE]
detected_color = None
greatest_area = 0
for (hsv_lower, hsv_upper, color_name) in potential_colors:
contours = rc_utils.find_contours(color_img, hsv_lower, hsv_upper)
largest_contour = rc_utils.get_largest_contour(contours)
if largest_contour is not None:
contour_area = rc_utils.get_contour_area(largest_contour)
if contour_area > greatest_area:
greatest_area = contour_area
detected_color = color_name
if True:
if marker is not None:
print(detected_color)
print(ar_marker_elevator)
if detected_color == "red":
if not ar_marker_near:
speed = 0.3
else:
speed = 0
elif detected_color == "blue":
contours_blue = rc_utils.find_contours(color_img, BLUE[0], BLUE[1])
if contours_blue:
contour_blue = rc_utils.get_largest_contour(contours_blue, MIN_CONTOUR_AREA)
if contour_blue is not None:
contour_center_blue = rc_utils.get_contour_center(contour_blue)
angle = rc_utils.remap_range(contour_center_blue[1]+50, 0, width, -1, 1)
speed = 2
else:
curr_state = State.green_line
speed = 0.1
if curr_state == State.stop:
counter_stop += rc.get_delta_time()
if counter_stop < 0.7:
speed = -0.7
else:
counter_stop = 0
curr_state = State.ar_tag_slaloming
if curr_state == State.stop2:
counter_stop_2 += rc.get_delta_time()
if counter_stop_2 < 0.7:
speed = -0.3
else:
counter_stop_2 = 0
curr_state = State.cone_slaloming
#Moving Trains
if curr_state == State.trains:
camefromtrain = True
counter_train1 += rc.get_delta_time()
if counter_train1 < 0.5:
angle = 1
speed = 0.1
else:
allow_2 = True
allow = False
curr_state = State.green_line
# #allow_2 = True
# allow = False
# contours_green = rc_utils.find_contours(color_img_2, GREEN[0], GREEN[1])
# contour_green = None
# if contours_green:
# contour_green = rc_utils.get_largest_contour(contours_green, 30)
# if right_front > 550:
# curr_state = State.green_line
# camefromtrain = True
# print("helooooooooooooooooooooooooo")
# #allow = True
# else:
# | |
import json
import SalesforceMetadataModule as smm
import dicttoxml
from xml.dom.minidom import parseString
from fulcrum import Fulcrum
import re
import collections
import time
import datetime
import requests
import base64
import string
import random
from simple_salesforce import Salesforce
from simple_salesforce import SalesforceLogin
from simple_salesforce import SFType
_sfdcPrefix = 'f_'
_sfdcUsername = ""
_sfdcPassword = ""
_sfdcToken = ""
_sfdcDomain = 'test'
# Set _sfdcSandbox to False to run in production
_sfdcSandbox = True
_isDateFieldDefault = False
_fulcrumXApiToken = ""
_fulcrumBaseURL = 'https://api.fulcrumapp.com/api/v2/'
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, <NAME>, Burning Man Project"
__credits__ = ["<NAME>"]
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>, <EMAIL>"
__status__ = "Development"
__Changelog01__ = "Initial Release"
class FulcrumRecordToSalesforceRecord:
_sfdcSession_id, _sfdcInstance = SalesforceLogin(username=_sfdcUsername, password=_sfdcPassword, security_token=_sfdcToken, domain=_sfdcDomain)
sfdc = Salesforce(instance=_sfdcInstance, session_id=_sfdcSession_id)
fulcrum = Fulcrum(key=_fulcrumXApiToken)
fulcrumHeaders = {'X-ApiToken': _fulcrumXApiToken}
def sf_api_call(self, action, parameters = {}, method = 'get', data = {}, multipart=False, boundary=None):
"""
Helper function to make calls to Salesforce REST API.
Parameters: action (the URL), URL params, method (get, post or patch), data for POST/PATCH.
"""
headers = {}
if multipart == False:
headers = {
'Content-type': 'application/json',
'Accept-Encoding': 'gzip',
'Authorization': 'OAuth ' + self._sfdcSession_id,
}
else:
headers = {
'Content-type': 'multipart/form-data; boundary='+boundary,
'Accept-Encoding': 'gzip',
'Authorization': 'OAuth ' + self._sfdcSession_id,
}
if method == 'get':
r = requests.request(method, 'https://'+self._sfdcInstance+action, headers=headers, params=parameters, timeout=30)
elif method in ['post', 'patch']:
r = requests.request(method, 'https://'+self._sfdcInstance+action, headers=headers, json=data, params=parameters, timeout=10)
else:
# other methods not implemented in this example
raise ValueError('Method should be get or post or patch.')
#print('Debug: API %s call: %s' % (method, r.url) )
if r.status_code < 300:
if method=='patch':
return None
else:
return r.json()
else:
raise Exception('API error when calling %s : %s' % (r.url, r.content))
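# Hypothetical usage (the record id is made up, and the POST payload is schematic); the same
# helper backs the ContentVersion and ContentDocumentLink calls further below:
#   cv = self.sf_api_call('/services/data/v40.0/sobjects/ContentVersion/068xx0000012345AAA')
#   self.sf_api_call('/services/data/v40.0/sobjects/ContentVersion', method='post', data={...})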
# Generates a random string
def id_generator(self, size=32, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
#checks to see if a key exists in a dictionary
def checkKey (self, dictionary, key):
try:
if key in dictionary.keys():
return True
else:
return False
except KeyError:
return False
## pass JSON Directly
def composite_salesforce_create (self, objectId, records):
response = self.sfdc.restful (method='POST', path='composite/tree/'+objectId, json=records)
return response
#must have Salesforce record IDs
def composite_salesforce_update (self, objectId, extCustomField, extIdValue, records):
response = self.sfdc.restful (method='PATCH', path='composite/sobjects', json=records)
return response
def composite_salesforce_request (self, objectId, extCustomField, extIdValue, records):
response = self.sfdc.restful (method='POST', path='composite/sobjects/' + objectId, json=records)
return response
# Data should either be a single JSON encapsulating base64 encoded blob up to 34MB
# Or a multipart message encapsulating a base64 encoded blob up to 2GB
# https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/dome_sobject_insert_update_blob.htm
def contentVersion_salesforce_create (self, data):
return self.sf_api_call('/services/data/v40.0/sobjects/ContentVersion', method="post", data=data)
def contentVersion_2GB_salesforce_create (self, data, boundary):
return self.sf_api_call('/services/data/v40.0/sobjects/ContentVersion', method="post", data=data, multipart=True, boundary=boundary)
# Data should be an ID
def contentVersion_salesforce_get (self, data):
return self.sf_api_call('/services/data/v40.0/sobjects/ContentVersion/%s' % data)
def contentDocumentLink_salesforce_create (self, data):
return self.sf_api_call('/services/data/v40.0/sobjects/ContentDocumentLink', method = 'post', data=data)
def create_output_json (self, recordJson):
recordJson = json.dumps (recordJson)
recordJson = recordJson[1:-1]
recordJson = recordJson.replace('null', '')
return recordJson
def process_generate_field (self, fieldId, fieldValue, fieldType='Data'):
print ' ' + str(fieldType) + ': ' + str(_sfdcPrefix) + str(fieldId) + '__c:' + str(fieldValue)
if fieldType == 'Latitude' or fieldType == 'Longitude':
return {_sfdcPrefix + fieldId + '__' + fieldType +'__s' : fieldValue}
else:
return {_sfdcPrefix + fieldId + '__c' : fieldValue}
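# For example (field id and value are hypothetical), with _sfdcPrefix = 'f_':
#   process_generate_field('a1b2', '41.99', 'Latitude') -> {'f_a1b2__Latitude__s': '41.99'}
#   process_generate_field('a1b2', 'hello', 'Data')     -> {'f_a1b2__c': 'hello'}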
def upload_2GB_file_to_salesforce_and_attach_to_record (self, recordId, fileTitle, fileDescription, fileName, fileContents):
boundary = self.id_generator ()
fileContents = base64.b64encode(fileContents)
#Multi part request can handle 2GB Max
ContentVersionMetadata = {
'Title':fileTitle,
'Description':fileDescription,
'PathOnClient':fileName,
}
ContentVersionData = """--"""+boundary+"""
Content-Disposition: form-data; name="entity_content";
Content-Type: application/json
{
"Title" : """+'"'+fileTitle+'"'+""",
"Description" : """+'"'+fileDescription+'"'+""",
"PathOnClient" : """+'"'+fileName+'"'+"""
}
--"""+boundary+"""
Content-Disposition: form-data; name="VersionData"; filename=""" + '"' + fileName + '"' +"""
Content-Type: application/octet-stream
""" + fileContents + """
--"""+boundary+"""--"""
# 1: Insert the Content Document
ContentVersion = self.contentVersion_2GB_salesforce_create (data=ContentVersionData, boundary=boundary)
ContentVersionId = ContentVersion.get('id')
# 2: Get the ContentDocumentId from the just inserted ContentVersion
ContentVersion = self.contentVersion_salesforce_get (ContentVersionId)
ContentDocumentId = ContentVersion.get('ContentDocumentId')
# 3: Create a ContentDocumentLink between the ContentDocumentId and the Record
contentDocumentLinkMetadata = {
'ContentDocumentId': ContentDocumentId,
'LinkedEntityId': recordId,
'ShareType': 'V'
}
ContentDocumentLink = self.contentDocumentLink_salesforce_create (contentDocumentLinkMetadata)
return {'ContentVersionId' : ContentVersionId, 'ContentDocumentId' : ContentDocumentId, 'ContentDocumentLink' : ContentDocumentLink}
def upload_file_to_salesforce_and_attach_to_record (self, recordId, fileTitle, fileDescription, fileName, fileContent, fulcrumId=None):
fileContent = base64.b64encode(fileContent)
#Single part request can handle ~34MB Max
ContentVersionData = {
'Title':fileTitle,
'Description':fileDescription,
'PathOnClient':fileName,
'VersionData':fileContent,
_sfdcPrefix + 'Fulcrum_Id__c':fulcrumId,
# _sfdcPrefix + 'Location__c':fulcrumLocation
}
# 1: Insert the Content Document
ContentVersion = self.contentVersion_salesforce_create (data=ContentVersionData)
ContentVersionId = ContentVersion.get('id')
# 2: Get the ContentDocumentId from the just inserted ContentVersion
ContentVersion = self.contentVersion_salesforce_get (ContentVersionId)
ContentDocumentId = ContentVersion.get('ContentDocumentId')
# 3: Create a ContentDocumentLink between the ContentDocumentId and the Record
contentDocumentLinkMetadata = {
'ContentDocumentId': ContentDocumentId,
'LinkedEntityId': recordId,
'ShareType': 'V'
}
ContentDocumentLink = self.contentDocumentLink_salesforce_create (contentDocumentLinkMetadata)
return {'ContentVersionId' : ContentVersionId, 'ContentDocumentId' : ContentDocumentId, 'ContentDocumentLink' : ContentDocumentLink}
def process_file_fields (self, record, recordId):
#print record
newFiles = []
for fieldId in record['form_values']:
files = self.detect_file_field_type_and_process_field (fieldId, record, recordId=recordId)
#print files
if isinstance (files, dict):
newFiles.append (files)
return newFiles
def process_video_field (self, fieldValue, recordId):
print 'Downloading Video File From Fulcrum ... ' + fieldValue['video_id']
baseurl = _fulcrumBaseURL + 'videos/' + fieldValue['video_id']
blob = requests.request ('GET', baseurl + '.mp4', headers=self.fulcrumHeaders)
if blob.status_code == 200:
videoMetadata = self.fulcrum.videos.find(fieldValue['video_id'])
print 'Uploading Video File To Salesforce... ' + recordId
self.upload_file_to_salesforce_and_attach_to_record (recordId=recordId, fileTitle=fieldValue['video_id'] + ' Video', fileDescription=fieldValue['caption'], fileName=fieldValue['video_id'] + '.mp4', fileContent=blob.content, fulcrumId=fieldValue['video_id'])
blob = requests.request ('GET', baseurl + '/track.json', headers=self.fulcrumHeaders)
if blob.status_code == 200:
print 'Uploading Video Track To Salesforce... ' + recordId
self.upload_file_to_salesforce_and_attach_to_record (recordId=recordId, fileTitle=fieldValue['video_id'] + ' JSON Track', fileDescription='JSON Track Of\n' + fieldValue['caption'], fileName=fieldValue['video_id'] + '-track.json', fileContent=blob.content)
blob = requests.request ('GET', baseurl + '/track.geojson', headers=self.fulcrumHeaders)
if blob.status_code == 200:
print 'Uploading Video GeoJSON Track To Salesforce... ' + recordId
self.upload_file_to_salesforce_and_attach_to_record (recordId=recordId, fileTitle=fieldValue['video_id'] + ' GEO JSON Track', fileDescription='GeoJSON Track Of\n' + fieldValue['caption'], fileName=fieldValue['video_id'] + '-track.geojson', fileContent=blob.content)
blob = requests.request ('GET', baseurl + '/track.gpx', headers=self.fulcrumHeaders)
if blob.status_code == 200:
print 'Uploading Video GPX Track To Salesforce... ' + recordId
self.upload_file_to_salesforce_and_attach_to_record (recordId=recordId, fileTitle=fieldValue['video_id'] + ' GPX Track', fileDescription='GPX Track Track Of\n' + fieldValue['caption'], fileName=fieldValue['video_id'] + '-track.gpx', fileContent=blob.content)
blob = requests.request ('GET', baseurl + '/track.kml', headers=self.fulcrumHeaders)
if blob.status_code == 200:
print 'Uploading Video KML Track To Salesforce... ' + recordId
self.upload_file_to_salesforce_and_attach_to_record (recordId=recordId, fileTitle=fieldValue['video_id'] + ' KML Track', fileDescription='KML Track Track Of\n' + fieldValue['caption'], fileName=fieldValue['video_id'] + '-track.kml', fileContent=blob.content)
return
def process_photo_field (self, fieldValue, recordId):
print 'Downloading Photo File From Fulcrum ... ' + fieldValue['photo_id']
blob = requests.request ('GET', _fulcrumBaseURL + 'photos/' + fieldValue['photo_id'] + '.jpg', headers=self.fulcrumHeaders)
if blob.status_code == 200:
print 'Uploading Photo File To Salesforce... ' + recordId
self.upload_file_to_salesforce_and_attach_to_record (recordId=recordId, fileTitle=fieldValue['photo_id'] + ' Photo', fileDescription=fieldValue['caption'], fileName=fieldValue['photo_id'] + '.jpg', fileContent=blob.content, fulcrumId=fieldValue['photo_id'])
return
def process_signature_field (self, fieldValue, recordId):
print 'Downloading Signature File From Fulcrum ... ' + fieldValue['signature_id']
blob = requests.request ('GET', _fulcrumBaseURL + 'signature/' + fieldValue['signature_id'] + '.png', headers=self.fulcrumHeaders)
if blob.status_code == 200:
print 'Uploading Signature File To Salesforce... ' + recordId
self.upload_file_to_salesforce_and_attach_to_record (recordId=recordId, fileTitle=fieldValue['signature_id'] + ' Signature', fileDescription='Signed At: ' + fieldValue['timestamp'], fileName=fieldValue['signature_id'] + '.png', fileContent=blob.content, fulcrumId=fieldValue['signature_id'])
return
def process_audio_field (self, fieldValue, recordId):
print 'Downloading Audio File From Fulcrum ... ' + fieldValue['audio_id']
blob = requests.request ('GET', _fulcrumBaseURL + 'audio/' + fieldValue['audio_id'] + '.mp4', headers=self.fulcrumHeaders)
if blob.status_code == 200:
print 'Uploading Audio File To Salesforce... ' + recordId
self.upload_file_to_salesforce_and_attach_to_record (recordId=recordId, fileTitle=fieldValue['audio_id'] + ' Video', fileDescription=fieldValue['caption'], fileName=fieldValue['audio_id'] + '.mp4', fileContent=blob.content, fulcrumId=fieldValue['audio_id'])
blob = requests.request ('GET', _fulcrumBaseURL + 'audio/' + fieldValue['audio_id'] + '/track.json', headers=self.fulcrumHeaders)
if blob.status_code == 200:
print 'Uploading Audio Track To Salesforce... ' + recordId
self.upload_file_to_salesforce_and_attach_to_record (recordId=recordId, fileTitle=fieldValue['audio_id'] + ' JSON Track', fileDescription='JSON Track Of\n' + fieldValue['caption'], fileName=fieldValue['audio_id'] + '-track.json', fileContent=blob.content)
blob = requests.request ('GET', _fulcrumBaseURL + 'audio/' + fieldValue['audio_id'] + '/track.geojson', headers=self.fulcrumHeaders)
if blob.status_code == 200:
print 'Uploading Audio GeoJSON Track To Salesforce... ' + recordId
self.upload_file_to_salesforce_and_attach_to_record (recordId=recordId, fileTitle=fieldValue['audio_id'] + ' GEO JSON Track', fileDescription='GeoJSON Track Of\n' + fieldValue['caption'], fileName=fieldValue['audio_id'] + '-track.geojson', fileContent=blob.content)
blob = requests.request ('GET', _fulcrumBaseURL + 'audio/' + fieldValue['audio_id'] + '/track.gpx', headers=self.fulcrumHeaders)
if blob.status_code == 200:
print 'Uploading Audio GPX Track To Salesforce... ' + recordId
self.upload_file_to_salesforce_and_attach_to_record (recordId=recordId, fileTitle=fieldValue['audio_id'] + ' GPX Track', fileDescription='GPX Track Track Of\n' + fieldValue['caption'], fileName=fieldValue['audio_id'] + '-track.gpx', fileContent=blob.content)
blob = requests.request ('GET', _fulcrumBaseURL + 'audio/' + fieldValue['audio_id'] + '/track.kml', headers=self.fulcrumHeaders)
if blob.status_code == 200:
print 'Uploading Audio KML Track To Salesforce... ' + recordId
self.upload_file_to_salesforce_and_attach_to_record (recordId=recordId, fileTitle=fieldValue['audio_id'] + ' KML Track', fileDescription='KML Track Track Of\n' + fieldValue['caption'], fileName=fieldValue['audio_id'] + '-track.kml', fileContent=blob.content)
return
def process_date_field (self, fieldId, fieldValue):
#Generate Date Time
return self.process_generate_field (fieldId, fieldValue, 'Date')
def process_datetime_field (self, record, isDateField, fieldId, fieldValue):
#Generate Date Time
# Check to see if the last field processed was a Date Field
if isDateField != _isDateFieldDefault:
dateValue = record['form_values'][isDateField]
dateTimeValue = dateValue + ' ' + fieldValue
return self.process_generate_field (isDateField + '_' + fieldId, dateTimeValue, 'DateTime')
#Not paired with a Date Field
else:
return self.process_generate_field (fieldId, fieldValue, 'Time')
def process_address_and_choice_field (self, fieldId, subFieldKey, subFieldValue):
if subFieldKey == 'sub_thoroughfare':
return self.process_generate_field (fieldId + '_1', subFieldValue, 'Street Number')
elif subFieldKey == 'thoroughfare':
return self.process_generate_field (fieldId + '_2', subFieldValue, 'Street Name')
elif subFieldKey == 'suite':
return self.process_generate_field (fieldId + '_3', subFieldValue, 'Suite')
elif subFieldKey == 'locality':
return self.process_generate_field (fieldId + '_4', subFieldValue, 'City')
elif subFieldKey == 'sub_admin_area':
return self.process_generate_field (fieldId + '_5', subFieldValue, 'County')
elif subFieldKey == 'admin_area':
return self.process_generate_field (fieldId + '_6', subFieldValue, 'State/Province')
elif subFieldKey == 'postal_code':
return self.process_generate_field (fieldId + '_7', subFieldValue, 'Postal Code')
elif subFieldKey == 'country':
return self.process_generate_field (fieldId + '_8', subFieldValue, 'Country')
elif subFieldKey == 'choice_values':
choices = []
multiSelectChoices = subFieldValue[0]
for choice in subFieldValue:
choices.append (choice)
if multiSelectChoices != choice:
multiSelectChoices += ';' + choice
if len(choices) == 1:
return self.process_generate_field (fieldId, choices[0], 'Choices')
else:
return self.process_generate_field (fieldId, multiSelectChoices, 'Multiselect Choices')
elif subFieldKey == 'other_values':
for choice in subFieldValue:
return self.process_generate_field (fieldId, choice, 'Other Choice')
# Determine the type of field and process it. This handles files.
def detect_file_field_type_and_process_field (self, fieldId, record, recordId, detail=False):
fieldValue = ''
if detail == False:
fieldValue | |
range(minBurst, maxBurst)]
#doing adjustment before use
adjustValidWithLooks([ifg], box, numberAzimuthLooks, numberRangeLooks, edge=0, avalid='strict', rvalid=int(np.around(numberRangeLooks/8.0)))
mergeBurstsVirtual([ifg], [bst], box, os.path.join(outputDirname, outputFilename+suffix))
#take looks
if suffix not in ['', None]:
multilook2(os.path.join(outputDirname, outputFilename+suffix),
os.path.join(outputDirname, outputFilename),
numberAzimuthLooks0,
numberRangeLooks0)
else:
print('skipping multilooking')
#The original coherence calculated by topsApp.py is not good at all, use the following coherence instead
lowerintfile = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.mergedDirname + '_IW{0}'.format(swath), self._insar.mergedIfgname)
upperintfile = os.path.join(ionParam.ionDirname, ionParam.upperDirname, ionParam.mergedDirname + '_IW{0}'.format(swath), self._insar.mergedIfgname)
corfile = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.mergedDirname + '_IW{0}'.format(swath), self._insar.correlationFilename)
img = isceobj.createImage()
img.load(lowerintfile + '.xml')
width = img.width
length = img.length
lowerint = np.fromfile(lowerintfile, dtype=np.complex64).reshape(length, width)
upperint = np.fromfile(upperintfile, dtype=np.complex64).reshape(length, width)
##########################################################################
#slight filtering to improve the estimation accuracy of swath difference
if 1 and shutil.which('psfilt1') is not None:
cmd1 = 'mv {} tmp'.format(lowerintfile)
cmd2 = 'psfilt1 tmp {} {} .3 32 8'.format(lowerintfile, width)
cmd3 = 'rm tmp'
cmd4 = 'mv {} tmp'.format(upperintfile)
cmd5 = 'psfilt1 tmp {} {} .3 32 8'.format(upperintfile, width)
cmd6 = 'rm tmp'
runCmd(cmd1)
runCmd(cmd2)
runCmd(cmd3)
runCmd(cmd4)
runCmd(cmd5)
runCmd(cmd6)
##########################################################################
#compute coherence only using interferogram
#here I use differential interferogram of lower and upper band interferograms
#so that coherence is not affected by fringes
cord = cal_coherence(lowerint*np.conjugate(upperint), win=3, edge=4)
cor = np.zeros((length*2, width), dtype=np.float32)
cor[0:length*2:2, :] = np.sqrt( (np.absolute(lowerint)+np.absolute(upperint))/2.0 )
cor[1:length*2:2, :] = cord
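#the correlation file is written as a 2-band BIL image: even lines hold the
#pseudo-amplitude sqrt((|lower|+|upper|)/2), odd lines hold the coherence,
#matching the strided reads ([0::2] / [1::2]) used later in this file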
cor.astype(np.float32).tofile(corfile)
#create xml and vrt
#img.scheme = 'BIL'
#img.bands = 2
#img.filename = corfile
#img.renderHdr()
#img = isceobj.Image.createUnwImage()
img = isceobj.createOffsetImage()
img.setFilename(corfile)
img.extraFilename = corfile + '.vrt'
img.setWidth(width)
img.setLength(length)
img.renderHdr()
########################################################
#STEP 2. UNWRAP SWATH INTERFEROGRAM
########################################################
dirs = [ionParam.lowerDirname, ionParam.upperDirname]
#there is only one coherence file in lower directory
corfile = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.mergedDirname + '_IW{0}'.format(swath), self._insar.correlationFilename)
for dirx in dirs:
procdir = os.path.join(ionParam.ionDirname, dirx, ionParam.mergedDirname + '_IW{0}'.format(swath))
wrapName = os.path.join(procdir, self._insar.mergedIfgname)
unwrapName = os.path.join(procdir, self._insar.unwrappedIntFilename)
xmlDirname = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.fineIfgDirname)
#unwrap
snaphuUnwrap(self, xmlDirname, wrapName, corfile, unwrapName, numberRangeLooks0, numberAzimuthLooks0, costMode = 'SMOOTH', initMethod = 'MCF', defomax = 2, initOnly = True)
#remove weird things in no-data area
maskUnwrap(unwrapName, wrapName)
if [ionParam.numberRangeLooks0, ionParam.numberAzimuthLooks0] != [ionParam.numberRangeLooks, ionParam.numberAzimuthLooks]:
multilook_unw(self, ionParam, ionParam.mergedDirname + '_IW{0}'.format(swath))
########################################################
#STEP 3. COMPUTE IONOSPHERE
########################################################
#get files
lowerUnwfile = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.mergedDirname + '_IW{0}'.format(swath), self._insar.unwrappedIntFilename)
upperUnwfile = os.path.join(ionParam.ionDirname, ionParam.upperDirname, ionParam.mergedDirname + '_IW{0}'.format(swath), self._insar.unwrappedIntFilename)
corfile = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.mergedDirname + '_IW{0}'.format(swath), self._insar.correlationFilename)
#use image size from lower unwrapped interferogram
img = isceobj.createImage()
img.load(lowerUnwfile + '.xml')
width = img.width
length = img.length
lowerUnw = (np.fromfile(lowerUnwfile, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
upperUnw = (np.fromfile(upperUnwfile, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
lowerAmp = (np.fromfile(lowerUnwfile, dtype=np.float32).reshape(length*2, width))[0:length*2:2, :]
upperAmp = (np.fromfile(upperUnwfile, dtype=np.float32).reshape(length*2, width))[0:length*2:2, :]
cor = (np.fromfile(corfile, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
amp = np.sqrt(lowerAmp**2+upperAmp**2)
#compute ionosphere
fl = SPEED_OF_LIGHT / ionParam.radarWavelengthLower
fu = SPEED_OF_LIGHT / ionParam.radarWavelengthUpper
adjFlag = 1
ionos = computeIonosphere(lowerUnw, upperUnw, cor, fl, fu, adjFlag, corThresholdAdj, 0)
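#note (assumption, not taken from this file): in the standard split-spectrum
#formulation the dispersive (ionospheric) phase at the centre frequency f0 is
#    phi_ion(f0) = fl*fu / (f0*(fu**2 - fl**2)) * (phi_lower*fu - phi_upper*fl)
#computeIonosphere is expected to evaluate something equivalent, with
#corThresholdAdj apparently masking low-coherence samples during the
#relative adjustment between the two bands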
#dump result
outDir = os.path.join(ionParam.ionDirname, ionParam.ioncalDirname + '_IW{0}'.format(swath))
os.makedirs(outDir, exist_ok=True)
outFilename = os.path.join(outDir, ionParam.ionRawNoProj)
ion = np.zeros((length*2, width), dtype=np.float32)
ion[0:length*2:2, :] = amp
ion[1:length*2:2, :] = ionos
ion.astype(np.float32).tofile(outFilename)
img.filename = outFilename
img.extraFilename = outFilename + '.vrt'
img.renderHdr()
corList.append(cor)
ampList.append(amp)
ionosList.append(ionos)
#do adjustment between adjacent swaths
if numValidSwaths == 3:
adjustList = [ionosList[0], ionosList[2]]
else:
adjustList = [ionosList[0]]
for adjdata in adjustList:
index = np.nonzero((adjdata!=0) * (ionosList[1]!=0) * (corList[1] > corThresholdSwathAdj))
if index[0].size < 5:
print('WARNING: too few samples available for adjustment between swaths: {} with coherence threshold: {}'.format(index[0].size, corThresholdSwathAdj))
print(' no adjustment made')
print('     to do adjustment, please consider using a lower coherence threshold')
else:
print('number of samples available for adjustment in the overlap area: {}'.format(index[0].size))
#diff = np.mean((ionosList[1] - adjdata)[index], dtype=np.float64)
#use weighted mean instead
wgt = corList[1][index]**14
diff = np.sum((ionosList[1] - adjdata)[index] * wgt / np.sum(wgt, dtype=np.float64), dtype=np.float64)
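#equivalent to np.average((ionosList[1] - adjdata)[index], weights=wgt);
#the cor**14 weighting strongly favors high-coherence samples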
index2 = np.nonzero(adjdata!=0)
adjdata[index2] = adjdata[index2] + diff
#get merged ionosphere
ampMerged = np.zeros((length, width), dtype=np.float32)
corMerged = np.zeros((length, width), dtype=np.float32)
ionosMerged = np.zeros((length, width), dtype=np.float32)
for i in range(numValidSwaths):
nBurst = len(burstValidBox[i])
for j in range(nBurst):
#index after multi-looking in merged image, index starts from 1
first_line = int(np.around((burstValidBox[i][j][0] - 1) / numberAzimuthLooks + 1))
last_line = int(np.around(burstValidBox[i][j][1] / numberAzimuthLooks))
first_sample = int(np.around((burstValidBox[i][j][2] - 1) / numberRangeLooks + 1))
last_sample = int(np.around(burstValidBox[i][j][3] / numberRangeLooks))
corMerged[first_line-1:last_line-1+1, first_sample-1:last_sample-1+1] = \
corList[i][first_line-1:last_line-1+1, first_sample-1:last_sample-1+1]
ampMerged[first_line-1:last_line-1+1, first_sample-1:last_sample-1+1] = \
ampList[i][first_line-1:last_line-1+1, first_sample-1:last_sample-1+1]
ionosMerged[first_line-1:last_line-1+1, first_sample-1:last_sample-1+1] = \
ionosList[i][first_line-1:last_line-1+1, first_sample-1:last_sample-1+1]
#remove an empirical ramp
if ionParam.rampRemovel != 0:
warningInfo = '{} calculating ionosphere for cross S-1A/B interferogram, an empirical ramp is removed from estimated ionosphere\n'.format(datetime.datetime.now())
with open(os.path.join(ionParam.ionDirname, ionParam.warning), 'a') as f:
f.write(warningInfo)
abramp = cal_cross_ab_ramp(swathList, box[1], numberRangeLooks, ionParam.passDirection)
if ionParam.rampRemovel == -1:
abramp *= -1.0
#currently do not apply this
#ionosMerged -= abramp[None, :]
#dump ionosphere
outDir = os.path.join(ionParam.ionDirname, ionParam.ioncalDirname)
os.makedirs(outDir, exist_ok=True)
outFilename = os.path.join(outDir, ionParam.ionRawNoProj)
ion = np.zeros((length*2, width), dtype=np.float32)
ion[0:length*2:2, :] = ampMerged
ion[1:length*2:2, :] = ionosMerged
ion.astype(np.float32).tofile(outFilename)
img.filename = outFilename
img.extraFilename = outFilename + '.vrt'
img.renderHdr()
#dump coherence
outFilename = os.path.join(outDir, ionParam.ionCorNoProj)
ion[1:length*2:2, :] = corMerged
ion.astype(np.float32).tofile(outFilename)
img.filename = outFilename
img.extraFilename = outFilename + '.vrt'
img.renderHdr()
def multilookIndex(first, last, nl):
'''
create the index after multilooking
the original 1-look index can start from any number such as 0, 1 or another number
after multilooking, the index still starts from the same number.
first: index of first pixel in the original 1-look array
last: index of last pixel in the original 1-look array
nl: number of looks(nl can also be 1). nl >= 1
'''
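#worked example: multilookIndex(0, 9, 5) -> array([2., 7.])
#(offset = (0 + 0 + 5 - 1)/2 = 2.0, then one step of nl per output pixel)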
#number of pixels after multilooking
num = int((last - first + 1)/nl)
offset = (first + (first + nl - 1)) / 2.0
index = offset + np.arange(num) * nl
return index
def computeDopplerOffset(burst, firstline, lastline, firstcolumn, lastcolumn, nrlks=1, nalks=1):
'''
compute offset corresponding to center Doppler frequency
firstline, lastline, firstcolumn, lastcolumn: index of original 1-look burst, index starts from 1.
output: first lines > 0, last lines < 0
'''
Vs = np.linalg.norm(burst.orbit.interpolateOrbit(burst.sensingMid, method='hermite').getVelocity())
Ks = 2 * Vs * burst.azimuthSteeringRate / burst.radarWavelength
#firstcolumn, lastcolumn: index starts from 1
rng = multilookIndex(firstcolumn-1, lastcolumn-1, nrlks) * burst.rangePixelSize + burst.startingRange
#firstline, lastline: index starts from 1
eta = ( multilookIndex(firstline-1, lastline-1, nalks) - (burst.numberOfLines-1.0)/2.0) * burst.azimuthTimeInterval
f_etac = burst.doppler(rng)
Ka = burst.azimuthFMRate(rng)
eta_ref = (burst.doppler(burst.startingRange) / burst.azimuthFMRate(burst.startingRange) ) - (f_etac / Ka)
Kt = Ks / (1.0 - Ks/Ka)
#carr = np.pi * Kt[None,:] * ((eta[:,None] - eta_ref[None,:])**2)
#center doppler frequency due to rotation
dopplerOffset1 = (eta[:,None] - eta_ref[None,:]) * Kt / Ka[None,:] / (burst.azimuthTimeInterval * nalks)
#center doppler frequency due to squint
dopplerOffset2 = (f_etac[None,:] / Ka[None,:]) / (burst.azimuthTimeInterval * nalks)
dopplerOffset = dopplerOffset1 + dopplerOffset2
return (dopplerOffset, Ka)
def grd2ion(self, ionParam):
from scipy import interpolate
from scipy.interpolate import interp1d
print('resampling ionosphere from ground to ionospheric layer')
#get files
corfile = os.path.join(ionParam.ionDirname, ionParam.ioncalDirname, ionParam.ionCorNoProj)
ionfile = os.path.join(ionParam.ionDirname, ionParam.ioncalDirname, ionParam.ionRawNoProj)
#use image size from the merged coherence file
img = isceobj.createImage()
img.load(corfile + '.xml')
width = img.width
length = img.length
cor = (np.fromfile(corfile, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
amp = (np.fromfile(ionfile, dtype=np.float32).reshape(length*2, width))[0:length*2:2, :]
ionos = (np.fromfile(ionfile, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
#use the satellite height of the mid burst of the first swath of the reference acquisition
swathList = self._insar.getValidSwathList(self.swaths)
reference = self._insar.loadProduct( os.path.join(self._insar.referenceSlcProduct, 'IW{0}.xml'.format(swathList[0])))
minBurst, maxBurst = self._insar.commonReferenceBurstLimits(swathList[0]-1)
#no problem with this index at all
midBurst = int(np.around((minBurst + maxBurst - 1) / 2.0))
masBurst = reference.bursts[midBurst]
#satellite height
satHeight = np.linalg.norm(masBurst.orbit.interpolateOrbit(masBurst.sensingMid, method='hermite').getPosition())
#original Doppler offset should be multiplied by this ratio
ratio = ionParam.ionHeight/(satHeight-ionParam.earthRadius)
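#geometry note: a Doppler-induced azimuth offset measured at the ground maps to
#the ionospheric shell in proportion to its height between ground (0) and the
#satellite, so scaling by ionHeight/(satHeight - earthRadius) re-registers the
#offsets at the ionospheric layer (a simple linear approximation)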
xmlDirname = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.fineIfgDirname)
(box, burstValidBox, burstValidBox2, frames) = getMergeBox(self, xmlDirname, numberRangeLooks=ionParam.numberRangeLooks, numberAzimuthLooks=ionParam.numberAzimuthLooks)
##############################################################################################################
swathList = self._insar.getValidSwathList(self.swaths)
frames=[]
#for valid swaths and bursts, consistent with runMergeBursts.py
for swath in swathList:
minBurst, maxBurst = self._insar.commonReferenceBurstLimits(swath-1)
if minBurst==maxBurst:
print('Skipping processing of swath {0}'.format(swath))
continue
ifg = self._insar.loadProduct( os.path.join(xmlDirname, 'IW{0}.xml'.format(swath)))
frames.append(ifg)
##############################################################################################################
for band in [amp, ionos, cor]:
nswath = len(frames)
for i in range(nswath):
nburst = len(frames[i].bursts)
for j in range(nburst):
#according to runBurstIfg.py, this is originally from self._insar.referenceSlcProduct, 'IW{0}.xml'
masBurst = frames[i].bursts[j]
(dopplerOffset, Ka) = computeDopplerOffset(masBurst, burstValidBox2[i][j][0], burstValidBox2[i][j][1], burstValidBox2[i][j][2], burstValidBox2[i][j][3], nrlks=ionParam.numberRangeLooks, nalks=ionParam.numberAzimuthLooks)
offset = ratio * dopplerOffset
# 0 1 2 3
#firstlineAdj, lastlineAdj, firstcolumnAdj, lastcolumnAdj,
#after multiplication, index starts from 1
firstline = int(np.around((burstValidBox[i][j][0] - 1) / ionParam.numberAzimuthLooks + 1))
lastline = | |
Cd u0 {3,D}
8 Cd u0 {4,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)(Cds-Cds)',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 718,
label = "Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Cd u0 {1,S} {8,D}
5 Ct u0 {1,S}
6 Cd u0 {2,D}
7 Cd u0 {3,D}
8 Cdd u0 {4,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-Cd)Ct',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 719,
label = "Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {6,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {7,D}
4 Cd u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Ct u0 {1,S}
7 Cd u0 {3,D}
8 Cd u0 {4,D}
9 O2d u0 {5,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d)',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-S2d)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {6,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {7,D}
4 Cd u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Ct u0 {1,S}
7 Cd u0 {3,D}
8 Cd u0 {4,D}
9 S2d u0 {5,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 720,
label = "Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-Cd)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {6,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {7,D}
4 Cd u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Ct u0 {1,S}
7 Cd u0 {3,D}
8 Cd u0 {4,D}
9 C u0 {5,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)Ct',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 721,
label = "Cs-(Cds-Cds)(Cds-Cdd)(Cds-Cdd)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Cd u0 {1,S} {8,D}
5 Ct u0 {1,S}
6 Cd u0 {2,D}
7 Cdd u0 {3,D}
8 Cdd u0 {4,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Ct',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 722,
label = "Cs-(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-O2d)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {7,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Ct u0 {1,S}
8 Cd u0 {4,D}
9 O2d u0 {5,D}
10 O2d u0 {6,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-O2d)',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 723,
label = "Cs-(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-Cd)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {7,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Ct u0 {1,S}
8 Cd u0 {4,D}
9 O2d u0 {5,D}
10 C u0 {6,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d)Ct',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cds)(Cds-Cdd-S2d)(Cds-Cdd-S2d)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {7,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Ct u0 {1,S}
8 Cd u0 {4,D}
9 S2d u0 {5,D}
10 S2d u0 {6,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cds)(Cds-Cdd-S2d)(Cds-Cdd-Cd)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {7,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Ct u0 {1,S}
8 Cd u0 {4,D}
9 S2d u0 {5,D}
10 C u0 {6,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 724,
label = "Cs-(Cds-Cds)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {7,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Ct u0 {1,S}
8 Cd u0 {4,D}
9 C u0 {5,D}
10 C u0 {6,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)Ct',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 725,
label = "Cs-(Cds-Cdd)(Cds-Cdd)(Cds-Cdd)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Cd u0 {1,S} {8,D}
5 Ct u0 {1,S}
6 Cdd u0 {2,D}
7 Cdd u0 {3,D}
8 Cdd u0 {4,D}
""",
thermo = u'Cs-(Cds-Cdd-Cd)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Ct',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 726,
label = "Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-O2d)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {8,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 Ct u0 {1,S}
9 O2d u0 {5,D}
10 O2d u0 {6,D}
11 O2d u0 {7,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-O2d)',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 727,
label = "Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-Cd)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {8,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 Ct u0 {1,S}
9 O2d u0 {5,D}
10 O2d u0 {6,D}
11 C u0 {7,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-O2d)Ct',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 728,
label = "Cs-(Cds-Cdd-O2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {8,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 Ct u0 {1,S}
9 O2d u0 {5,D}
10 C u0 {6,D}
11 C u0 {7,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d)Ct',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)(Cds-Cdd-S2d)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {8,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 Ct u0 {1,S}
9 S2d u0 {5,D}
10 S2d u0 {6,D}
11 S2d u0 {7,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)(Cds-Cdd-Cd)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {8,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 Ct u0 {1,S}
9 S2d u0 {5,D}
10 S2d u0 {6,D}
11 C u0 {7,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cdd-S2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {8,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 Ct u0 {1,S}
9 S2d u0 {5,D}
10 C u0 {6,D}
11 C u0 {7,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 729,
label = "Cs-(Cds-Cdd-Cd)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {8,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 Ct u0 {1,S}
9 C u0 {5,D}
10 C u0 {6,D}
11 C u0 {7,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)Ct',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 730,
label = "Cs-CbCdsCdsCds",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cb u0 {1,S}
3 [Cd,CO] u0 {1,S}
4 [Cd,CO] u0 {1,S}
5 [Cd,CO] u0 {1,S}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)Cb',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 731,
label = "Cs-(Cds-O2d)(Cds-O2d)(Cds-O2d)Cb",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CO u0 {1,S} {6,D}
3 CO u0 {1,S} {7,D}
4 CO u0 {1,S} {8,D}
5 Cb u0 {1,S}
6 O2d u0 {2,D}
7 O2d u0 {3,D}
8 O2d u0 {4,D}
""",
thermo = u'Cs-(Cds-O2d)(Cds-O2d)(Cds-O2d)(Cds-Cds)',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 732,
label = "Cs-(Cds-O2d)(Cds-O2d)(Cds-Cd)Cb",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CO u0 {1,S} {7,D}
3 CO u0 {1,S} {8,D}
4 Cd u0 {1,S} {6,D}
5 Cb u0 {1,S}
6 C u0 {4,D}
7 O2d u0 {2,D}
8 O2d u0 {3,D}
""",
thermo = u'Cs-(Cds-O2d)(Cds-O2d)(Cds-Cds)Cb',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 733,
label = "Cs-(Cds-O2d)(Cds-O2d)(Cds-Cds)Cb",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CO u0 {1,S} {7,D}
3 CO u0 {1,S} {8,D}
4 Cd u0 {1,S} {6,D}
5 Cb u0 {1,S}
6 Cd u0 {4,D}
7 O2d u0 {2,D}
8 O2d u0 {3,D}
""",
thermo = u'Cs-(Cds-O2d)(Cds-O2d)(Cds-Cds)(Cds-Cds)',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 734,
label = "Cs-(Cds-O2d)(Cds-O2d)(Cds-Cdd)Cb",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CO u0 {1,S} {7,D}
3 CO u0 {1,S} {8,D}
4 Cd u0 {1,S} {6,D}
5 Cb u0 {1,S}
6 Cdd u0 {4,D}
7 O2d u0 {2,D}
8 O2d u0 {3,D}
""",
thermo = u'Cs-(Cds-O2d)(Cds-O2d)(Cds-Cdd-Cd)Cb',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 735,
label = "Cs-(Cds-O2d)(Cds-O2d)(Cds-Cdd-O2d)Cb",
group =
"""
1 * Cs u0 {2,S} {3,S} | |
3 floats`)
"""
return _robotsim.TransformPoser_set(self, R, t)
def get(self):
"""
"""
return _robotsim.TransformPoser_get(self)
def enableTranslation(self, arg2):
"""
Args:
arg2 (bool)
"""
return _robotsim.TransformPoser_enableTranslation(self, arg2)
def enableRotation(self, arg2):
"""
Args:
arg2 (bool)
"""
return _robotsim.TransformPoser_enableRotation(self, arg2)
__swig_destroy__ = _robotsim.delete_TransformPoser
__del__ = lambda self: None
TransformPoser_swigregister = _robotsim.TransformPoser_swigregister
TransformPoser_swigregister(TransformPoser)
class ObjectPoser(Widget):
"""
"""
__swig_setmethods__ = {}
for _s in [Widget]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, ObjectPoser, name, value)
__swig_getmethods__ = {}
for _s in [Widget]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, ObjectPoser, name)
__repr__ = _swig_repr
def __init__(self, object):
"""
Args:
object (:class:`~klampt.RigidObjectModel`)
Returns:
(:obj:`ObjectPoser`):
"""
this = _robotsim.new_ObjectPoser(object)
try:
self.this.append(this)
except Exception:
self.this = this
def set(self, R, t):
"""
Args:
R (:obj:`list of 9 floats (so3 element)`)
t (:obj:`list of 3 floats`)
"""
return _robotsim.ObjectPoser_set(self, R, t)
def get(self):
"""
"""
return _robotsim.ObjectPoser_get(self)
__swig_destroy__ = _robotsim.delete_ObjectPoser
__del__ = lambda self: None
ObjectPoser_swigregister = _robotsim.ObjectPoser_swigregister
ObjectPoser_swigregister(ObjectPoser)
class RobotPoser(Widget):
"""
"""
__swig_setmethods__ = {}
for _s in [Widget]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, RobotPoser, name, value)
__swig_getmethods__ = {}
for _s in [Widget]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, RobotPoser, name)
__repr__ = _swig_repr
def __init__(self, robot):
"""
Args:
robot (:class:`~klampt.RobotModel`)
Returns:
(:obj:`RobotPoser`):
"""
this = _robotsim.new_RobotPoser(robot)
try:
self.this.append(this)
except Exception:
self.this = this
def setActiveDofs(self, dofs):
"""
Args:
dofs (:obj:`list of int`)
"""
return _robotsim.RobotPoser_setActiveDofs(self, dofs)
def set(self, q):
"""
Args:
q (:obj:`list of floats`)
"""
return _robotsim.RobotPoser_set(self, q)
def get(self):
"""
"""
return _robotsim.RobotPoser_get(self)
def getConditioned(self, qref):
"""
Args:
qref (:obj:`list of floats`)
"""
return _robotsim.RobotPoser_getConditioned(self, qref)
def addIKConstraint(self, obj):
"""
Args:
obj (:obj:`IKObjective`)
"""
return _robotsim.RobotPoser_addIKConstraint(self, obj)
def clearIKConstraints(self):
"""
"""
return _robotsim.RobotPoser_clearIKConstraints(self)
__swig_destroy__ = _robotsim.delete_RobotPoser
__del__ = lambda self: None
RobotPoser_swigregister = _robotsim.RobotPoser_swigregister
RobotPoser_swigregister(RobotPoser)
class Mass(_object):
"""
Stores mass information for a rigid body or robot link. Note: you should use the
set/get functions rather than changing the members directly due to strangeness
in SWIG's handling of vectors.
C++ includes: robotmodel.h
"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Mass, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Mass, name)
__repr__ = _swig_repr
def setMass(self, _mass):
"""
Args:
_mass (float)
"""
return _robotsim.Mass_setMass(self, _mass)
def getMass(self):
"""
Returns:
(float):
"""
return _robotsim.Mass_getMass(self)
def setCom(self, _com):
"""
Args:
_com (:obj:`list of floats`)
"""
return _robotsim.Mass_setCom(self, _com)
def getCom(self):
"""
"""
return _robotsim.Mass_getCom(self)
def setInertia(self, _inertia):
"""
Args:
_inertia (:obj:`list of floats`)
"""
return _robotsim.Mass_setInertia(self, _inertia)
def getInertia(self):
"""
"""
return _robotsim.Mass_getInertia(self)
__swig_setmethods__["mass"] = _robotsim.Mass_mass_set
__swig_getmethods__["mass"] = _robotsim.Mass_mass_get
if _newclass:
mass = _swig_property(_robotsim.Mass_mass_get, _robotsim.Mass_mass_set)
__swig_setmethods__["com"] = _robotsim.Mass_com_set
__swig_getmethods__["com"] = _robotsim.Mass_com_get
if _newclass:
com = _swig_property(_robotsim.Mass_com_get, _robotsim.Mass_com_set)
__swig_setmethods__["inertia"] = _robotsim.Mass_inertia_set
__swig_getmethods__["inertia"] = _robotsim.Mass_inertia_get
if _newclass:
inertia = _swig_property(_robotsim.Mass_inertia_get, _robotsim.Mass_inertia_set)
def __init__(self):
"""
Stores mass information for a rigid body or robot link. Note: you should use the
set/get functions rather than changing the members directly due to strangeness
in SWIG's handling of vectors.
Returns:
(:class:`~klampt.Mass`):
C++ includes: robotmodel.h
"""
this = _robotsim.new_Mass()
try:
self.this.append(this)
except Exception:
self.this = this
__swig_destroy__ = _robotsim.delete_Mass
__del__ = lambda self: None
Mass_swigregister = _robotsim.Mass_swigregister
Mass_swigregister(Mass)
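# Hypothetical usage sketch (not part of the generated bindings; assumes a
# loaded RobotModel named `robot`):
#   m = robot.link(0).getMass()
#   m.setMass(1.2)
#   m.setCom([0.0, 0.0, 0.05])
#   m.setInertia([0.01, 0.01, 0.02])  # diagonal inertia; a 9-float row-major matrix is also commonly accepted
#   robot.link(0).setMass(m)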
class ContactParameters(_object):
"""
Stores contact parameters for an entity. Currently only used for simulation, but
could be used for contact mechanics in the future.
C++ includes: robotmodel.h
"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ContactParameters, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ContactParameters, name)
__repr__ = _swig_repr
__swig_setmethods__["kFriction"] = _robotsim.ContactParameters_kFriction_set
__swig_getmethods__["kFriction"] = _robotsim.ContactParameters_kFriction_get
if _newclass:
kFriction = _swig_property(_robotsim.ContactParameters_kFriction_get, _robotsim.ContactParameters_kFriction_set)
__swig_setmethods__["kRestitution"] = _robotsim.ContactParameters_kRestitution_set
__swig_getmethods__["kRestitution"] = _robotsim.ContactParameters_kRestitution_get
if _newclass:
kRestitution = _swig_property(_robotsim.ContactParameters_kRestitution_get, _robotsim.ContactParameters_kRestitution_set)
__swig_setmethods__["kStiffness"] = _robotsim.ContactParameters_kStiffness_set
__swig_getmethods__["kStiffness"] = _robotsim.ContactParameters_kStiffness_get
if _newclass:
kStiffness = _swig_property(_robotsim.ContactParameters_kStiffness_get, _robotsim.ContactParameters_kStiffness_set)
__swig_setmethods__["kDamping"] = _robotsim.ContactParameters_kDamping_set
__swig_getmethods__["kDamping"] = _robotsim.ContactParameters_kDamping_get
if _newclass:
kDamping = _swig_property(_robotsim.ContactParameters_kDamping_get, _robotsim.ContactParameters_kDamping_set)
def __init__(self):
"""
Stores contact parameters for an entity. Currently only used for simulation, but
could be used for contact mechanics in the future.
Returns:
(:class:`~klampt.ContactParameters`):
C++ includes: robotmodel.h
"""
this = _robotsim.new_ContactParameters()
try:
self.this.append(this)
except Exception:
self.this = this
__swig_destroy__ = _robotsim.delete_ContactParameters
__del__ = lambda self: None
ContactParameters_swigregister = _robotsim.ContactParameters_swigregister
ContactParameters_swigregister(ContactParameters)
class RobotModelLink(_object):
"""
A reference to a link of a RobotModel.
The link stores many mostly-constant items (id, name, parent, geometry,
appearance, mass, joint axes). There are two exceptions:
* the link's current transform, which is affected by the RobotModel's current
configuration, i.e., the last :meth:`RobotModel.setConfig` (q) call.
* The various Jacobians of points on the link, accessed by
:meth:`RobotModelLink.getJacobian` ,
:meth:`RobotModelLink.getPositionJacobian` , and
:meth:`RobotModelLink.getOrientationJacobian` , which are configuration
dependent.
A RobotModelLink is not created by hand, but instead accessed using
:meth:`RobotModel.link` (index or name)
C++ includes: robotmodel.h
"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, RobotModelLink, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, RobotModelLink, name)
__repr__ = _swig_repr
def __init__(self):
"""
Returns:
(:class:`~klampt.RobotModelLink`):
"""
this = _robotsim.new_RobotModelLink()
try:
self.this.append(this)
except Exception:
self.this = this
def getID(self):
"""
Returns the ID of the robot link in its world (Note: not the same as getIndex())
Returns:
(int):
"""
return _robotsim.RobotModelLink_getID(self)
def getName(self):
"""
Returns the name of the robot link.
Returns:
(str):
"""
return _robotsim.RobotModelLink_getName(self)
def setName(self, name):
"""
Sets the name of the robot link.
Args:
name (str)
"""
return _robotsim.RobotModelLink_setName(self, name)
def robot(self):
"""
Returns a reference to the link's robot.
Returns:
(:class:`~klampt.RobotModel`):
"""
return _robotsim.RobotModelLink_robot(self)
def getIndex(self):
"""
Returns the index of the link (on its robot).
Returns:
(int):
"""
return _robotsim.RobotModelLink_getIndex(self)
def getParent(self):
"""
Returns the index of the link's parent (on its robot).
Returns:
(int):
"""
return _robotsim.RobotModelLink_getParent(self)
def parent(self):
"""
Returns a reference to the link's parent, or a NULL link if it has no parent.
Returns:
(:class:`~klampt.RobotModelLink`):
"""
return _robotsim.RobotModelLink_parent(self)
def setParent(self, *args):
"""
Sets the link's parent (must be on the same robot).
setParent (p)
setParent (l)
Args:
p (int, optional):
l (:class:`~klampt.RobotModelLink`, optional):
"""
return _robotsim.RobotModelLink_setParent(self, *args)
def geometry(self):
"""
Returns a reference to the link's geometry.
Returns:
(:class:`~klampt.Geometry3D`):
"""
return _robotsim.RobotModelLink_geometry(self)
def appearance(self):
"""
Returns a reference to the link's appearance.
Returns:
(:class:`~klampt.Appearance`):
"""
return _robotsim.RobotModelLink_appearance(self)
def getMass(self):
"""
Retrieves the inertial properties of the link. (Note that the Mass is given with
origin at the link frame, not about the COM.)
Returns:
(:class:`~klampt.Mass`):
"""
return _robotsim.RobotModelLink_getMass(self)
def setMass(self, mass):
"""
Sets the inertial properties of the link. (Note that the Mass is given with
origin at the link frame, not about the COM.)
Args:
mass (:class:`~klampt.Mass`)
"""
return _robotsim.RobotModelLink_setMass(self, mass)
def getParentTransform(self):
"""
Gets transformation (R,t) to the parent link.
"""
return _robotsim.RobotModelLink_getParentTransform(self)
def setParentTransform(self, R, t):
"""
Sets transformation (R,t) to the parent link.
Args:
R (:obj:`list of 9 floats (so3 element)`)
t (:obj:`list of 3 floats`)
"""
return _robotsim.RobotModelLink_setParentTransform(self, R, t)
def getAxis(self):
"""
Gets the local rotational / translational axis.
"""
return _robotsim.RobotModelLink_getAxis(self)
def setAxis(self, axis):
"""
Sets the local rotational / translational axis.
Args:
axis (:obj:`list of 3 floats`)
"""
return _robotsim.RobotModelLink_setAxis(self, axis)
def getWorldPosition(self, plocal):
"""
Converts point from local to world coordinates.
Args:
plocal (:obj:`list of 3 floats`)
"""
return _robotsim.RobotModelLink_getWorldPosition(self, plocal)
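# e.g. link.getWorldPosition([0, 0, 0]) gives the link frame origin in world
# coordinates for the robot's current configuration (illustrative usage).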
def getWorldDirection(self, vlocal):
"""
Converts direction from local to world coordinates.
Args:
vlocal (:obj:`list of 3 floats`)
"""
return _robotsim.RobotModelLink_getWorldDirection(self, vlocal)
def getLocalPosition(self, pworld):
"""
Converts point from world to local coordinates.
Args:
pworld (:obj:`list of 3 floats`)
"""
return _robotsim.RobotModelLink_getLocalPosition(self, pworld)
def getLocalDirection(self, vworld):
"""
Converts direction from world to local coordinates.
Args:
vworld (:obj:`list of 3 floats`)
"""
return _robotsim.RobotModelLink_getLocalDirection(self, vworld)
def getTransform(self):
"""
Gets transformation (R,t) to the world frame.
"""
return _robotsim.RobotModelLink_getTransform(self)
def setTransform(self, R, t):
"""
Sets transformation (R,t) to the world frame. Note: this does NOT perform
inverse kinematics. The transform is overwritten when the robot's setConfig()
method is called.
Args:
R (:obj:`list of | |
# }
# ]
#
return self.parse_markets(response)
else:
raise NotSupported(self.id + ' fetchMarketsByType does not support market type ' + type)
async def fetch_currencies(self, params={}):
# has['fetchCurrencies'] is currently set to False
# despite that their docs say these endpoints are public:
# https://www.okex.com/api/account/v3/withdrawal/fee
# https://www.okex.com/api/account/v3/currencies
# it will still reply with {"code":30001, "message": "OK-ACCESS-KEY header is required"}
# if you attempt to access it without authentication
response = await self.accountGetCurrencies(params)
#
# [
# {
# name: '',
# currency: 'BTC',
# can_withdraw: '1',
# can_deposit: '1',
# min_withdrawal: '0.0100000000000000'
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'currency')
code = self.safe_currency_code(id)
precision = 8 # default precision, todo: fix "magic constants"
name = self.safe_string(currency, 'name')
canDeposit = self.safe_integer(currency, 'can_deposit')
canWithdraw = self.safe_integer(currency, 'can_withdraw')
active = True if (canDeposit and canWithdraw) else False
result[code] = {
'id': id,
'code': code,
'info': currency,
'type': None,
'name': name,
'active': active,
'fee': None, # todo: redesign
'precision': precision,
'limits': {
'amount': {'min': None, 'max': None},
'price': {'min': None, 'max': None},
'cost': {'min': None, 'max': None},
'withdraw': {
'min': self.safe_float(currency, 'min_withdrawal'),
'max': None,
},
},
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentId'
method += 'Depth' if (market['type'] == 'swap') else 'Book'
request = {
'instrument_id': market['id'],
}
if limit is not None:
request['size'] = limit # max 200
response = await getattr(self, method)(self.extend(request, params))
#
# { asks: [["0.02685268", "0.242571", "1"],
# ["0.02685493", "0.164085", "1"],
# ...
# ["0.02779", "1.039", "1"],
# ["0.027813", "0.0876", "1"] ],
# bids: [["0.02684052", "10.371849", "1"],
# ["0.02684051", "3.707", "4"],
# ...
# ["0.02634963", "0.132934", "1"],
# ["0.02634962", "0.264838", "2"] ],
# timestamp: "2018-12-17T20:24:16.159Z" }
#
timestamp = self.parse8601(self.safe_string(response, 'timestamp'))
return self.parse_order_book(response, timestamp)
def parse_ticker(self, ticker, market=None):
#
# { best_ask: "0.02665472",
# best_bid: "0.02665221",
# instrument_id: "ETH-BTC",
# product_id: "ETH-BTC",
# last: "0.02665472",
# ask: "0.02665472", # missing in the docs
# bid: "0.02665221", # not mentioned in the docs
# open_24h: "0.02645482",
# high_24h: "0.02714633",
# low_24h: "0.02614109",
# base_volume_24h: "572298.901923",
# timestamp: "2018-12-17T21:20:07.856Z",
# quote_volume_24h: "15094.86831261" }
#
timestamp = self.parse8601(self.safe_string(ticker, 'timestamp'))
symbol = None
marketId = self.safe_string(ticker, 'instrument_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
elif marketId is not None:
parts = marketId.split('-')
numParts = len(parts)
if numParts == 2:
baseId, quoteId = parts
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = marketId
if (symbol is None) and (market is not None):
symbol = market['symbol']
last = self.safe_float(ticker, 'last')
open = self.safe_float(ticker, 'open_24h')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high_24h'),
'low': self.safe_float(ticker, 'low_24h'),
'bid': self.safe_float(ticker, 'best_bid'),
'bidVolume': self.safe_float(ticker, 'best_bid_size'),
'ask': self.safe_float(ticker, 'best_ask'),
'askVolume': self.safe_float(ticker, 'best_ask_size'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'base_volume_24h'),
'quoteVolume': self.safe_float(ticker, 'quote_volume_24h'),
'info': ticker,
}
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentIdTicker'
request = {
'instrument_id': market['id'],
}
response = await getattr(self, method)(self.extend(request, params))
#
# { best_ask: "0.02665472",
# best_bid: "0.02665221",
# instrument_id: "ETH-BTC",
# product_id: "ETH-BTC",
# last: "0.02665472",
# ask: "0.02665472",
# bid: "0.02665221",
# open_24h: "0.02645482",
# high_24h: "0.02714633",
# low_24h: "0.02614109",
# base_volume_24h: "572298.901923",
# timestamp: "2018-12-17T21:20:07.856Z",
# quote_volume_24h: "15094.86831261" }
#
return self.parse_ticker(response)
async def fetch_tickers_by_type(self, type, symbols=None, params={}):
await self.load_markets()
method = type + 'GetInstrumentsTicker'
response = await getattr(self, method)(params)
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
async def fetch_tickers(self, symbols=None, params={}):
defaultType = self.safe_string_2(self.options, 'fetchTickers', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
return await self.fetch_tickers_by_type(type, symbols, self.omit(params, 'type'))
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# spot trades
#
# {
# time: "2018-12-17T23:31:08.268Z",
# timestamp: "2018-12-17T23:31:08.268Z",
# trade_id: "409687906",
# price: "0.02677805",
# size: "0.923467",
# side: "sell"
# }
#
# futures trades, swap trades
#
# {
# trade_id: "1989230840021013",
# side: "buy",
# price: "92.42",
# qty: "184", # missing in swap markets
# size: "5", # missing in futures markets
# timestamp: "2018-12-17T23:26:04.613Z"
# }
#
# fetchOrderTrades(private)
#
# spot trades, margin trades
#
# {
# "created_at":"2019-03-15T02:52:56.000Z",
# "exec_type":"T", # whether the order is taker or maker
# "fee":"0.00000082",
# "instrument_id":"BTC-USDT",
# "ledger_id":"3963052721",
# "liquidity":"T", # whether the order is taker or maker
# "order_id":"2482659399697408",
# "price":"3888.6",
# "product_id":"BTC-USDT",
# "side":"buy",
# "size":"0.00055306",
# "timestamp":"2019-03-15T02:52:56.000Z"
# },
#
# futures trades, swap trades
#
# {
# "trade_id":"197429674631450625",
# "instrument_id":"EOS-USD-SWAP",
# "order_id":"6a-7-54d663a28-0",
# "price":"3.633",
# "order_qty":"1.0000",
# "fee":"-0.000551",
# "created_at":"2019-03-21T04:41:58.0Z", # missing in swap trades
# "timestamp":"2019-03-25T05:56:31.287Z", # missing in futures trades
# "exec_type":"M", # whether the order is taker or maker
# "side":"short", # "buy" in futures trades
# }
#
symbol = None
marketId = self.safe_string(trade, 'instrument_id')
base = None
quote = None
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
base = market['base']
quote = market['quote']
elif marketId is not None:
parts = marketId.split('-')
numParts = len(parts)
if numParts == 2:
baseId, quoteId = parts
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = marketId
if (symbol is None) and (market is not None):
symbol = market['symbol']
base = market['base']
quote = market['quote']
timestamp = self.parse8601(self.safe_string_2(trade, 'timestamp', 'created_at'))
price = self.safe_float(trade, 'price')
amount = self.safe_float_2(trade, 'size', 'qty')
amount = self.safe_float(trade, 'order_qty', amount)
takerOrMaker = self.safe_string_2(trade, 'exec_type', 'liquidity')
if takerOrMaker == 'M':
takerOrMaker = 'maker'
elif takerOrMaker == 'T':
takerOrMaker = 'taker'
side = self.safe_string(trade, 'side')
cost = None
if amount is not None:
if price is not None:
cost = amount * price
feeCost = self.safe_float(trade, 'fee')
fee = None
if feeCost is not None:
feeCurrency = base if (side == 'buy') else quote
fee = {
# fee is either a positive number(invitation rebate)
# or a negative number(transaction fee deduction)
# therefore we need to invert the fee
# more about it https://github.com/ccxt/ccxt/issues/5909
'cost': -feeCost,
'currency': feeCurrency,
}
orderId = self.safe_string(trade, 'order_id')
return {
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': self.safe_string_2(trade, 'trade_id', 'ledger_id'),
'order': orderId,
'type': None,
'takerOrMaker': takerOrMaker,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentIdTrades'
if (limit is None) or (limit > 100):
limit = 100 # maximum = default = 100
request = {
'instrument_id': market['id'],
'limit': limit,
# from: 'id',
# to: 'id',
}
response = await getattr(self, method)(self.extend(request, params))
#
# spot markets
#
# [
# {
# time: "2018-12-17T23:31:08.268Z",
# timestamp: "2018-12-17T23:31:08.268Z",
# trade_id: "409687906",
# price: "0.02677805",
# size: "0.923467",
# side: "sell"
# }
# ]
#
# futures markets, swap markets
#
# [
# {
# trade_id: "1989230840021013",
# side: "buy",
# price: "92.42",
# qty: "184", # missing in swap markets
# size: "5", # missing in futures markets
# timestamp: "2018-12-17T23:26:04.613Z"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# spot markets
#
# {
# close: "0.02684545",
# high: "0.02685084",
# low: "0.02683312",
# open: "0.02683894",
# time: "2018-12-17T20:28:00.000Z",
# volume: "101.457222"
# }
#
# futures markets
#
# [
# 1545072720000,
# 0.3159,
# 0.3161,
# 0.3144,
# 0.3149,
# 22886,
# 725179.26172331,
# ]
#
if isinstance(ohlcv, list):
numElements = len(ohlcv)
volumeIndex = 6 if (numElements > 6) else 5
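# note(assumption): futures candles appear to carry 7 elements where the last
# one is the base-currency volume, swap candles 6, which is why volumeIndex
# prefers element 6 when it exists and element 5 otherwise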
timestamp = self.safe_value(ohlcv, 0)
if isinstance(timestamp, basestring):
timestamp = self.parse8601(timestamp)
return [
timestamp, # timestamp
self.safe_float(ohlcv, 1), # Open
self.safe_float(ohlcv, 2), # High
self.safe_float(ohlcv, 3), # Low
self.safe_float(ohlcv, 4), # Close
# self.safe_float(ohlcv, 5), # Quote Volume
# self.safe_float(ohlcv, 6), # Base Volume
| |
constraints.append(Constraint(
"Coincident",seg,EndPoint,seg2,EndPoint))
if obj.isDerivedFrom("Part::Feature"):
objs = [obj]
while objs:
obj = objs[0]
objs = objs[1:] + obj.OutList
doc.removeObject(obj.Name)
nobj.addConstraint(constraints)
recomputeObj(nobj)
return nobj
def _makeCompound(self,obj,name,label=None,fit_arcs=False,
fuse=False,add_feature=False,force=False):
obj = unpack(obj)
if not isinstance(obj,(list,tuple)):
if not force and (
not fuse or obj.TypeId=='Path::FeatureArea'):
return obj
obj = [obj]
if fuse:
return self._makeArea(obj,name,label=label,fit_arcs=fit_arcs)
if add_feature or self.add_feature:
return self._makeObject('Part::Compound',
'{}_combo'.format(name),label,'Links',obj)
return Part.makeCompound(obj)
def _makeArea(self,obj,name,offset=0,op=0,fill=None,label=None,
force=False,fit_arcs=False,reorient=False,workplane=False):
if fill is None:
fill = 2
elif fill:
fill = 1
else:
fill = 0
if not isinstance(obj,(list,tuple)):
obj = (obj,)
if self.add_feature:
if not force and obj[0].TypeId == 'Path::FeatureArea' and (
obj[0].Operation == op or len(obj[0].Sources)==1) and \
obj[0].Fill == fill:
ret = obj[0]
if len(obj) > 1:
ret.Sources = list(ret.Sources) + list(obj[1:])
else:
ret = self._makeObject('Path::FeatureArea',
'{}_area'.format(name),label)
ret.Sources = obj
ret.Operation = op
ret.Fill = fill
ret.Offset = offset
ret.Coplanar = 0
if workplane:
ret.WorkPlane = self.work_plane
ret.FitArcs = fit_arcs
ret.Reorient = reorient
for o in obj:
o.ViewObject.Visibility = False
recomputeObj(ret)
else:
ret = Path.Area(Fill=fill,FitArcs=fit_arcs,Coplanar=0)
if workplane:
ret.setPlane(self.work_plane)
for o in obj:
ret.add(o,op=op)
if offset:
ret = ret.makeOffset(offset=offset)
else:
ret = ret.getShape()
return ret
def _makeWires(self,obj,name,offset=0,fill=False,label=None,
fit_arcs=False,workplane=False):
if self.add_feature:
if self.make_sketch:
obj = self._makeSketch(obj,name,label)
elif isinstance(obj,Part.Shape):
obj = self._makeObject('Part::Feature', '{}_wire'.format(name),
label,'Shape',obj)
elif isinstance(obj,(list,tuple)):
objs = []
comp = []
for o in obj:
if isinstance(o,Part.Shape):
comp.append(o)
else:
objs.append(o)
if comp:
comp = Part.makeCompound(comp)
objs.append(self._makeObject('Part::Feature',
'{}_wire'.format(name),label,'Shape',comp))
obj = objs
if fill or offset:
return self._makeArea(obj,name,offset=offset,fill=fill,
fit_arcs=fit_arcs,label=label,workplane=workplane)
else:
return self._makeCompound(obj,name,label=label)
def _makeSolid(self,obj,name,height,label=None,fit_arcs=True):
obj = self._makeCompound(obj,name,label=label,
fuse=True,fit_arcs=fit_arcs)
if not self.add_feature:
return obj.extrude(Vector(0,0,height))
nobj = self._makeObject('Part::Extrusion',
'{}_solid'.format(name),label)
nobj.Base = obj
nobj.Dir = Vector(0,0,height)
obj.ViewObject.Visibility = False
recomputeObj(nobj)
return nobj
def _makeFuse(self,objs,name,label=None,force=False):
obj = unpack(objs)
if not isinstance(obj,(list,tuple)):
if not force:
return obj
obj = [obj]
name = '{}_fuse'.format(name)
if self.add_feature:
self._log('making fuse {}...',name)
obj = self._makeObject('Part::MultiFuse',name,label,'Shapes',obj)
self._log('fuse done')
return obj
solids = []
for o in obj:
solids += o.Solids;
if solids:
self._log('making fuse {}...',name)
obj = solids[0].multiFuse(solids[1:])
self._log('fuse done')
return obj
def _makeCut(self,base,tool,name,label=None):
base = self._makeFuse(base,name,label=label)
tool = self._makeFuse(tool,'drill',label=label)
name = '{}_drilled'.format(name)
self._log('making cut {}...',name)
if self.add_feature:
cut = self._makeObject('Part::Cut',name,label=label)
cut.Base = base
cut.Tool = tool
base.ViewObject.Visibility = False
tool.ViewObject.Visibility = False
recomputeObj(cut)
cut.ViewObject.ShapeColor = base.ViewObject.ShapeColor
else:
cut = base.cut(tool)
self._log('cut done')
return cut
def _place(self,obj,pos,angle=None):
if not self.add_feature:
if angle:
obj.rotate(Vector(),Vector(0,0,1),angle)
obj.translate(pos)
else:
r = Rotation(Vector(0,0,1),angle) if angle else Rotation()
obj.Placement = Placement(pos,r)
obj.purgeTouched()
def makeBoard(self,shape_type='solid',thickness=None,fit_arcs=True,
holes=True, minHoleSize=0,ovalHole=True,prefix=''):
edges = []
try:
# get layer name for Edge.Cuts
_,layer = self.findLayer(44)
except Exception:
raise RuntimeError('No Edge.Cuts layer found')
self._pushLog('making board...',prefix=prefix)
self._log('making {} lines',len(self.pcb.gr_line))
for l in self.pcb.gr_line:
if l.layer != layer:
continue
edges.append([l.width,
Part.makeLine(makeVect(l.start),makeVect(l.end))])
self._log('making {} arcs',len(self.pcb.gr_arc))
for l in self.pcb.gr_arc:
if l.layer != layer:
continue
# for gr_arc, 'start' is actually the center, and 'end' is the start
edges.append([l.width,
makeArc(makeVect(l.start),makeVect(l.end),l.angle)])
if hasattr(self.pcb,'gr_curve'):
self._log('making {} curves',len(self.pcb.gr_curve))
for l in self.pcb.gr_curve:
if l.layer != layer:
continue
edges.append([l.width,
makeCurve([makeVect(p) for p in SexpList(l.pts.xy)])])
if not edges:
self._popLog('no board edges found')
return
# The line width in edge cuts are important. When milling, the line
# width can represent the diameter of the drill bits to use. The user
# can use lines thick enough for hole cutting. In addition, the
# endpoints of thick lines do not have to coincide to complete a loop.
#
# Therefore, we shall use the line width as tolerance to detect closed
# wires. And for non-closed wires, if the shape_type is not wire, we
# shall thicken the wire using Path.Area for hole cutting.
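# e.g. two 0.5mm edge segments whose endpoints are 0.4mm apart still close a
# loop here, because 0.4 < (0.5 + 0.5)/2.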
for info in edges:
w,e = info
e.fixTolerance(w)
info += [e.firstVertex().Point,e.lastVertex().Point]
non_closed = defaultdict(list)
wires = []
while edges:
w,e,pstart,pend = edges.pop(-1)
wstart = wend = w
elist = [(w,e)]
closed = False
i = 0
while i < len(edges):
w,e,ps,pe = edges[i]
if pstart.distanceToPoint(ps) < (wstart+w)/2:
e.reverse()
pstart = pe
wstart = w
elist.insert(0,(w,e))
elif pstart.distanceToPoint(pe) < (wstart+w)/2:
pstart = ps
wstart = w
elist.insert(0,(w,e))
elif pend.distanceToPoint(ps) < (wend+w)/2:
e.reverse()
pend = pe
wend = w
elist.append((w,e))
elif pend.distanceToPoint(pe) < (wend+w)/2:
pend = ps
wend = w
elist.append((w,e))
else:
i += 1
continue
edges.pop(i)
i = 0
if pstart.distanceToPoint(pend) < (wstart+wend)/2:
closed = True
break
wire = None
try:
# tol = max([o[0] for o in elist])
# wire = Part.makeWires([o[1] for o in elist],'',tol,True)
wire = Part.Wire([o[1] for o in elist])
# wire.fixWire(None,tol)
# wire.fix(tol,tol,tol)
except Exception:
pass
if closed and (not wire or not wire.isClosed()):
logger.warning('wire not closed')
closed = False
if wire and closed:
wires.append(wire)
else:
for w,e in elist:
non_closed[w].append(e)
if not thickness:
thickness = self.pcb.general.thickness
def _addHoles(objs):
h = self._cutHoles(None,holes,None,
minSize=minHoleSize,oval=ovalHole)
if isinstance(h,(tuple,list)):
objs += h
elif holes:
objs.append(h)
return objs
def _wire():
objs = []
if wires:
objs.append(self._makeWires(wires,'board'))
for width,edges in iteritems(non_closed):
objs.append(self._makeWires(edges,'board',label=width))
return self._makeCompound(_addHoles(objs),'board')
def _face():
if not wires:
raise RuntimeError('no closed wire')
# Pick the wire with the largest area as outline
areas = [ Part.Face(w).Area for w in wires ]
outer = wires.pop(areas.index(max(areas)))
objs = [ self._makeWires(outer,'board',label='outline') ]
if wires:
objs.append(self._makeWires(wires,'board',label='inner'))
for width,elist in iteritems(non_closed):
wire = self._makeWires(elist,'board',label=width)
# thicken non closed wire for hole cutting
objs.append(self._makeArea(wire,'board',label=width,
offset = width*0.5))
return self._makeArea(_addHoles(objs),'board',
op=1,fill=True,fit_arcs=fit_arcs)
def _solid():
return self._makeSolid(_face(),'board',thickness,
fit_arcs = fit_arcs)
try:
func = locals()['_{}'.format(shape_type)]
except KeyError:
raise ValueError('invalid shape type: {}'.format(shape_type))
obj = func()
if self.add_feature:
if hasattr(obj.ViewObject,'MapFaceColor'):
obj.ViewObject.MapFaceColor = False
obj.ViewObject.ShapeColor = self.colors['board']
self._popLog('board done')
fitView();
return obj
def makeHoles(self,shape_type='wire',minSize=0,maxSize=0,
oval=False,prefix='',offset=0.0,npth=0,thickness=None):
self._pushLog('making holes...',prefix=prefix)
holes = defaultdict(list)
ovals = defaultdict(list)
width=0
def _wire(obj,name,fill=False):
return self._makeWires(obj,name,fill=fill,label=width)
def _face(obj,name):
return _wire(obj,name,True)
def _solid(obj,name):
return self._makeWires(obj,name,fill=True,label=width,fit_arcs=True)
try:
func = locals()['_{}'.format(shape_type)]
except KeyError:
raise ValueError('invalid shape type: {}'.format(shape_type))
oval_count = 0
count = 0
skip_count = 0
if not offset:
offset = self.hole_size_offset;
for m in self.pcb.module:
m_at,m_angle = getAt(m.at)
for p in m.pad:
if 'drill' not in p:
continue
if self.filterNets(p):
skip_count += 1
continue
if p[1]=='np_thru_hole':
if npth<0:
skip_count += 1
continue
ofs = abs(offset)
else:
if npth>0:
skip_count += 1
continue
ofs = -abs(offset)
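# note: with a positive hole_size_offset, non-plated (np_thru_hole) holes are
# cut |offset| larger and plated ones |offset| smaller than the drill size;
# the likely intent is to allow for plating thickness, though that is an
# assumption rather than something stated here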
if p.drill.oval:
if not oval:
continue
size = Vector(p.drill[0],p.drill[1])
w = make_oval(size+Vector(ofs,ofs))
ovals[min(size.x,size.y)].append(w)
oval_count += 1
elif p.drill[0]>=minSize and \
(not maxSize or p.drill[0]<=maxSize):
w = make_circle(Vector(p.drill[0]+ofs))
holes[p.drill[0]].append(w)
count += 1
else:
skip_count += 1
continue
at,angle = getAt(p.at)
angle -= m_angle;
if not isZero(angle):
w.rotate(Vector(),Vector(0,0,1),angle)
w.translate(at)
if m_angle:
w.rotate(Vector(),Vector(0,0,1),m_angle)
w.translate(m_at)
self._log('pad holes: {}, skipped: {}',count+skip_count,skip_count)
if oval:
self._log('oval holes: {}',oval_count)
if npth<=0:
skip_count = 0
ofs = -abs(offset)
for v in self.pcb.via:
if self.filterNets(v):
skip_count += 1
continue
if v.drill>=minSize and (not maxSize or v.drill<=maxSize):
w = make_circle(Vector(v.drill+ofs))
holes[v.drill].append(w)
w.translate(makeVect(v.at))
else:
skip_count += 1
self._log('via holes: {}, skipped: {}',len(self.pcb.via),skip_count)
self._log('total holes added: {}',
count+oval_count+len(self.pcb.via)-skip_count)
objs = []
if holes or ovals:
if self.merge_holes:
for o in ovals.values():
objs += o
for o in holes.values():
objs += o
objs = func(objs,"holes")
else:
for r in ((ovals,'oval'),(holes,'hole')):
if not r[0]:
continue
for (width,rs) in iteritems(r[0]):
objs.append(func(rs,r[1]))
if not npth:
label=None
elif npth>0:
label='npth'
else:
label='th'
if shape_type == 'solid':
if not thickness:
thickness = self.pcb.general.thickness+0.02
pos = -0.01
else:
pos = 0.0
objs = self._makeSolid(objs,'holes',thickness,label=label)
self._place(objs,FreeCAD.Vector(0,0,pos))
else:
objs = self._makeCompound(objs,'holes',label=label)
self._popLog('holes done')
return objs
def _cutHoles(self,objs,holes,name,label=None,fit_arcs=False,
minSize=0,maxSize=0,oval=True,npth=0,offset=0.0):
if not holes:
return objs
if not isinstance(holes,(Part.Feature,Part.Shape)):
hit = False
if self.holes_cache is not None:
key = '{}.{}.{}.{}.{}.{}'.format(
self.add_feature,minSize,maxSize,oval,npth,offset)
doc = getActiveDoc();
if self.add_feature and self.active_doc_uuid!=doc.Uid:
self.holes_cache.clear()
self.active_doc_uuid = doc.Uid
try:
holes = self.holes_cache[key]
if self.add_feature:
# access the object's Name to make sure it is not
# deleted
self._log("fetch holes '{}' "
"from cache".format(holes.Name))
else:
self._log("fetch holes from cache")
hit = True
except Exception:
pass
if not hit:
self._pushLog()
holes = self.makeHoles(shape_type='wire',prefix=None,npth=npth,
minSize=minSize,maxSize=maxSize,oval=oval,offset=offset)
self._popLog()
if isinstance(self.holes_cache,dict):
self.holes_cache[key] = holes
if not objs:
return holes
objs = (self._makeCompound(objs,name,label=label),holes)
return self._makeArea(objs,name,op=1,label=label,fit_arcs=fit_arcs)
def makePads(self,shape_type='face',thickness=0.05,holes=False,
fit_arcs=True,prefix=''):
self._pushLog('making pads...',prefix=prefix)
def _wire(obj,name,label=None,fill=False):
return self._makeWires(obj,name,fill=fill,label=label)
def _face(obj,name,label=None):
return _wire(obj,name,label,True)
_solid = _face
try:
func = locals()['_{}'.format(shape_type)]
except KeyError:
raise ValueError('invalid shape type: {}'.format(shape_type))
layer_match = '*.{}'.format(self.layer.split('.')[-1])
objs = []
count = 0
skip_count = 0
for i,m in enumerate(self.pcb.module):
ref = ''
for t in | |
import numpy as np
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedShuffleSplit
from sklearn.linear_model import LogisticRegression
from sklearn.isotonic import IsotonicRegression
from scipy.special import expit
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score
import time
# Framework:
from preprocessing.preprocessing import convert_categorical_variables, standardize, handle_missing_data, \
preprocess_credit_card_data, preprocess_kdd98, preprocess_give_me_some_credit, preprocess_telco_customer_churn, \
preprocess_default_credit_card, preprocess_bank_marketing, preprocess_vub_credit_scoring, \
preprocess_tv_subscription_churn, preprocess_kaggle_ieee_fraud
from experiments.experimental_design import experimental_design
from performance_metrics.performance_metrics import get_performance_metrics, evaluate_experiments, cost_with_algorithm
# Models:
from methodologies.cs_logit.cs_logit import CSLogit
from methodologies.cs_net import CSNeuralNetwork
from methodologies.cs_boost import CSBoost
class Experiment:
def __init__(self, settings, datasets, methodologies, evaluators):
self.settings = settings
self.l1 = self.settings['l1_regularization']
self.lambda1_list = self.settings['lambda1_options']
self.l2 = self.settings['l2_regularization']
self.lambda2_list = self.settings['lambda2_options']
self.neurons_list = self.settings['neurons_options']
if self.l1 and self.l2:
raise ValueError('Only l1 or l2 regularization allowed, not both!')
self.datasets = datasets
self.methodologies = methodologies
self.evaluators = evaluators
self.results_tr_instance = {}
self.results_tr_instance_calibrated = {}
self.results_tr_class = {}
self.results_tr_class_calibrated = {}
self.results_tr_class_imb = {}
self.results_tr_empirical_id = {}
self.results_tr_empirical_cd = {}
self.results_tr_empirical_f1 = {}
self.results_tr_insensitive = {}
def run(self, directory):
"""
LOAD AND PREPROCESS DATA
"""
print('\n\n************** LOADING DATA **************\n')
# Verify that only one dataset is selected
if sum(self.datasets.values()) != 1:
raise ValueError('Select only one dataset!')
if self.datasets['kaggle credit card fraud']:
print('Kaggle Credit Card Fraud')
covariates, labels, amounts, cost_matrix, categorical_variables = preprocess_credit_card_data(fixed_cost=10)
elif self.datasets['kdd98 direct mailing']:
print('KDD98 Direct Mailing Donations')
covariates, labels, amounts, cost_matrix, categorical_variables = preprocess_kdd98()
elif self.datasets['kaggle give me some credit']:
print('Kaggle Give Me Some Credit')
covariates, labels, amounts, cost_matrix, categorical_variables = preprocess_give_me_some_credit()
elif self.datasets['kaggle telco customer churn']:
print('Kaggle Telco Customer Churn')
covariates, labels, amounts, cost_matrix, categorical_variables = preprocess_telco_customer_churn()
elif self.datasets['uci default of credit card clients']:
print('UCI Default of Credit Card Clients')
covariates, labels, amounts, cost_matrix, categorical_variables = preprocess_default_credit_card()
elif self.datasets['uci bank marketing']:
print('UCI Bank Marketing')
covariates, labels, amounts, cost_matrix, categorical_variables = preprocess_bank_marketing()
elif self.datasets['vub credit scoring']:
print('VUB Credit Scoring')
covariates, labels, amounts, cost_matrix, categorical_variables = preprocess_vub_credit_scoring()
elif self.datasets['tv subscription churn']:
print('TV Subscription Churn')
covariates, labels, amounts, cost_matrix, categorical_variables = preprocess_tv_subscription_churn()
elif self.datasets['kaggle ieee fraud']:
print('Kaggle IEEE Fraud Detection')
covariates, labels, amounts, cost_matrix, categorical_variables = preprocess_kaggle_ieee_fraud(subsample=1)
else:
raise Exception('No dataset specified')
"""
RUN EXPERIMENTS
"""
print('\n\n***** BUILDING CLASSIFICATION MODELS *****')
# Prepare the cross-validation procedure
folds = self.settings['folds']
repeats = self.settings['repeats']
rskf = RepeatedStratifiedKFold(n_splits=folds, n_repeats=repeats, random_state=42)
prepr = experimental_design(labels, amounts)
# Prepare the evaluation matrices
n_methodologies = sum(self.methodologies.values())
for key in self.evaluators.keys():
if self.evaluators[key]:
self.results_tr_instance[key] = np.empty(shape=(n_methodologies, folds * repeats), dtype='object')
self.results_tr_instance_calibrated[key] = np.empty(shape=(n_methodologies, folds * repeats),
dtype='object')
self.results_tr_class[key] = np.empty(shape=(n_methodologies, folds * repeats), dtype='object')
self.results_tr_class_calibrated[key] = np.empty(shape=(n_methodologies, folds * repeats),
dtype='object')
self.results_tr_class_imb[key] = np.empty(shape=(n_methodologies, folds * repeats), dtype='object')
self.results_tr_empirical_id[key] = np.empty(shape=(n_methodologies, folds * repeats), dtype='object')
self.results_tr_empirical_cd[key] = np.empty(shape=(n_methodologies, folds * repeats), dtype='object')
self.results_tr_empirical_f1[key] = np.empty(shape=(n_methodologies, folds * repeats), dtype='object')
self.results_tr_insensitive[key] = np.empty(shape=(n_methodologies, folds * repeats), dtype='object')
for i, (train_val_index, test_index) in enumerate(rskf.split(covariates, prepr)):
print('\nCross validation: ' + str(i + 1))
index = 0
x_train_val, x_test = covariates.iloc[train_val_index], covariates.iloc[test_index]
y_train_val, y_test = labels[train_val_index], labels[test_index]
amounts_train_val, amounts_test = amounts[train_val_index], amounts[test_index]
cost_matrix_train_val, cost_matrix_test = cost_matrix[train_val_index, :], cost_matrix[test_index, :]
# Split training and validation set (based on instance-dependent costs)
train_ratio = 1 - self.settings['val_ratio']
skf = StratifiedShuffleSplit(n_splits=1, train_size=train_ratio, random_state=42)
prepr_val = experimental_design(y_train_val, amounts_train_val)
for train_index, val_index in skf.split(x_train_val, prepr_val):
x_train, x_val = x_train_val.iloc[train_index], x_train_val.iloc[val_index]
y_train, y_val = y_train_val[train_index], y_train_val[val_index]
cost_matrix_train, cost_matrix_val = cost_matrix_train_val[train_index, :], cost_matrix_train_val[
val_index, :]
# Setting: instance or class-dependent costs?
if self.settings['class_costs']:
cost_matrix_train = np.tile(cost_matrix_train.mean(axis=0)[None, :], (len(y_train), 1, 1))
cost_matrix_val = np.tile(cost_matrix_val.mean(axis=0)[None, :], (len(y_val), 1, 1))
# Preprocessing: Handle missing data, convert categorical variables, standardize, convert to numpy
x_train, x_val, x_test, categorical_variables = handle_missing_data(x_train, x_val, x_test,
categorical_variables)
x_train, x_val, x_test = convert_categorical_variables(x_train, y_train, x_val, x_test,
categorical_variables)
x_train, x_val, x_test = standardize(x_train=x_train, x_val=x_val, x_test=x_test)
# Assign thresholds for the different strategies:
# Instance-dependent cost-sensitive threshold
threshold_instance = (cost_matrix_test[:, 1, 0] - cost_matrix_test[:, 0, 0]) / (
cost_matrix_test[:, 1, 0] - cost_matrix_test[:, 0, 0]
+ cost_matrix_test[:, 0, 1] - cost_matrix_test[:, 1, 1])
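# Worked example with illustrative numbers (not taken from any of the datasets),
# assuming the cost_matrix[:, i, j] entry is the cost of predicting class i when
# the true class is j: with c_TN = c_TP = 0, c_FP = cost_matrix[:, 1, 0] = 1 and
# c_FN = cost_matrix[:, 0, 1] = 5, the threshold becomes
# (1 - 0) / (1 - 0 + 5 - 0) = 1/6 ~ 0.17, i.e. predict positive whenever the
# predicted probability exceeds roughly 0.17, reflecting the higher cost of misses.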
# Class-dependent cost-sensitive threshold
threshold_class = (cost_matrix_test[:, 1, 0].mean() - cost_matrix_test[:, 0, 0].mean()) / (
cost_matrix_test[:, 1, 0].mean() - cost_matrix_test[:, 0, 0].mean()
+ cost_matrix_test[:, 0, 1].mean() - cost_matrix_test[:, 1, 1].mean())
threshold_class = np.repeat(threshold_class, len(y_test))
# Class imbalance threshold
threshold_class_imbalance = y_train.mean()
# Cost-insensitive threshold
threshold_cost_ins = np.repeat(0.5, len(y_test))
# Define evaluation procedure for different thresholding strategies
def evaluate_model(proba_val, proba, j, index, info):
# ID CS Threshold:
pred = (proba > threshold_instance).astype(int)
self.results_tr_instance = get_performance_metrics(self.evaluators, self.results_tr_instance, j, index,
cost_matrix_test, y_test, proba, pred, info)
# ID CS Threshold with calibrated probabilities (using isotonic regression):
isotonic = IsotonicRegression(out_of_bounds='clip')
isotonic.fit(proba_val, y_val) # Fit on validation set!
proba_calibrated = isotonic.transform(proba)
pred = (proba_calibrated > threshold_instance).astype(int)
self.results_tr_instance_calibrated = get_performance_metrics(self.evaluators,
self.results_tr_instance_calibrated, j,
index, cost_matrix_test, y_test,
proba_calibrated, pred, info)
# CD CS Threshold:
pred = (proba > threshold_class).astype(int)
self.results_tr_class = get_performance_metrics(self.evaluators, self.results_tr_class, j, index,
cost_matrix_test, y_test, proba, pred, info)
# CD CS Threshold with calibrated probabilities:
pred = (proba_calibrated > threshold_class).astype(int)
self.results_tr_class_calibrated = get_performance_metrics(self.evaluators,
self.results_tr_class_calibrated, j, index,
cost_matrix_test, y_test, proba_calibrated, pred, info)
# Class imbalance Threshold:
pred = (proba > threshold_class_imbalance).astype(int)
self.results_tr_class_imb = get_performance_metrics(self.evaluators, self.results_tr_class_imb, j,
index, cost_matrix_test, y_test, proba, pred, info)
# Empirical thresholding: ID costs
threshold_opt_val = empirical_thresholding(proba_val, y_val, cost_matrix_val, metric='idcosts')
pred = (proba > threshold_opt_val).astype(int)
self.results_tr_empirical_id = get_performance_metrics(self.evaluators, self.results_tr_empirical_id, j,
index, cost_matrix_test, y_test, proba, pred,
info)
# Empirical thresholding: CD costs
threshold_opt_val = empirical_thresholding(proba_val, y_val, cost_matrix_val, metric='cdcosts')
pred = (proba > threshold_opt_val).astype(int)
self.results_tr_empirical_cd = get_performance_metrics(self.evaluators, self.results_tr_empirical_cd, j,
index, cost_matrix_test, y_test, proba, pred,
info)
# Empirical thresholding: F1
threshold_opt_val = empirical_thresholding(proba_val, y_val, cost_matrix_val, metric='f1')
pred = (proba > threshold_opt_val).astype(int)
self.results_tr_empirical_f1 = get_performance_metrics(self.evaluators, self.results_tr_empirical_f1,
j, index, cost_matrix_test, y_test, proba, pred,
info)
# Cost-insensitive threshold:
pred = (proba > threshold_cost_ins).astype(int)
self.results_tr_insensitive = get_performance_metrics(self.evaluators, self.results_tr_insensitive, j,
index, cost_matrix_test, y_test, proba, pred,
info)
# Logistic regression
if self.methodologies['logit']:
print('\tlogistic regression:')
# Get initial estimate for theta and create model
init_logit = LogisticRegression(penalty='none', max_iter=1, verbose=0, solver='sag', n_jobs=-1)
init_logit.fit(x_train, y_train)
init_theta = np.insert(init_logit.coef_, 0, values=init_logit.intercept_)
logit = CSLogit(init_theta, obj='ce')
# Tune regularization parameters, if necessary
logit.tune(self.l1, self.lambda1_list, self.l2, self.lambda2_list, x_train, y_train, cost_matrix_train,
x_val, y_val, cost_matrix_val)
lambda1 = logit.lambda1
lambda2 = logit.lambda2
start = time.perf_counter()
logit.fitting(x_train, y_train, cost_matrix_train)
end = time.perf_counter()
logit_proba = logit.predict(x_test)
logit_proba_val = logit.predict(x_val)
info = {'time': end - start, 'lambda1': lambda1, 'lambda2': lambda2, 'n_neurons': 0}
evaluate_model(logit_proba_val, logit_proba, i, index, info)
index += 1
# Weighted logistic regression
if self.methodologies['wlogit']:
print('\twlogit:')
try:
init_logit
except NameError:
init_logit = LogisticRegression(penalty='none', max_iter=1, verbose=False, solver='sag', n_jobs=-1)
init_logit.fit(x_train, y_train)
init_theta = np.insert(init_logit.coef_, 0, values=init_logit.intercept_)
wlogit = CSLogit(init_theta, obj='weightedce')
# Tune regularization parameters, if necessary
wlogit.tune(self.l1, self.lambda1_list, self.l2, self.lambda2_list, x_train, y_train, cost_matrix_train,
x_val, y_val, cost_matrix_val)
lambda1 = wlogit.lambda1
lambda2 = wlogit.lambda2
start = time.perf_counter()
wlogit.fitting(x_train, y_train, cost_matrix_train)
end = time.perf_counter()
wlogit_proba = wlogit.predict(x_test)
wlogit_proba_val = wlogit.predict(x_val)
info = {'time': end - start, 'lambda1': lambda1, 'lambda2': lambda2, 'n_neurons': 0}
evaluate_model(wlogit_proba_val, wlogit_proba, i, index, info)
index += 1
# Cost-sensitive logistic regression
if self.methodologies['cslogit']:
print('\tcslogit:')
try:
init_logit
except NameError:
init_logit = LogisticRegression(penalty='none', max_iter=1, verbose=False, solver='sag',
n_jobs=-1)
init_logit.fit(x_train, y_train)
init_theta = np.insert(init_logit.coef_, 0, values=init_logit.intercept_)
cslogit = CSLogit(init_theta, obj='aec')
cslogit.tune(self.l1, self.lambda1_list, self.l2, self.lambda2_list, x_train, y_train,
cost_matrix_train, x_val, y_val, cost_matrix_val)
lambda1 = cslogit.lambda1
lambda2 = cslogit.lambda2
start = time.perf_counter()
cslogit.fitting(x_train, y_train, cost_matrix_train)
end = time.perf_counter()
cslogit_proba = cslogit.predict(x_test)
cslogit_proba_val = cslogit.predict(x_val)
info = {'time': end - start, 'lambda1': lambda1, 'lambda2': lambda2, 'n_neurons': 0}
evaluate_model(cslogit_proba_val, cslogit_proba, i, index, info)
index += 1
if self.methodologies['net']:
print('\tneural network:')
neural_network = CSNeuralNetwork(n_inputs=x_train.shape[1], obj='ce', directory=directory)
neural_network = neural_network.tune(self.l1, self.lambda1_list, self.l2, self.lambda2_list,
self.neurons_list, x_train, y_train, cost_matrix_train, x_val,
y_val, cost_matrix_val)
lambda1 = neural_network.lambda1
lambda2 = neural_network.lambda2
start = time.perf_counter()
neural_network = neural_network.model_train(neural_network, x_train, y_train, x_val, y_val,
cost_matrix_train=cost_matrix_train,
cost_matrix_val=cost_matrix_val)
end = time.perf_counter()
nn_proba = neural_network.model_predict(neural_network, x_test)
nn_proba_val = neural_network.model_predict(neural_network, x_val)
info = {'time': end - start, 'lambda1': lambda1, 'lambda2': lambda2,
'n_neurons': neural_network.lin_layer1.out_features}
evaluate_model(nn_proba_val, nn_proba, i, index, info)
index += 1
if self.methodologies['wnet']:
print('\twnet:')
wnet = CSNeuralNetwork(n_inputs=x_train.shape[1], obj='weightedce', directory=directory)
wnet = wnet.tune(self.l1, self.lambda1_list, self.l2, self.lambda2_list, self.neurons_list, x_train,
y_train, cost_matrix_train, x_val, y_val, cost_matrix_val)
lambda1 = wnet.lambda1
lambda2 = wnet.lambda2
start = time.perf_counter()
wnet = wnet.model_train(wnet, x_train, y_train, x_val, y_val,
cost_matrix_train=cost_matrix_train, cost_matrix_val=cost_matrix_val)
end = time.perf_counter()
wnet_proba = wnet.model_predict(wnet, x_test)
wnet_proba_val = wnet.model_predict(wnet, x_val)
info = {'time': end - start, 'lambda1': lambda1, 'lambda2': lambda2,
'n_neurons': wnet.lin_layer1.out_features}
evaluate_model(wnet_proba_val, wnet_proba, i, index, info)
index +=
8*m.b42 + 8*m.b43 - m.x106 - m.x107 + m.x146 + m.x147 + m.x154 + m.x155 + m.x162 + m.x163
+ m.x202 + m.x203 <= 8)
m.c1826 = Constraint(expr= 8*m.b42 + 8*m.b44 - m.x106 - m.x108 + m.x146 + m.x148 + m.x154 + m.x156 + m.x162 + m.x164
+ m.x202 + m.x204 <= 8)
m.c1827 = Constraint(expr= 8*m.b42 + 8*m.b45 - m.x106 - m.x109 + m.x146 + m.x149 + m.x154 + m.x157 + m.x162 + m.x165
+ m.x202 + m.x205 <= 8)
m.c1828 = Constraint(expr= 8*m.b43 + 8*m.b46 - m.x107 - m.x110 + m.x147 + m.x150 + m.x155 + m.x158 + m.x163 + m.x166
+ m.x203 + m.x206 <= 8)
m.c1829 = Constraint(expr= 8*m.b43 + 8*m.b47 - m.x107 - m.x111 + m.x147 + m.x151 + m.x155 + m.x159 + m.x163 + m.x167
+ m.x203 + m.x207 <= 8)
m.c1830 = Constraint(expr= 8*m.b44 + 8*m.b48 - m.x108 - m.x112 + m.x148 + m.x152 + m.x156 + m.x160 + m.x164 + m.x168
+ m.x204 + m.x208 <= 8)
m.c1831 = Constraint(expr= 8*m.b45 + 8*m.b49 - m.x109 - m.x113 + m.x149 + m.x153 + m.x157 + m.x161 + m.x165 + m.x169
+ m.x205 + m.x209 <= 8)
m.c1832 = Constraint(expr= 8*m.b46 + 8*m.b48 - m.x110 - m.x112 + m.x150 + m.x152 + m.x158 + m.x160 + m.x166 + m.x168
+ m.x206 + m.x208 <= 8)
m.c1833 = Constraint(expr= 8*m.b47 + 8*m.b49 - m.x111 - m.x113 + m.x151 + m.x153 + m.x159 + m.x161 + m.x167 + m.x169
+ m.x207 + m.x209 <= 8)
m.c1834 = Constraint(expr= 8*m.b48 + 8*m.b49 - m.x112 - m.x113 + m.x152 + m.x153 + m.x160 + m.x161 + m.x168 + m.x169
+ m.x208 + m.x209 <= 8)
m.c1835 = Constraint(expr= 8*m.b50 + 8*m.b51 - m.x114 - m.x115 + m.x146 + m.x147 + m.x154 + m.x155 + m.x162 + m.x163
+ m.x170 + m.x171 + m.x202 + m.x203 <= 8)
m.c1836 = Constraint(expr= 8*m.b50 + 8*m.b52 - m.x114 - m.x116 + m.x146 + m.x148 + m.x154 + m.x156 + m.x162 + m.x164
+ m.x170 + m.x172 + m.x202 + m.x204 <= 8)
m.c1837 = Constraint(expr= 8*m.b50 + 8*m.b53 - m.x114 - m.x117 + m.x146 + m.x149 + m.x154 + m.x157 + m.x162 + m.x165
+ m.x170 + m.x173 + m.x202 + m.x205 <= 8)
m.c1838 = Constraint(expr= 8*m.b51 + 8*m.b54 - m.x115 - m.x118 + m.x147 + m.x150 + m.x155 + m.x158 + m.x163 + m.x166
+ m.x171 + m.x174 + m.x203 + m.x206 <= 8)
m.c1839 = Constraint(expr= 8*m.b51 + 8*m.b55 - m.x115 - m.x119 + m.x147 + m.x151 + m.x155 + m.x159 + m.x163 + m.x167
+ m.x171 + m.x175 + m.x203 + m.x207 <= 8)
m.c1840 = Constraint(expr= 8*m.b52 + 8*m.b56 - m.x116 - m.x120 + m.x148 + m.x152 + m.x156 + m.x160 + m.x164 + m.x168
+ m.x172 + m.x176 + m.x204 + m.x208 <= 8)
m.c1841 = Constraint(expr= 8*m.b53 + 8*m.b57 - m.x117 - m.x121 + m.x149 + m.x153 + m.x157 + m.x161 + m.x165 + m.x169
+ m.x173 + m.x177 + m.x205 + m.x209 <= 8)
m.c1842 = Constraint(expr= 8*m.b54 + 8*m.b56 - m.x118 - m.x120 + m.x150 + m.x152 + m.x158 + m.x160 + m.x166 + m.x168
+ m.x174 + m.x176 + m.x206 + m.x208 <= 8)
m.c1843 = Constraint(expr= 8*m.b55 + 8*m.b57 - m.x119 - m.x121 + m.x151 + m.x153 + m.x159 + m.x161 + m.x167 + m.x169
+ m.x175 + m.x177 + m.x207 + m.x209 <= 8)
m.c1844 = Constraint(expr= 8*m.b56 + 8*m.b57 - m.x120 - m.x121 + m.x152 + m.x153 + m.x160 + m.x161 + m.x168 + m.x169
+ m.x176 + m.x177 + m.x208 + m.x209 <= 8)
m.c1845 = Constraint(expr= 8*m.b58 + 8*m.b59 - m.x122 - m.x123 + m.x146 + m.x147 + m.x154 + m.x155 + m.x162 + m.x163
+ m.x170 + m.x171 + m.x178 + m.x179 + m.x202 + m.x203 <= 8)
m.c1846 = Constraint(expr= 8*m.b58 + 8*m.b60 - m.x122 - m.x124 + m.x146 + m.x148 + m.x154 + m.x156 + m.x162 + m.x164
+ m.x170 + m.x172 + m.x178 + m.x180 + m.x202 + m.x204 <= 8)
m.c1847 = Constraint(expr= 8*m.b58 + 8*m.b61 - m.x122 - m.x125 + m.x146 + m.x149 + m.x154 + m.x157 + m.x162 + m.x165
+ m.x170 + m.x173 + m.x178 + m.x181 + m.x202 + m.x205 <= 8)
m.c1848 = Constraint(expr= 8*m.b59 + 8*m.b62 - m.x123 - m.x126 + m.x147 + m.x150 + m.x155 + m.x158 + m.x163 + m.x166
+ m.x171 + m.x174 + m.x179 + m.x182 + m.x203 + m.x206 <= 8)
m.c1849 = Constraint(expr= 8*m.b59 + 8*m.b63 - m.x123 - m.x127 + m.x147 + m.x151 + m.x155 + m.x159 + m.x163 + m.x167
+ m.x171 + m.x175 + m.x179 + m.x183 + m.x203 + m.x207 <= 8)
m.c1850 = Constraint(expr= 8*m.b60 + 8*m.b64 - m.x124 - m.x128 + m.x148 + m.x152 + m.x156 + m.x160 + m.x164 + m.x168
+ m.x172 + m.x176 + m.x180 + m.x184 + m.x204 + m.x208 <= 8)
m.c1851 = Constraint(expr= 8*m.b61 + 8*m.b65 - m.x125 - m.x129 + m.x149 + m.x153 + m.x157 + m.x161 + m.x165 + m.x169
+ m.x173 + m.x177 + m.x181 + m.x185 + m.x205 + m.x209 <= 8)
m.c1852 = Constraint(expr= 8*m.b62 + 8*m.b64 - m.x126 - m.x128 + m.x150 + m.x152 + m.x158 + m.x160 + m.x166 + m.x168
+ m.x174 + m.x176 + m.x182 + m.x184 + m.x206 + m.x208 <= 8)
m.c1853 = Constraint(expr= 8*m.b63 + 8*m.b65 - m.x127 - m.x129 + m.x151 + m.x153 + m.x159 + m.x161 + m.x167 + m.x169
+ m.x175 + m.x177 + m.x183 + m.x185 + m.x207 + m.x209 <= 8)
m.c1854 = Constraint(expr= 8*m.b64 + 8*m.b65 - m.x128 - m.x129 + m.x152 + m.x153 + m.x160 + m.x161 + m.x168 + m.x169
+ m.x176 + m.x177 + m.x184 + m.x185 + m.x208 + m.x209 <= 8)
m.c1855 = Constraint(expr= 8*m.b26 + 8*m.b27 - m.x90 - m.x91 + m.x210 + m.x211 <= 8)
m.c1856 = Constraint(expr= 8*m.b26 + 8*m.b28 - m.x90 - m.x92 + m.x210 + m.x212 <= 8)
m.c1857 = Constraint(expr= 8*m.b26 + 8*m.b29 - m.x90 - m.x93 + m.x210 + m.x213 <= 8)
m.c1858 = Constraint(expr= 8*m.b27 + 8*m.b30 - m.x91 - m.x94 + m.x211 + m.x214 <= 8)
m.c1859 = Constraint(expr= 8*m.b27 + 8*m.b31 - m.x91 - m.x95 + m.x211 + m.x215 <= 8)
m.c1860 = Constraint(expr= 8*m.b28 + 8*m.b32 - m.x92 - m.x96 + m.x212 + m.x216 <= 8)
m.c1861 = Constraint(expr= 8*m.b29 + 8*m.b33 - m.x93 - m.x97 + m.x213 + m.x217 <= 8)
m.c1862 = Constraint(expr= 8*m.b30 + 8*m.b32 - m.x94 - m.x96 + m.x214 + m.x216 <= 8)
m.c1863 = Constraint(expr= 8*m.b31 + 8*m.b33 - m.x95 - m.x97 + m.x215 + m.x217 <= 8)
m.c1864 = Constraint(expr= 8*m.b32 + 8*m.b33 - m.x96 - m.x97 + m.x216 + m.x217 <= 8)
m.c1865 = Constraint(expr= 8*m.b34 + 8*m.b35 - m.x98 - m.x99 + m.x154 + m.x155 + m.x210 + m.x211 <= 8)
m.c1866 = Constraint(expr= 8*m.b34 + 8*m.b36 - m.x98 - m.x100 + m.x154 + m.x156 + m.x210 + m.x212 <= 8)
m.c1867 = Constraint(expr= 8*m.b34 + 8*m.b37 - m.x98 - m.x101 + m.x154 + m.x157 + m.x210 + m.x213 <= 8)
m.c1868 = Constraint(expr= 8*m.b35 + 8*m.b38 - m.x99 - m.x102 + m.x155 + m.x158 + m.x211 + m.x214 <= 8)
m.c1869 = Constraint(expr= 8*m.b35 + 8*m.b39 - m.x99 - m.x103 + m.x155 + m.x159 + m.x211 + m.x215 <= 8)
m.c1870 = Constraint(expr= 8*m.b36 + 8*m.b40 - m.x100 - m.x104 + m.x156 + m.x160 + m.x212 + m.x216 <= 8)
m.c1871 = Constraint(expr= 8*m.b37 + 8*m.b41 - m.x101 - m.x105 + m.x157 + m.x161 + m.x213 + m.x217 <= 8)
m.c1872 = Constraint(expr= 8*m.b38 + 8*m.b40 - m.x102 - m.x104 + m.x158 + m.x160 + m.x214 + m.x216 <= 8)
m.c1873 = Constraint(expr= 8*m.b39 + 8*m.b41 - m.x103 - m.x105 + m.x159 + m.x161 + m.x215 + m.x217 <= 8)
m.c1874 = Constraint(expr= 8*m.b40 + 8*m.b41 - m.x104 - m.x105 + m.x160 + m.x161 + m.x216 + m.x217 <= 8)
m.c1875 = Constraint(expr= 8*m.b42 + 8*m.b43 - m.x106 - m.x107 + m.x154 +
typing.Optional[str] = None,
prices: typing.Optional[typing.List["PriceDraft"]] = None,
attributes: typing.Optional[typing.List["Attribute"]] = None,
images: typing.Optional[typing.List["Image"]] = None
):
self.id = id
self.sku = sku
self.prices = prices
self.attributes = attributes
self.images = images
super().__init__()
@classmethod
def deserialize(
cls, data: typing.Dict[str, typing.Any]
) -> "ProductVariantImportDraft":
from ._schemas.order import ProductVariantImportDraftSchema
return ProductVariantImportDraftSchema().load(data)
def serialize(self) -> typing.Dict[str, typing.Any]:
from ._schemas.order import ProductVariantImportDraftSchema
return ProductVariantImportDraftSchema().dump(self)
class ReturnInfo(_BaseType):
items: typing.List["ReturnItem"]
#: Identifies, which return tracking ID is connected to this particular return.
return_tracking_id: typing.Optional[str]
return_date: typing.Optional[datetime.datetime]
def __init__(
self,
*,
items: typing.List["ReturnItem"],
return_tracking_id: typing.Optional[str] = None,
return_date: typing.Optional[datetime.datetime] = None
):
self.items = items
self.return_tracking_id = return_tracking_id
self.return_date = return_date
super().__init__()
@classmethod
def deserialize(cls, data: typing.Dict[str, typing.Any]) -> "ReturnInfo":
from ._schemas.order import ReturnInfoSchema
return ReturnInfoSchema().load(data)
def serialize(self) -> typing.Dict[str, typing.Any]:
from ._schemas.order import ReturnInfoSchema
return ReturnInfoSchema().dump(self)
class ReturnItem(_BaseType):
id: str
quantity: int
type: str
comment: typing.Optional[str]
shipment_state: "ReturnShipmentState"
payment_state: "ReturnPaymentState"
last_modified_at: datetime.datetime
created_at: datetime.datetime
def __init__(
self,
*,
id: str,
quantity: int,
type: str,
comment: typing.Optional[str] = None,
shipment_state: "ReturnShipmentState",
payment_state: "ReturnPaymentState",
last_modified_at: datetime.datetime,
created_at: datetime.datetime
):
self.id = id
self.quantity = quantity
self.type = type
self.comment = comment
self.shipment_state = shipment_state
self.payment_state = payment_state
self.last_modified_at = last_modified_at
self.created_at = created_at
super().__init__()
@classmethod
def deserialize(cls, data: typing.Dict[str, typing.Any]) -> "ReturnItem":
if data["type"] == "CustomLineItemReturnItem":
from ._schemas.order import CustomLineItemReturnItemSchema
return CustomLineItemReturnItemSchema().load(data)
if data["type"] == "LineItemReturnItem":
from ._schemas.order import LineItemReturnItemSchema
return LineItemReturnItemSchema().load(data)
def serialize(self) -> typing.Dict[str, typing.Any]:
from ._schemas.order import ReturnItemSchema
return ReturnItemSchema().dump(self)
class CustomLineItemReturnItem(ReturnItem):
custom_line_item_id: str
def __init__(
self,
*,
id: str,
quantity: int,
comment: typing.Optional[str] = None,
shipment_state: "ReturnShipmentState",
payment_state: "ReturnPaymentState",
last_modified_at: datetime.datetime,
created_at: datetime.datetime,
custom_line_item_id: str
):
self.custom_line_item_id = custom_line_item_id
super().__init__(
id=id,
quantity=quantity,
comment=comment,
shipment_state=shipment_state,
payment_state=payment_state,
last_modified_at=last_modified_at,
created_at=created_at,
type="CustomLineItemReturnItem",
)
@classmethod
def deserialize(
cls, data: typing.Dict[str, typing.Any]
) -> "CustomLineItemReturnItem":
from ._schemas.order import CustomLineItemReturnItemSchema
return CustomLineItemReturnItemSchema().load(data)
def serialize(self) -> typing.Dict[str, typing.Any]:
from ._schemas.order import CustomLineItemReturnItemSchema
return CustomLineItemReturnItemSchema().dump(self)
class LineItemReturnItem(ReturnItem):
line_item_id: str
def __init__(
self,
*,
id: str,
quantity: int,
comment: typing.Optional[str] = None,
shipment_state: "ReturnShipmentState",
payment_state: "ReturnPaymentState",
last_modified_at: datetime.datetime,
created_at: datetime.datetime,
line_item_id: str
):
self.line_item_id = line_item_id
super().__init__(
id=id,
quantity=quantity,
comment=comment,
shipment_state=shipment_state,
payment_state=payment_state,
last_modified_at=last_modified_at,
created_at=created_at,
type="LineItemReturnItem",
)
@classmethod
def deserialize(cls, data: typing.Dict[str, typing.Any]) -> "LineItemReturnItem":
from ._schemas.order import LineItemReturnItemSchema
return LineItemReturnItemSchema().load(data)
def serialize(self) -> typing.Dict[str, typing.Any]:
from ._schemas.order import LineItemReturnItemSchema
return LineItemReturnItemSchema().dump(self)
class ReturnItemDraft(_BaseType):
quantity: int
line_item_id: typing.Optional[str]
custom_line_item_id: typing.Optional[str]
comment: typing.Optional[str]
shipment_state: "ReturnShipmentState"
def __init__(
self,
*,
quantity: int,
line_item_id: typing.Optional[str] = None,
custom_line_item_id: typing.Optional[str] = None,
comment: typing.Optional[str] = None,
shipment_state: "ReturnShipmentState"
):
self.quantity = quantity
self.line_item_id = line_item_id
self.custom_line_item_id = custom_line_item_id
self.comment = comment
self.shipment_state = shipment_state
super().__init__()
@classmethod
def deserialize(cls, data: typing.Dict[str, typing.Any]) -> "ReturnItemDraft":
from ._schemas.order import ReturnItemDraftSchema
return ReturnItemDraftSchema().load(data)
def serialize(self) -> typing.Dict[str, typing.Any]:
from ._schemas.order import ReturnItemDraftSchema
return ReturnItemDraftSchema().dump(self)
class ReturnPaymentState(enum.Enum):
NON_REFUNDABLE = "NonRefundable"
INITIAL = "Initial"
REFUNDED = "Refunded"
NOT_REFUNDED = "NotRefunded"
class ReturnShipmentState(enum.Enum):
ADVISED = "Advised"
RETURNED = "Returned"
BACK_IN_STOCK = "BackInStock"
UNUSABLE = "Unusable"
class ShipmentState(enum.Enum):
SHIPPED = "Shipped"
READY = "Ready"
PENDING = "Pending"
DELAYED = "Delayed"
PARTIAL = "Partial"
BACKORDER = "Backorder"
class ShippingInfoImportDraft(_BaseType):
shipping_method_name: str
price: "Money"
#: The shipping rate used to determine the price.
shipping_rate: "ShippingRateDraft"
tax_rate: typing.Optional["TaxRate"]
tax_category: typing.Optional["TaxCategoryResourceIdentifier"]
#: Not set if custom shipping method is used.
shipping_method: typing.Optional["ShippingMethodResourceIdentifier"]
#: Deliveries are compilations of information on how the articles are being delivered to the customers.
deliveries: typing.Optional[typing.List["Delivery"]]
discounted_price: typing.Optional["DiscountedLineItemPriceDraft"]
#: Indicates whether the ShippingMethod referenced is allowed for the cart or not.
shipping_method_state: typing.Optional["ShippingMethodState"]
def __init__(
self,
*,
shipping_method_name: str,
price: "Money",
shipping_rate: "ShippingRateDraft",
tax_rate: typing.Optional["TaxRate"] = None,
tax_category: typing.Optional["TaxCategoryResourceIdentifier"] = None,
shipping_method: typing.Optional["ShippingMethodResourceIdentifier"] = None,
deliveries: typing.Optional[typing.List["Delivery"]] = None,
discounted_price: typing.Optional["DiscountedLineItemPriceDraft"] = None,
shipping_method_state: typing.Optional["ShippingMethodState"] = None
):
self.shipping_method_name = shipping_method_name
self.price = price
self.shipping_rate = shipping_rate
self.tax_rate = tax_rate
self.tax_category = tax_category
self.shipping_method = shipping_method
self.deliveries = deliveries
self.discounted_price = discounted_price
self.shipping_method_state = shipping_method_state
super().__init__()
@classmethod
def deserialize(
cls, data: typing.Dict[str, typing.Any]
) -> "ShippingInfoImportDraft":
from ._schemas.order import ShippingInfoImportDraftSchema
return ShippingInfoImportDraftSchema().load(data)
def serialize(self) -> typing.Dict[str, typing.Any]:
from ._schemas.order import ShippingInfoImportDraftSchema
return ShippingInfoImportDraftSchema().dump(self)
class SyncInfo(_BaseType):
#: Connection to a particular synchronization destination.
channel: "ChannelReference"
#: Can be used to reference an external order instance, file etc.
external_id: typing.Optional[str]
synced_at: datetime.datetime
def __init__(
self,
*,
channel: "ChannelReference",
external_id: typing.Optional[str] = None,
synced_at: datetime.datetime
):
self.channel = channel
self.external_id = external_id
self.synced_at = synced_at
super().__init__()
@classmethod
def deserialize(cls, data: typing.Dict[str, typing.Any]) -> "SyncInfo":
from ._schemas.order import SyncInfoSchema
return SyncInfoSchema().load(data)
def serialize(self) -> typing.Dict[str, typing.Any]:
from ._schemas.order import SyncInfoSchema
return SyncInfoSchema().dump(self)
class TaxedItemPriceDraft(_BaseType):
total_net: "Money"
total_gross: "Money"
def __init__(self, *, total_net: "Money", total_gross: "Money"):
self.total_net = total_net
self.total_gross = total_gross
super().__init__()
@classmethod
def deserialize(cls, data: typing.Dict[str, typing.Any]) -> "TaxedItemPriceDraft":
from ._schemas.order import TaxedItemPriceDraftSchema
return TaxedItemPriceDraftSchema().load(data)
def serialize(self) -> typing.Dict[str, typing.Any]:
from ._schemas.order import TaxedItemPriceDraftSchema
return TaxedItemPriceDraftSchema().dump(self)
class TrackingData(_BaseType):
#: The ID to track one parcel.
tracking_id: typing.Optional[str]
#: The carrier that delivers the parcel.
carrier: typing.Optional[str]
provider: typing.Optional[str]
provider_transaction: typing.Optional[str]
#: Flag to distinguish if the parcel is on the way to the customer (false) or on the way back (true).
is_return: typing.Optional[bool]
def __init__(
self,
*,
tracking_id: typing.Optional[str] = None,
carrier: typing.Optional[str] = None,
provider: typing.Optional[str] = None,
provider_transaction: typing.Optional[str] = None,
is_return: typing.Optional[bool] = None
):
self.tracking_id = tracking_id
self.carrier = carrier
self.provider = provider
self.provider_transaction = provider_transaction
self.is_return = is_return
super().__init__()
@classmethod
def deserialize(cls, data: typing.Dict[str, typing.Any]) -> "TrackingData":
from ._schemas.order import TrackingDataSchema
return TrackingDataSchema().load(data)
def serialize(self) -> typing.Dict[str, typing.Any]:
from ._schemas.order import TrackingDataSchema
return TrackingDataSchema().dump(self)
class OrderAddDeliveryAction(OrderUpdateAction):
items: typing.Optional[typing.List["DeliveryItem"]]
address: typing.Optional["Address"]
parcels: typing.Optional[typing.List["ParcelDraft"]]
def __init__(
self,
*,
items: typing.Optional[typing.List["DeliveryItem"]] = None,
address: typing.Optional["Address"] = None,
parcels: typing.Optional[typing.List["ParcelDraft"]] = None
):
self.items = items
self.address = address
self.parcels = parcels
super().__init__(action="addDelivery")
@classmethod
def deserialize(
cls, data: typing.Dict[str, typing.Any]
) -> "OrderAddDeliveryAction":
from ._schemas.order import OrderAddDeliveryActionSchema
return OrderAddDeliveryActionSchema().load(data)
def serialize(self) -> typing.Dict[str, typing.Any]:
from ._schemas.order import OrderAddDeliveryActionSchema
return OrderAddDeliveryActionSchema().dump(self)
class OrderAddItemShippingAddressAction(OrderUpdateAction):
address: "Address"
def __init__(self, *, address: "Address"):
self.address = address
super().__init__(action="addItemShippingAddress")
@classmethod
def deserialize(
cls, data: typing.Dict[str, typing.Any]
) -> "OrderAddItemShippingAddressAction":
from ._schemas.order import OrderAddItemShippingAddressActionSchema
return OrderAddItemShippingAddressActionSchema().load(data)
def serialize(self) -> typing.Dict[str, typing.Any]:
from ._schemas.order import OrderAddItemShippingAddressActionSchema
return OrderAddItemShippingAddressActionSchema().dump(self)
class OrderAddParcelToDeliveryAction(OrderUpdateAction):
delivery_id: str
measurements: typing.Optional["ParcelMeasurements"]
tracking_data: typing.Optional["TrackingData"]
items: typing.Optional[typing.List["DeliveryItem"]]
def __init__(
self,
*,
delivery_id: str,
measurements: typing.Optional["ParcelMeasurements"] = None,
tracking_data: typing.Optional["TrackingData"] = None,
items: typing.Optional[typing.List["DeliveryItem"]] = None
):
self.delivery_id = delivery_id
self.measurements = measurements
self.tracking_data = tracking_data
self.items = items
super().__init__(action="addParcelToDelivery")
@classmethod
def deserialize(
cls, data: typing.Dict[str, typing.Any]
) -> "OrderAddParcelToDeliveryAction":
from ._schemas.order import OrderAddParcelToDeliveryActionSchema
return OrderAddParcelToDeliveryActionSchema().load(data)
def serialize(self) -> typing.Dict[str, typing.Any]:
from ._schemas.order import OrderAddParcelToDeliveryActionSchema
return OrderAddParcelToDeliveryActionSchema().dump(self)
class OrderAddPaymentAction(OrderUpdateAction):
payment: "PaymentResourceIdentifier"
def __init__(self, *, payment: "PaymentResourceIdentifier"):
self.payment = payment
super().__init__(action="addPayment")
@classmethod
def deserialize(cls, data: typing.Dict[str, typing.Any]) -> "OrderAddPaymentAction":
from ._schemas.order import OrderAddPaymentActionSchema
return OrderAddPaymentActionSchema().load(data)
def serialize(self) -> typing.Dict[str, typing.Any]:
from ._schemas.order import OrderAddPaymentActionSchema
return OrderAddPaymentActionSchema().dump(self)
class OrderAddReturnInfoAction(OrderUpdateAction):
return_tracking_id: typing.Optional[str]
items: typing.List["ReturnItemDraft"]
return_date: typing.Optional[datetime.datetime]
def __init__(
self,
*,
return_tracking_id: typing.Optional[str] = None,
items: typing.List["ReturnItemDraft"],
return_date: typing.Optional[datetime.datetime] = None
):
self.return_tracking_id = return_tracking_id
self.items = items
self.return_date = return_date
super().__init__(action="addReturnInfo")
@classmethod
def deserialize(
cls, data: typing.Dict[str, typing.Any]
) -> "OrderAddReturnInfoAction":
from ._schemas.order import OrderAddReturnInfoActionSchema
return OrderAddReturnInfoActionSchema().load(data)
def serialize(self) -> typing.Dict[str, typing.Any]:
from ._schemas.order import OrderAddReturnInfoActionSchema
return OrderAddReturnInfoActionSchema().dump(self)
class OrderChangeOrderStateAction(OrderUpdateAction):
order_state: "OrderState"
def __init__(self, *, order_state: "OrderState"):
self.order_state = order_state
super().__init__(action="changeOrderState")
@classmethod
def deserialize(
cls, data: typing.Dict[str, typing.Any]
) -> "OrderChangeOrderStateAction":
from ._schemas.order import OrderChangeOrderStateActionSchema
return OrderChangeOrderStateActionSchema().load(data)
def serialize(self) -> typing.Dict[str, typing.Any]:
from ._schemas.order import OrderChangeOrderStateActionSchema
return OrderChangeOrderStateActionSchema().dump(self)
class OrderChangePaymentStateAction(OrderUpdateAction):
payment_state: typing.Optional["PaymentState"]
def __init__(self, *, payment_state: typing.Optional["PaymentState"] = None):
self.payment_state = payment_state
super().__init__(action="changePaymentState")
@classmethod
def deserialize(
cls, data: typing.Dict[str, typing.Any]
) -> "OrderChangePaymentStateAction":
from ._schemas.order import OrderChangePaymentStateActionSchema
return OrderChangePaymentStateActionSchema().load(data)
def serialize(self) -> typing.Dict[str, typing.Any]:
from ._schemas.order import OrderChangePaymentStateActionSchema
return OrderChangePaymentStateActionSchema().dump(self)
class OrderChangeShipmentStateAction(OrderUpdateAction):
shipment_state: typing.Optional["ShipmentState"]
def __init__(self, *, shipment_state: typing.Optional["ShipmentState"] = None):
self.shipment_state = shipment_state
super().__init__(action="changeShipmentState")
@classmethod
def deserialize(
cls, data: typing.Dict[str, typing.Any]
) -> "OrderChangeShipmentStateAction":
from ._schemas.order import OrderChangeShipmentStateActionSchema
return OrderChangeShipmentStateActionSchema().load(data)
def serialize(self) -> typing.Dict[str, typing.Any]:
from ._schemas.order import OrderChangeShipmentStateActionSchema
return OrderChangeShipmentStateActionSchema().dump(self)
class OrderImportCustomLineItemStateAction(OrderUpdateAction):
custom_line_item_id: str
state: typing.List["ItemState"]
def __init__(self, *, custom_line_item_id: str, state: typing.List["ItemState"]):
self.custom_line_item_id = custom_line_item_id
self.state = state
super().__init__(action="importCustomLineItemState")
@classmethod
def deserialize(
cls, data: typing.Dict[str, typing.Any]
) -> "OrderImportCustomLineItemStateAction":
| |
#!/usr/bin/env python2.3
#
# Copyright (C) 2004 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: <EMAIL>
# to discuss alternative licensing.
# -------------------------------------------------------------------------
"""\
====================================
Scheduler - runs things concurrently
====================================
The Scheduler runs active microprocesses - giving a regular timeslice to each.
It also provides the ability to pause and wake them; allowing an Axon based
system to play nicely and relinquish the cpu when idle.
* The Scheduler runs microprocesses that have been 'activated'
* The Scheduler is itself a microprocess
Using the scheduler
-------------------
The simplest way is to just use the default scheduler ``scheduler.run``.
Simply activate components or microprocesses then call the runThreads() method
of the scheduler::
from Axon.Scheduler import scheduler
from MyComponents import MyComponent, AnotherComponent
c1 = MyComponent().activate()
c2 = MyComponent().activate()
c3 = AnotherComponent().activate()
scheduler.run.runThreads()
Alternatively you can create a specific scheduler instance, and activate
them using that specific scheduler::
mySched = scheduler()
c1 = MyComponent().activate(Scheduler=mySched)
c2 = MyComponent().activate(Scheduler=mySched)
c3 = AnotherComponent().activate(Scheduler=mySched)
mySched.runThreads()
The runThreads() method is the way of bootstrapping the scheduler. Being a
microprocess, it needs something to schedule it! The runThreads() method does
exactly that.
The activate() method is fully thread-safe. It can handle multiple simultaneous
callers from different threads to the one the scheduler is running in.
Pausing and Waking microprocesses
---------------------------------
The Scheduler supports the ability to, in a thread safe manner, pause and wake
individual microprocesses under its control. Because it is thread safe, any
thread of execution can issue pause and wake requests for any scheduled
microprocess.
The pauseThread() and wakeThread() methods submit requests to pause or wake
microprocesses. The scheduler will process these when it is next able to - the
requests are queued rather than processed immediately. This is done to ensure
thread safety. It can handle multiple simultaneous callers from different
threads to the one the scheduler is running in.
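For example (a sketch only - ``mySched`` and ``someComponent`` are assumed to be
a scheduler instance and a microprocess that has already been activated with it)::
    mySched.pauseThread(someComponent)   # queue a request to pause it
    mySched.wakeThread(someComponent)    # queue a request to wake it again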
Pausing a microprocess means the scheduler removes it from its 'run queue'. This
means that it no longer executes that microprocess. Waking it puts it back into
the 'run queue'.
If no microprocesses are awake then the scheduler relinquishes cpu usage by
blocking.
If however this scheduler is itself being scheduled by another microprocess then
it does not block. Ideally it should ask its scheduler to pause it, but instead
it busy-waits - self pausing functionality is not yet implemented.
'yielding' new components for activation and replacement generators
-------------------------------------------------------------------
In general, the main() generator in a microprocess (its thread of execution)
can return any values it likes when it uses the ``yield`` statement. It is
recommended not to yield zeros or other kinds of 'false' values as these are
reserved for possible future special meaning.
However, this scheduler does understand certain values that can be yielded:
* **Axon.Ipc.newComponent** - a microprocess can yield this to ask the scheduler
to activate a new component or microprocess::
def main(self):
...
x=MyComponent()
yield Axon.Ipc.newComponent(x)
...
This is simply an alternative to calling x.activate().
* **Axon.Ipc.WaitComplete** - this is a way for a microprocess to substitute
itself (temporarily) with another one that uses a new generator.
For example::
def main(self):
...
yield Axon.Ipc.WaitComplete(self.waitOneSecond())
...
def waitOneSecond(self):
t=time.time()
while time.time() < t+1.0:
yield 1
This is a convenient way to modularise parts of your main() code. But there
is an important limitation with the current implementation:
* self.pause() will not cause the replacement generator to pause. (Where
'self' is the original microprocess - as in the example code above)
What happens when a microprocess finishes?
------------------------------------------
The scheduler will stop running it! It will call the microprocess's stop()
method. It will also call the _closeDownMicroprocess() method and will act on
the return value if it is one of the following:
* **Axon.Ipc.shutdownMicroprocess** - the specified microprocess will also be
stopped. Use with caution as the implementation is currently untested and
likely to fail, possibly even crash the scheduler!
* **Axon.Ipc.reactivate** - the specified microprocess will be (re)activated.
The scheduler uses this internally to pick up where it left off when a
Axon.Ipc.WaitComplete instigated detour finishes (see above).
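For example, a microprocess could ask for another microprocess to be
(re)activated when it finishes by overriding _closeDownMicroprocess() (a sketch
only; ``otherMicroprocess`` is a placeholder for one you hold a reference to)::
    def _closeDownMicroprocess(self):
        return Axon.Ipc.reactivate(otherMicroprocess)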
Querying the scheduler (Introspection)
--------------------------------------
The listAllThreads() method returns a list of all activated microprocesses -
both paused and awake.
The isThreadPaused() method lets you determine if an individual microprocess is
paused. Note that the result returned by this method is conservative (the
default assumption is that a thread is probably awake). The result may vary
depending on the exact moment it is called.
Both these methods are thread safe.
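A minimal introspection sketch (Python 2 era, in keeping with the other examples)::
    for mprocess in scheduler.run.listAllThreads():
        print mprocess, "paused:", scheduler.run.isThreadPaused(mprocess)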
Slowing down execution (for debugging)
--------------------------------------
It also has a slow motion mode designed to help with debugging & testing. Call
runThreads() with the slowmo argument set to the number of seconds the scheduler
should pause after each cycle of executing all microprocesses. For example, to
wait half a second after each cycle of execution::
scheduler.run.runThreads(slowmo=0.5)
How does it work internally?
----------------------------
The scheduler keeps the following internal state:
* **time** - updated to time.time() every execution cycle - can be inspected by
microprocesses instead of having to call time.time() themselves.
* **threads** - a dictionary containing the state of activated microprocesses
(whether they are awake or not)
* **wakeRequests** and **pauseRequests** - the thread safe queues of requests to
wake and pause individual microprocesses
* Internal to the main() generator:
* **runqueue** - the list of active and awake microprocesses being run
* **nextrunqueue** - the list of microprocesses to be run next time round
The scheduler uses a simple round robin approach - it walks through its run
queue and calls the next() method of each microprocess in turn. As it goes, it
builds a new run queue, ready for the next cycle. If a microprocess terminates
(raises a StopIteration exception) then it is not included in the next cycle's
run queue.
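In outline, one execution cycle looks roughly like this (a simplified sketch,
not the real main() generator - yielded values and sleep states are ignored)::
    while runqueue:
        nextrunqueue = []
        for mprocess in runqueue:
            try:
                mprocess.next()              # give it one timeslice
                nextrunqueue.append(mprocess)
            except StopIteration:
                pass                         # finished - drop from next cycle
        runqueue = nextrunqueue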
After it has gone through all microprocesses, the scheduler then processes
messages in its wakeRequests and pauseRequests queues. Pause requests are
processed first; then wake requests second. Suppose there is a pause and wake
request queued for the same microprocess; should it be left awake or put to
sleep? By processing wake requests last, the scheduler can err on the side of
caution and prefer to leave it awake.
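The request queues are then drained in that order, roughly like this (again a
simplified sketch, using the state names described just below)::
    while not pauseRequests.empty():
        mprocess = pauseRequests.get()
        if threads[mprocess] == ACTIVE:
            threads[mprocess] = GOINGTOSLEEP
    while not wakeRequests.empty():
        mprocess, isActivation = wakeRequests.get()
        if threads.get(mprocess) != ACTIVE:
            if threads.get(mprocess) == SLEEPING or isActivation:
                nextrunqueue.append(mprocess)
            threads[mprocess] = ACTIVE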
Microprocesses are all in one of three possible states (recorded in the
``threads`` dictionary):
* **ACTIVE** - the microprocess is awake. It should be in the run queue being
prepared for the next execution cycle.
* **SLEEPING** - the microprocess is asleep/paused. It should *not* be in the
run queue for the next cycle.
* **GOINGTOSLEEP** - the microprocess has been requested to be put to sleep.
A request to put a microprocess to sleep is handled as follows:
* If the microprocess is already *sleeping*, then nothing needs to happen.
* If the microprocess is *active*, then it is changed to "going to sleep". It
is not removed from the run queue immediately. Instead, what happens is:
* on the next cycle of execution, as the scheduler goes through items in the
run queue, it doesn't execute any that are "going to sleep" and doesn't
include them in the next run queue it is building. It also sets them to the
"sleeping" state,
Wake requests are used to both wake up sleeping microprocesses and also to
activate new ones. A request to wake a microprocess is handled like this:
* If the microprocess is already *active*, then nothing needs to happen.
* If the microprocess is *sleeping* then it is added to the next run queue and
changed to be *active*.
* If the microprocess is *going to sleep* then it is only changed to be *active*
(it will already be in the run queue, so doesn't need to be added)
If the request contains a flag indicating that this is actually an activation
request,
#!/usr/bin/env python
# Author: <NAME>
# Copyright 2014
# Unclassified
import Constants
import os, sys
import pexpect
import random
import time
import botlog
import Utils
import Data
import MetaData
import Mentat
import Strategy
from bbot import *
import botlog
import string
import State
import re
import json
from datetime import datetime
MAXWAIT = 60
class App:
def get_data_dir(self):
data_dir = self.get_app_value("data_dir")
data_dir = os.path.expandvars(data_dir)
return data_dir
def get_tag_str(self, joiner='_'):
if self.has_app_value('id'):
a = self.get_app_value('id')
else:
a = self.get_app_value('address')
a = a.split('.')
a = a[0]
s = joiner.join([
str(a),
str(self.get_app_value('realm'))])
return s
def file_roll(self, suffix):
name = self.get_tag_str() + '_' + suffix + ".txt"
name = Utils.clean_string(name)
logfile = os.path.join(self.get_data_dir(), name)
if os.path.exists(logfile):
name = self.get_tag_str() + suffix + ".txt"
name = Utils.clean_string(name)
logdir = os.path.join(self.get_data_dir(), 'old')
if not os.path.exists(logdir):
os.makedirs(logdir)
moddate = Utils.get_file_date_string(logfile)
oldname = (self.get_tag_str() + '_' + moddate + '_'
+ suffix + ".txt")
oldname = Utils.clean_string(oldname)
oldlogfile = os.path.join(logdir, oldname)
os.rename(logfile, oldlogfile)
return logfile
class ArgsEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime):
return Utils.date_to_string(obj)
return json.JSONEncoder.default(self, obj)
def write_options_dict(self):
argfile = self.file_roll("arg")
with open(argfile, 'w') as outfile:
json.dump(self.options, outfile, cls=App.ArgsEncoder, indent=3,
sort_keys=True)
def __init__(self, options, query_func, secret_query_func):
self.debug = False
self.options = options
# if reading options from file, dates will still be strings
for key, value in options.items():
if Utils.is_date(value):
options[key] = Utils.string_to_date(value)
self.query_func = query_func
self.secret_query_func = secret_query_func
self.data = Data.Data()
self.metadata = MetaData.MetaData(self)
self.mentat = Mentat.Mentat(self)
self.cur_state = None
self.match = None
self.match_index = None
self.match_re = None
self.wait_time = 0
self.telnet = None
self.strategies = None
self.EOF = False
self.adaptive_timeout = self.get_app_value('human_delay')
self.timeout_alpha = 0.1
self.min_timeout = self.adaptive_timeout
self.max_timeout = 20.0
self.no_email_reason = None
self.last_full_buf = None
self.last_buf = None
self.buf = ''
level = botlog.DEBUG
self.logfile = None
self.tracefile = self.file_roll('out')
debug = True
if not self.get_app_value('debug'):
level = botlog.INFO
self.logfile = self.file_roll('log')
debug = False
self.debug = debug
self.human_pause_ratio = self.get_app_value('human_pause_ratio')
botlog.configure(
msglevel=level,
format='\n%(asctime)s:%(levelname)s::%(message)s',
logpath=self.logfile,
tracepath=self.tracefile)
self.write_options_dict()
def has_app_value(self, key):
if key in self.options and self.options[key] is not None:
return True
environkey = Constants.ENVIRON_PREFIX + key
# otherwise check if value is in environment
if environkey in os.environ and os.environ[environkey] != '':
return True
def try_get_app_value(self, key, secret=False, default=None):
if not self.has_app_value(key):
botlog.debug("reading application value [default] " +
key + ' : ' + str(default))
return default
return self.get_app_value(key, secret)
def get_app_value(self, key, secret=False):
# Check if value is in options
if key in self.options and self.options[key] is not None:
if not secret:
botlog.debug(
'reading application value [options] ' + key + ' : ' + str(
self.options[key]))
return self.options[key]
environkey = Constants.ENVIRON_PREFIX + key
# otherwise check if value is in environment
if environkey in os.environ and os.environ[environkey] != '':
if not secret:
botlog.debug(
'reading application value [environment] ' + environkey + ' : ' +
os.environ[environkey])
return os.environ[environkey]
# otherwise call query function, or secretquery Func
if secret:
return self.secret_query_func('Please enter ' + key)
return self.query_func(key)
def get_close_float(self, x):
return random.uniform(x * 0.9, x * 1.1)
def read_until_full_buffer(self,timeout=-1):
buf = ""
max_iterations = 10
while buf == "" and max_iterations >= 0:
buf = self.read(timeout=timeout)
max_iterations -= 1
if buf == "":
raise Exception("Could not read full buffer")
return buf
def read_until(self, stop_text, timeout=-1, maxwait=MAXWAIT):
return self.read_until_any([stop_text], timeout=timeout,
maxwait=maxwait)
def read_until_any(self, stop_text_list, timeout=-1, maxwait=MAXWAIT):
stop_patterns = []
for t in stop_text_list:
stop_patterns.append(re.compile('.*' + re.escape(t) + '.*'))
while True:
b = self.read(timeout, stop_patterns=stop_patterns)
if self.match_re is not None:
return b
if self.wait_time > maxwait:
raise Exception("Could not read any of " + str(
stop_text_list) + " in " + str(maxwait) + " seconds")
def read(self, timeout=-1, stop_patterns=None):
txt = []
self.match = None
self.match_index = None
self.match_re = None
adaptive = False
if timeout < 0:
timeout = self.adaptive_timeout
adaptive = True
timeout = self.get_close_float(timeout)
botlog.debug(
"Reading with " + str(round(timeout, 1)) + " second timeout...")
while True:
# TODO: infinite loop guard
try:
x = self.telnet.read_nonblocking(size=1, timeout=timeout)
# print the read data in debug mode
if self.debug:
sys.stdout.write(x)
sys.stdout.flush()
txt.append(x)
if stop_patterns is not None:
buf = ''.join(txt)
lines = buf.splitlines()
line = lines[-1]
for i in range(len(stop_patterns)):
self.match = stop_patterns[i].match(line)
if self.match is not None:
self.match_index = i
self.match_re = stop_patterns[i]
break
if self.match is not None:
break
except pexpect.TIMEOUT:
break
except pexpect.EOF:
botlog.info("No more data can be read from telnet")
self.EOF = True
break
newbuf = ''.join(txt)
if len(newbuf) > 0:
self.wait_time = 0
if adaptive:
self.adaptive_timeout = (
1 - self.timeout_alpha / 2.0) * self.adaptive_timeout
if self.adaptive_timeout < self.min_timeout: self.adaptive_timeout = self.min_timeout
else:
self.wait_time = self.wait_time + timeout
if adaptive:
self.adaptive_timeout = (
1 + self.timeout_alpha * 2.0) * self.adaptive_timeout
if self.adaptive_timeout > self.max_timeout: self.adaptive_timeout = self.max_timeout
self.last_buf = self.buf
if len(self.last_buf) > 0:
self.last_full_buf = self.last_buf
self.buf = newbuf
return self.buf
def send(self, msg, eol=False, sleep=0.5, comment=''):
"""
Send a message to the client, using a rudimentary random delay to
roughly simulate a human's typing.
"""
msg = str(msg)
if msg is not None and len(msg) > 0:
botlog.info('Sending {' + msg + '} # ' + str(comment))
for c in msg:
if sleep > 0:
sleep = self.get_close_float(sleep)
time.sleep(sleep)
if 1 != self.telnet.send(c):
raise Exception("1 char not sent")
if eol:
botlog.info('Sending {\\r} # ' + str(comment))
if sleep > 0:
sleep = self.get_close_float(sleep)
time.sleep(sleep)
if 1 != self.telnet.send('\r'):
raise Exception("1 char not sent")
def sendl(self, msg='', sleep=0.5, comment=''):
self.send(msg, eol=True, sleep=sleep, comment=comment)
def send_seq(self, seq, comment=''):
botlog.debug("Begin Macro: " + str(seq))
for i in range(len(seq)):
msg = seq[i]
newcomment = comment + " (" + str(i + 1) + " of " + str(
len(seq)) + ")"
if msg == '\r':
self.sendl(comment=newcomment)
else:
self.send(msg, comment=newcomment)
# do not read after the last char in the sequence
if i < len(seq) - 1:
# the sequencing is problematic, because the programmer isn't
# explicitly waiting to be sure he is at a prompt, he is
# just sending separate chars and blindly reading for some
# time in between. We try to be smart and read until we get
# something
while not self.EOF:
# do not allow sequence to manipulate the adaptive timing
# because it is a weird case and can throw it off. We do
# however use the time as a good default timeout to use
b = self.read(timeout=self.adaptive_timeout)
if self.wait_time > MAXWAIT:
botlog.warn(
"Last full buffer:\n" + str(self.last_full_buf))
raise Exception(
"Waited for about " + str(MAXWAIT) + " seconds when sending macro and nothing happened")
if len(b) > 0:
break
botlog.debug("End Macro: " + str(seq))
return self.buf
def has_strategy(self, strategy_name):
for s in self.strategies:
if strategy_name == s.get_name():
return True
return False
def get_strategy(self, strategy_name):
for s in self.strategies:
if strategy_name == s.get_name():
return s
raise Exception("No strategy named: " + str(strategy_name))
def call_strategies(self, func_name):
ret = Strategy.UNHANDLED
for s in self.strategies:
botlog.cur_strat = s.get_name()
curret = eval("s." + func_name + "()")
if curret != Strategy.UNHANDLED:
ret = curret
botlog.cur_strat = ''
return ret
def on_main_menu(self):
return self.call_strategies("on_main_menu")
def on_bank_menu(self):
return self.call_strategies("on_bank_menu")
def on_attack_menu(self):
return self.call_strategies("on_attack_menu")
def on_spending_menu(self):
return self.call_strategies("on_spending_menu")
def on_industry_menu(self):
return self.call_strategies("on_industry_menu")
def on_trading_menu(self):
return self.call_strategies("on_trading_menu")
def on_diplomacy_menu(self):
return self.call_strategies("on_diplomacy_menu")
def on_interplanetary_menu(self):
return self.call_strategies("on_interplanetary_menu")
def init_strategies(self):
strats = []
if self.has_app_value('strategies'):
strats = self.get_app_value('strategies')
# if one string is provided, sometimes they can just give it to us as one string,
# convert it to a list to simplify
if isinstance(strats, basestring):
if ',' in strats:
strats = [x.strip() for x in strats.split(',')]
else:
strats = [strats]
# shouldn't happen with cmd line checking
if strats is None:
raise Exception("No Strategies provided")
# union with the default strategy handlers
default = []
strats = list(set(strats) | set(default))
# compile the strategies into indicators sorted by priority
self.strategies = Strategy.Strategies(self, strats)
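# Illustrative 'strategies' app values accepted above (names are hypothetical):
#   "banker"               -> ["banker"]
#   "banker, attacker"     -> ["banker", "attacker"]
#   ["banker", "attacker"] -> used as-is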
def run_loop(self):
exitState = State.BailOut().get_name()
while self.state.get_name() != exitState and not self.EOF:
if self.skip_next_read:
self.skip_next_read = False
else:
self.read()
if self.wait_time > MAXWAIT:
botlog.warn("Last full buffer:\n" + str(self.last_full_buf))
raise Exception(
"Waited for about " + str(
MAXWAIT) + " seconds in main | |
on the session_date sort key.
"""
dialogues = self.db.query(
KeyConditionExpression=Key("sender_id").eq(sender_id),
Limit=1,
ScanIndexForward=False,
)["Items"]
if not dialogues:
return None
events = dialogues[0].get("events", [])
# `float`s are stored as `Decimal` objects - we need to convert them back
events_with_floats = core_utils.replace_decimals_with_floats(events)
return DialogueStateTracker.from_dict(
sender_id, events_with_floats, self.domain.slots
)
def keys(self) -> Iterable[Text]:
"""Returns sender_ids of the `DynamoTrackerStore`."""
response = self.db.scan(ProjectionExpression="sender_id")
sender_ids = [i["sender_id"] for i in response["Items"]]
while response.get("LastEvaluatedKey"):
response = self.db.scan(
ProjectionExpression="sender_id",
ExclusiveStartKey=response["LastEvaluatedKey"],
)
sender_ids.extend([i["sender_id"] for i in response["Items"]])
return sender_ids
class MongoTrackerStore(TrackerStore):
"""Stores conversation history in Mongo.
Property methods:
conversations: returns the current conversation
"""
def __init__(
self,
domain: Domain,
host: Optional[Text] = "mongodb://localhost:27017",
db: Optional[Text] = "rasa",
username: Optional[Text] = None,
password: Optional[Text] = None,
auth_source: Optional[Text] = "admin",
collection: Optional[Text] = "conversations",
event_broker: Optional[EventBroker] = None,
**kwargs: Dict[Text, Any],
) -> None:
from pymongo.database import Database
from pymongo import MongoClient
self.client = MongoClient(
host,
username=username,
password=password,
authSource=auth_source,
# delay connect until process forking is done
connect=False,
)
self.db = Database(self.client, db)
self.collection = collection
super().__init__(domain, event_broker, **kwargs)
self._ensure_indices()
@property
def conversations(self) -> Collection:
"""Returns the current conversation."""
return self.db[self.collection]
def _ensure_indices(self) -> None:
"""Create an index on the sender_id."""
self.conversations.create_index("sender_id")
@staticmethod
def _current_tracker_state_without_events(tracker: DialogueStateTracker) -> Dict:
# get current tracker state and remove `events` key from state
# since events are pushed separately in the `update_one()` operation
state = tracker.current_state(EventVerbosity.ALL)
state.pop("events", None)
return state
def save(self, tracker: DialogueStateTracker) -> None:
"""Saves the current conversation state."""
if self.event_broker:
self.stream_events(tracker)
additional_events = self._additional_events(tracker)
self.conversations.update_one(
{"sender_id": tracker.sender_id},
{
"$set": self._current_tracker_state_without_events(tracker),
"$push": {
"events": {"$each": [e.as_dict() for e in additional_events]}
},
},
upsert=True,
)
def _additional_events(self, tracker: DialogueStateTracker) -> Iterator:
"""Return events from the tracker which aren't currently stored.
Args:
tracker: Tracker to inspect.
Returns:
List of serialised events that aren't currently stored.
"""
stored = self.conversations.find_one({"sender_id": tracker.sender_id}) or {}
all_events = self._events_from_serialized_tracker(stored)
number_events_since_last_session = len(
self._events_since_last_session_start(all_events)
)
return itertools.islice(
tracker.events, number_events_since_last_session, len(tracker.events)
)
@staticmethod
def _events_from_serialized_tracker(serialised: Dict) -> List[Dict]:
return serialised.get("events", [])
@staticmethod
def _events_since_last_session_start(events: List[Dict]) -> List[Dict]:
"""Retrieve events since and including the latest `SessionStart` event.
Args:
events: All events for a conversation ID.
Returns:
List of serialised events since and including the latest `SessionStarted`
event. Returns all events if no such event is found.
"""
events_after_session_start = []
for event in reversed(events):
events_after_session_start.append(event)
if event["event"] == SessionStarted.type_name:
break
return list(reversed(events_after_session_start))
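# Sketch of the behaviour above with hypothetical serialised events, assuming
# SessionStarted.type_name == "session_started":
#   events = [{"event": "action"}, {"event": "session_started"}, {"event": "user"}]
#   _events_since_last_session_start(events)
#   -> [{"event": "session_started"}, {"event": "user"}]
# If no session-start event is present, all events are returned.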
def _retrieve(
self, sender_id: Text, fetch_events_from_all_sessions: bool
) -> Optional[List[Dict[Text, Any]]]:
stored = self.conversations.find_one({"sender_id": sender_id})
# look for conversations which have used an `int` sender_id in the past
# and update them.
if not stored and sender_id.isdigit():
from pymongo import ReturnDocument
stored = self.conversations.find_one_and_update(
{"sender_id": int(sender_id)},
{"$set": {"sender_id": str(sender_id)}},
return_document=ReturnDocument.AFTER,
)
if not stored:
return None
events = self._events_from_serialized_tracker(stored)
if not fetch_events_from_all_sessions:
events = self._events_since_last_session_start(events)
return events
def retrieve(self, sender_id: Text) -> Optional[DialogueStateTracker]:
"""Retrieves tracker for the latest conversation session."""
events = self._retrieve(sender_id, fetch_events_from_all_sessions=False)
if not events:
return None
return DialogueStateTracker.from_dict(sender_id, events, self.domain.slots)
def retrieve_full_tracker(
self, conversation_id: Text
) -> Optional[DialogueStateTracker]:
"""Fetching all tracker events across conversation sessions."""
events = self._retrieve(conversation_id, fetch_events_from_all_sessions=True)
if not events:
return None
return DialogueStateTracker.from_dict(
conversation_id, events, self.domain.slots
)
def keys(self) -> Iterable[Text]:
"""Returns sender_ids of the Mongo Tracker Store."""
return [c["sender_id"] for c in self.conversations.find()]
def _create_sequence(table_name: Text) -> "Sequence":
"""Creates a sequence object for a specific table name.
If using Oracle you will need to create a sequence in your database,
as described here: https://rasa.com/docs/rasa/tracker-stores#sqltrackerstore
Args:
table_name: The name of the table, which gets a Sequence assigned
Returns: A `Sequence` object
"""
from sqlalchemy.ext.declarative import declarative_base
sequence_name = f"{table_name}_seq"
Base = declarative_base()
return sa.Sequence(sequence_name, metadata=Base.metadata, optional=True)
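# For example, _create_sequence("events") returns a Sequence named "events_seq";
# on Oracle this corresponds to a database sequence that must already exist,
# roughly: CREATE SEQUENCE events_seq;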
def is_postgresql_url(url: Union[Text, "URL"]) -> bool:
"""Determine whether `url` configures a PostgreSQL connection.
Args:
url: SQL connection URL.
Returns:
`True` if `url` is a PostgreSQL connection URL.
"""
if isinstance(url, str):
return "postgresql" in url
return url.drivername == "postgresql"
def create_engine_kwargs(url: Union[Text, "URL"]) -> Dict[Text, Any]:
"""Get `sqlalchemy.create_engine()` kwargs.
Args:
url: SQL connection URL.
Returns:
kwargs to be passed into `sqlalchemy.create_engine()`.
"""
if not is_postgresql_url(url):
return {}
kwargs = {}
schema_name = os.environ.get(POSTGRESQL_SCHEMA)
if schema_name:
logger.debug(f"Using PostgreSQL schema '{schema_name}'.")
kwargs["connect_args"] = {"options": f"-csearch_path={schema_name}"}
# pool_size and max_overflow can be set to control the number of
# connections that are kept in the connection pool. Not available
# for SQLite, and only tested for PostgreSQL. See
# https://docs.sqlalchemy.org/en/13/core/pooling.html#sqlalchemy.pool.QueuePool
kwargs["pool_size"] = int(
os.environ.get(POSTGRESQL_POOL_SIZE, POSTGRESQL_DEFAULT_POOL_SIZE)
)
kwargs["max_overflow"] = int(
os.environ.get(POSTGRESQL_MAX_OVERFLOW, POSTGRESQL_DEFAULT_MAX_OVERFLOW)
)
return kwargs
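# Sketch of the resulting kwargs for a PostgreSQL URL, assuming the
# POSTGRESQL_* constants name environment variables of the same name
# (values are examples only):
#   POSTGRESQL_SCHEMA=analytics POSTGRESQL_POOL_SIZE=10 POSTGRESQL_MAX_OVERFLOW=20
#   create_engine_kwargs("postgresql://user:pass@localhost/rasa")
#   -> {"connect_args": {"options": "-csearch_path=analytics"},
#       "pool_size": 10, "max_overflow": 20}
# For any non-PostgreSQL URL an empty dict is returned.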
def ensure_schema_exists(session: "Session") -> None:
"""Ensure that the requested PostgreSQL schema exists in the database.
Args:
session: Session used to inspect the database.
Raises:
`ValueError` if the requested schema does not exist.
"""
schema_name = os.environ.get(POSTGRESQL_SCHEMA)
if not schema_name:
return
engine = session.get_bind()
if is_postgresql_url(engine.url):
query = sa.exists(
sa.select([(sa.text("schema_name"))])
.select_from(sa.text("information_schema.schemata"))
.where(sa.text(f"schema_name = '{schema_name}'"))
)
if not session.query(query).scalar():
raise ValueError(schema_name)
class SQLTrackerStore(TrackerStore):
"""Store which can save and retrieve trackers from an SQL database."""
Base: DeclarativeMeta = declarative_base()
class SQLEvent(Base):
"""Represents an event in the SQL Tracker Store."""
__tablename__ = "events"
# `create_sequence` is needed to create a sequence for databases that
# don't autoincrement Integer primary keys (e.g. Oracle)
id = sa.Column(sa.Integer, _create_sequence(__tablename__), primary_key=True)
sender_id = sa.Column(sa.String(255), nullable=False, index=True)
type_name = sa.Column(sa.String(255), nullable=False)
timestamp = sa.Column(sa.Float)
intent_name = sa.Column(sa.String(255))
action_name = sa.Column(sa.String(255))
data = sa.Column(sa.Text)
def __init__(
self,
domain: Optional[Domain] = None,
dialect: Text = "sqlite",
host: Optional[Text] = None,
port: Optional[int] = None,
db: Text = "rasa.db",
username: Text = None,
password: Text = None,
event_broker: Optional[EventBroker] = None,
login_db: Optional[Text] = None,
query: Optional[Dict] = None,
**kwargs: Dict[Text, Any],
) -> None:
import sqlalchemy.exc
engine_url = self.get_db_url(
dialect, host, port, db, username, password, login_db, query
)
self.engine = sa.create_engine(engine_url, **create_engine_kwargs(engine_url))
logger.debug(
f"Attempting to connect to database via '{repr(self.engine.url)}'."
)
# Database might take a while to come up
while True:
try:
# if `login_db` has been provided, use current channel with
# that database to create working database `db`
if login_db:
self._create_database_and_update_engine(db, engine_url)
try:
self.Base.metadata.create_all(self.engine)
except (
sqlalchemy.exc.OperationalError,
sqlalchemy.exc.ProgrammingError,
) as e:
# Several Rasa services started in parallel may attempt to
# create tables at the same time. That is okay so long as
# the first service finishes the table creation.
logger.error(f"Could not create tables: {e}")
self.sessionmaker = sa.orm.session.sessionmaker(bind=self.engine)
break
except (
sqlalchemy.exc.OperationalError,
sqlalchemy.exc.IntegrityError,
) as error:
logger.warning(error)
sleep(5)
logger.debug(f"Connection to SQL database '{db}' successful.")
super().__init__(domain, event_broker, **kwargs)
@staticmethod
def get_db_url(
dialect: Text = "sqlite",
host: Optional[Text] = None,
port: Optional[int] = None,
db: Text = "rasa.db",
username: Text = None,
password: Text = None,
login_db: Optional[Text] = None,
query: Optional[Dict] = None,
) -> Union[Text, "URL"]:
"""Build an SQLAlchemy `URL` object representing the parameters needed
to connect to an SQL database.
Args:
dialect: SQL database type.
host: Database network host.
port: Database network port.
db: Database name.
username: User name to use when connecting to the database.
password: Password to use when connecting to the database.
login_db: Alternative database name to which initially connect, and create
the database specified by `db` (PostgreSQL only).
query: Dictionary of options to be passed to the dialect and/or the
DBAPI upon connect.
Returns:
URL ready to be used with an SQLAlchemy `Engine` object.
"""
from urllib import parse
# Users might specify a url in the host
if host and "://" in host:
# assumes this is a complete database URL including the scheme,
# e.g. `postgres://...`
return host
elif host:
# add fake scheme to properly parse components
parsed = parse.urlsplit(f"scheme://{host}")
# users might include the port in the url
port = parsed.port or port
host = parsed.hostname or host
return sa.engine.url.URL(
dialect,
username,
password,
host,
port,
database=login_db if login_db else db,
query=query,
)
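# Example (sketch): get_db_url(dialect="postgresql", host="localhost", port=5432,
# db="rasa", username="admin", password="secret") builds a URL equivalent to
# postgresql://admin:secret@localhost:5432/rasa, while a host that already
# contains "://" is returned unchanged.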
def _create_database_and_update_engine(self, db: Text, engine_url: "URL") -> None:
"""Creates database `db` and updates engine accordingly."""
from sqlalchemy import create_engine
if not self.engine.dialect.name == "postgresql":
rasa.shared.utils.io.raise_warning(
"The parameter 'login_db' can only be used with a postgres database."
)
return
self._create_database(self.engine, db)
self.engine.dispose()
engine_url = sa.engine.url.URL(
drivername=engine_url.drivername,
username=engine_url.username,
password=engine_url.password,
host=engine_url.host,
port=engine_url.port,
database=db,
query=engine_url.query,
)
self.engine = create_engine(engine_url)
@staticmethod
| |
<gh_stars>0
"""
Definitions of standard library functions and methods, and their
conversions into Rust
"""
import sys
import ast
from var_utils import is_iterator_type, is_reference_type, \
dict_type_from_list, strip_container, detemplatise, \
extract_types, UNKNOWN_TYPE
ALLOWED_COMPARISON_OPERATORS = { "Eq", "NotEq", "Lt", "LtE", "Gt", "GtE" }
REPLACE_CONSTANTS = {
True : "true",
False : "false",
}
# Fortunately, the precedence of Python operators is the same as in Rust,
# except for ** (which doesn't exist in Rust), is/in (which don't exist in
# Rust), and "not", which has the very highest precedence in Rust but sits
# just above the other boolean operators in Python.
OPERATOR_PRECEDENCE = {
"Pow": 12,
"UAdd": 11, "USub": 11, "Invert": 11, "Not": 11,
"Mult": 10, "Div": 10, "FloorDiv": 10, "Mod": 10,
"Add": 9, "Sub": 9,
"LShift": 8, "RShift": 8,
"BitAnd": 7,
"BitXor": 6,
"BitOr": 5,
"Eq": 4, "NotEq": 4, "Gt": 4, "GtE": 4, "Lt": 4, "LtE": 4,
# "Not": 3, (this would be right for Python, but not for Rust)
"And": 2,
"Or": 1,
}
# One bigger than any actual precedence. Use this to force parentheses
MAX_PRECEDENCE = 13
STANDARD_METHOD_RETURNS = {
("HashMap<_>", "keys"): lambda types: f"[{types[0]}]",
("HashMap<_>", "values"): lambda types: f"[{types[1]}]",
("HashMap<_>", "items"): lambda types: f"[({types[0]}, {types[1]})]",
("HashMap<_>", "get"): lambda types: f"&{types[1]}",
("HashMap<_>", "clear"): lambda types: "()",
("HashMap<_>", "update"): lambda types: "()",
("HashMap<_>", "pop"): lambda types: types[1],
("HashMap<_>", "popitem"): lambda types: f"({types[0]}, {types[1]})",
("HashMap<_>", "setdefault"): lambda types: f"&{types[1]}",
("HashSet<_>", "add"): lambda types: "()",
("HashSet<_>", "clear"): lambda types: "()",
("HashSet<_>", "copy"): lambda types: f"HashSet<{types[0]}>",
("HashSet<_>", "difference"): lambda types: f"HashSet<{types[0]}>",
("HashSet<_>", "difference_update"): lambda types: "()",
("HashSet<_>", "discard"): lambda types: "()",
("HashSet<_>", "intersection"): lambda types: f"HashSet<{types[0]}>",
("HashSet<_>", "intersection_update"): lambda types: "()",
("HashSet<_>", "isdisjoint"): lambda types: "bool",
("HashSet<_>", "issubset"): lambda types: "bool",
("HashSet<_>", "issuperset"): lambda types: "bool",
("HashSet<_>", "remove"): lambda types: "()",
("HashSet<_>", "symmetric_difference"): lambda types: f"HashSet<{types[0]}>",
("HashSet<_>", "symmetric_difference_update"): lambda types: "()",
("HashSet<_>", "union"): lambda types: f"HashSet<{types[0]}>",
("HashSet<_>", "union_update"): lambda types: "()",
("Vec<_>", "append"): lambda types: "()",
("Vec<_>", "insert"): lambda types: "()",
("Vec<_>", "extend"): lambda types: "()",
("Vec<_>", "index"): lambda types: "i64",
("Vec<_>", "sum"): lambda types: types[0],
("Vec<_>", "count"): lambda types: "i64",
("Vec<_>", "min"): lambda types: types[0],
("Vec<_>", "max"): lambda types: types[0],
("Vec<_>", "reverse"): lambda types: "()",
("Vec<_>", "sort"): lambda types: "()",
("Vec<_>", "pop"): lambda types: types[0],
}
STANDARD_METHODS = {
("HashMap<_>", "get") : lambda v, n: handle_get_or_default("get", v, n, True),
("HashMap<_>", "items"): lambda v, n: handle_items(v, n),
("HashMap<_>", "pop") : lambda v, n: handle_get_or_default("remove", v, n, False),
("HashMap<_>", "popitem"): lambda v, n: handle_popitem(v, n),
("HashMap<_>", "setdefault"): lambda v, n: handle_set_default(v, n),
("HashMap<_>", "update"): lambda v, n: handle_update(v, n),
("HashSet<_>", "add") : lambda v, n: handle_method("insert", v, n),
("HashSet<_>", "clear"): lambda v, n: handle_method("clear", v, n),
("HashSet<_>", "copy"): lambda v, n: handle_method("clone", v, n),
("HashSet<_>", "difference"): lambda v, n: handle_collect("difference", v, n),
("HashSet<_>", "difference_update"): lambda v, n: handle_todo("difference_update", v, n),
("HashSet<_>", "discard"): lambda v, n: handle_refargs("remove", v, n),
("HashSet<_>", "intersection"): lambda v, n: handle_collect("intersection", v, n),
("HashSet<_>", "intersection_update"): lambda v, n: handle_todo("intersection_update", v, n),
("HashSet<_>", "isdisjoint"): lambda v, n: handle_refargs("is_disjoint", v, n),
("HashSet<_>", "issubset"): lambda v, n: handle_refargs("is_subset", v, n),
("HashSet<_>", "issuperset"): lambda v, n: handle_refargs("is_superset", v, n),
("HashSet<_>", "remove"): lambda v, n: handle_refargs("remove", v, n),
("HashSet<_>", "symmetric_difference"): lambda v, n: handle_collect("symmetric_difference", v, n),
("HashSet<_>", "symmetric_difference_update"): lambda v, n: handle_todo("symmetric_difference_update", v, n),
("HashSet<_>", "union"): lambda v, n: handle_collect("union", v, n),
("HashSet<_>", "union_update"): lambda v, n: handle_method("union_update", v, n),
("Vec<_>", "append") : lambda v, n: handle_method("push", v, n),
("Vec<_>", "insert"): lambda v, n: handle_method("insert", v, n),
("Vec<_>", "extend"): lambda v, n: handle_method("extend", v, n),
("Vec<_>", "index"): lambda v, n: handle_index(v, n),
("Vec<_>", "sum"): lambda v, n: handle_sum(v, n),
("Vec<_>", "count"): lambda v, n: handle_count(v, n),
("Vec<_>", "min"): lambda v, n: handle_iter_method_unwrapped("min", v, n),
("Vec<_>", "max"): lambda v, n: handle_iter_method_unwrapped("max", v, n),
("Vec<_>", "reverse"): lambda v, n: handle_method("reverse", v, n),
("Vec<_>", "sort"): lambda v, n: handle_method("sort", v, n),
("Vec<_>", "pop"): lambda v, n: handle_method_unwrapped("pop", v, n),
}
# Mapping from Python function name to Rust return type
STANDARD_FUNCTION_RETURNS = {
"dict": lambda args: dict_type_from_list(args[0]),
"print": lambda args: "()",
"range": lambda args: f"[{args[0]}]",
"zip": lambda args: f"[({', '.join([ strip_container(x) for x in args ])})]",
"len": lambda args: "i64",
}
STANDARD_FUNCTIONS = {
"dict": lambda visitor, node: handle_dict(visitor, node),
"len": lambda visitor, node: handle_len(visitor, node),
"print": lambda visitor, node: handle_print(visitor, node),
"range": lambda visitor, node: handle_range(visitor, node),
"zip": lambda visitor, node: handle_zip(visitor, node),
}
def method_return_type(class_type: str, method_name: str) -> str:
"""
Given the name of a class and a method on the class, return
the return type of the method.
"""
method = (detemplatise(class_type), method_name)
if method not in STANDARD_METHOD_RETURNS:
return UNKNOWN_TYPE
types = extract_types(class_type)
return STANDARD_METHOD_RETURNS[method](types)
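# Example (assuming detemplatise/extract_types behave as their names suggest):
#   method_return_type("HashMap<String, i64>", "keys") looks up
#   ("HashMap<_>", "keys") and returns "[String]"; an unknown class/method
#   pair yields UNKNOWN_TYPE.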
def handle_method(method_name: str, visitor, node):
"""
Handle a method that takes args that may need a to_string, such as push
"""
print(f".{method_name}(", end='')
separator = ""
for arg in node.args:
print(separator, end='')
visitor.visit_and_optionally_convert(arg)
separator = ", "
print(")", end='')
def handle_todo(method_name: str, visitor, node):
"""
Handle a method that is not currently supported, for example
because there is no equivalent in Rust.
Replace it with the "clear" method, which at least ensures
the Rust compiles cleanly and does not return unwanted data.
"""
print(f"Warning: there is no Rust equivalent of {method_name}", file=sys.stderr)
print(".clear();")
print(f"{visitor.pretty()}// TODO {method_name}(", end='')
separator = ""
for arg in node.args:
print(separator, end='')
visitor.visit_and_optionally_convert(arg)
separator = ", "
print(")", end='')
def handle_method_unwrapped(method_name: str, visitor, node):
handle_method(method_name, visitor, node)
print(".unwrap()", end='')
def handle_iter_method(method_name: str, visitor, node):
print_iter_if_needed(visitor.type_by_node[node.func])
handle_method(method_name, visitor, node)
def handle_iter_method_unwrapped(method_name: str, visitor, node):
handle_iter_method(method_name, visitor, node)
print(".unwrap()", end='')
def handle_refargs(method_name: str, visitor, node):
"""
Handle a method that takes reference args, such as insert
"""
print(f".{method_name}(", end='')
separator = ""
for arg in node.args:
print(separator, end='')
add_reference_if_needed(visitor.type_by_node[arg])
visitor.visit(arg)
separator = ", "
print(")", end='')
def handle_collect(method_name: str, visitor, node):
"""
Handle a method that takes reference args and returns an
iterator that must be collected, such as intersection.
"""
print(f".{method_name}(", end='')
separator = ""
for arg in node.args:
print(separator, end='')
add_reference_if_needed(visitor.type_by_node[arg])
visitor.visit_and_optionally_convert(arg)
separator = ", "
typed = visitor.type_by_node[node.func]
print(f").cloned().collect::<{typed}>()", end='')
def handle_get_or_default(method_name: str, visitor, node, returns_ref: bool):
"""
Handle a method on a Map that returns either a value from
the map or a default value.
"""
print(f".{method_name}(", end='')
add_reference_if_needed(visitor.type_by_node[node.args[0]])
visitor.visit(node.args[0])
print(").unwrap_or(", end='')
if returns_ref:
# note we should always add a reference (&) as
# visit_and_optionally_convert always converts a reference
print("&", end='')
visitor.visit_and_optionally_convert(node.args[1])
print(")", end='')
def handle_set_default(visitor, node):
"""
In Python, set_default returns the value associated with the
given key if it is in the dictionary. Otherwise it adds the
given default value to the dictionary and returns that.
In Rust, the entry() method returns an optional value, and
or_insert optionally inserts its argument. This does the
same as Python, though more flexibly.
"""
print(".entry(", end='')
add_reference_if_needed(visitor.type_by_node[node.args[0]])
visitor.visit_and_optionally_convert(node.args[0])
print(").or_insert(", end='')
visitor.visit_and_optionally_convert(node.args[1])
print(")", end='')
def add_reference_if_needed(typed: str):
"""
Adds a & character to make a reference if needed.
"""
if not is_reference_type(typed):
print("&", end='')
def print_iter_if_needed(typed: str):
"""
If the given type is not already an iterator, invoke
.iter() to make one
"""
if not is_iterator_type(typed):
print(".iter()", end='')
def handle_items(visitor, node):
"""
Returns an iterator to a (key, value) pair. In Rust this is tricky
because iter() returns an iterator to (&key, &value) so we need
to convert this.
This is an example of a place where Rust is really hard to handle
because of its rules about borrowing, and the lack of overloaded
functions.
"""
print(".iter().map(|(ref k, ref v)| ((*k).clone(), (*v).clone()))", end='')
def handle_popitem(visitor, node):
"""
In Python returns some arbitrary (key, value) pair, which is removed.
Rust has a similar remove_entry, but this requires a key. We use
drain, which returns an iterator, and just take the first entry.
If the iterator is exhausted, in other words there are no more elements,
Rust, like Python, just panics. (Why is this sensible behaviour?)
"""
print(".drain().next().unwrap()", end='')
def handle_update(visitor, node):
"""
In Python, update takes an iterator yielding (key, value) pairs
or a dictionary, and adds them all to self. The equivalent in
Rust is extend.
"""
print(".extend(", end='')
visitor.visit(node.args[0])
print_iter_if_needed(visitor.type_by_node[node.args[0]])
print(")", end='')
def handle_count(visitor, node):
"""
In Python, the count method counts the number of occurrences of a value in the list.
<reponame>profx5/naz<gh_stars>0
import typing
# TODO: try and turn these classes to enum
class SmppSessionState:
"""
Represents the states an SMPP session can be in.
"""
# see section 2.2 of SMPP spec document v3.4
# we are ignoring the other states since we are only concerning ourselves with an ESME in Transceiver mode.
# An ESME has established a network connection to the SMSC but has not yet issued a Bind request.
OPEN: str = "OPEN"
# A connected ESME has requested to bind as an ESME Transceiver (by issuing a bind_transceiver PDU)
# and has received a response from the SMSC authorising its Bind request.
BOUND_TRX: str = "BOUND_TRX"
# An ESME has unbound from the SMSC and has closed the network connection. The SMSC may also unbind from the ESME.
CLOSED: str = "CLOSED"
class SmppCommand:
"""
Represents the various SMPP commands.
"""
# see section 4 of SMPP spec document v3.4
BIND_TRANSCEIVER: str = "bind_transceiver"
BIND_TRANSCEIVER_RESP: str = "bind_transceiver_resp"
BIND_TRANSMITTER: str = "bind_transmitter"
BIND_RECEIVER: str = "bind_receiver"
UNBIND: str = "unbind"
UNBIND_RESP: str = "unbind_resp"
SUBMIT_SM: str = "submit_sm"
SUBMIT_SM_RESP: str = "submit_sm_resp"
DELIVER_SM: str = "deliver_sm"
DELIVER_SM_RESP: str = "deliver_sm_resp"
ENQUIRE_LINK: str = "enquire_link"
ENQUIRE_LINK_RESP: str = "enquire_link_resp"
GENERIC_NACK: str = "generic_nack"
# naz currently does not handle the following smpp commands.
# open a github issue if you use naz and require support of a command in this list
BIND_RECEIVER_RESP: str = "bind_receiver_resp"
BIND_TRANSMITTER_RESP: str = "bind_transmitter_resp"
QUERY_SM: str = "query_sm"
QUERY_SM_RESP: str = "query_sm_resp"
REPLACE_SM: str = "replace_sm"
REPLACE_SM_RESP: str = "replace_sm_resp"
CANCEL_SM: str = "cancel_sm"
CANCEL_SM_RESP: str = "cancel_sm_resp"
SUBMIT_MULTI: str = "submit_multi"
SUBMIT_MULTI_RESP: str = "submit_multi_resp"
OUTBIND: str = "outbind"
ALERT_NOTIFICATION: str = "alert_notification"
DATA_SM: str = "data_sm"
DATA_SM_RESP: str = "data_sm_resp"
RESERVED_A: str = "reserved_a"
RESERVED_B: str = "reserved_b"
RESERVED_C: str = "reserved_c"
RESERVED_D: str = "reserved_d"
RESERVED_E: str = "reserved_e"
RESERVED_F: str = "reserved_f"
RESERVED_G: str = "reserved_g"
RESERVED_LIST_A: str = "reserved_list_a"
RESERVED_LIST_B: str = "reserved_list_b"
RESERVED_LIST_C: str = "reserved_list_c"
RESERVED_LIST_D: str = "reserved_list_d"
RESERVED_LIST_E: str = "reserved_list_e"
RESERVED_LIST_F: str = "reserved_list_f"
RESERVED_LIST_G: str = "reserved_list_g"
RESERVED_LIST_H: str = "reserved_list_h"
RESERVED_LIST_I: str = "reserved_list_i"
RESERVED_FOR_SMPP_EXTENSION_A: str = "reserved_for_smpp_extension_a"
RESERVED_FOR_SMPP_EXTENSION_B: str = "reserved_for_smpp_extension_b"
RESERVED_FOR_SMSC_VENDOR_A: str = "reserved_for_smsc_vendor_a"
RESERVED_FOR_SMSC_VENDOR_B: str = "reserved_for_smsc_vendor_b"
class CommandStatus(typing.NamedTuple):
"""
An SMPP command status
"""
code: str
value: typing.Union[int, typing.List[int]]
description: str
class SmppCommandStatus:
"""
Represents the various SMPP command statuses.
"""
# see section 5.1.3 of smpp ver 3.4 spec document
ESME_ROK: CommandStatus = CommandStatus(
code="ESME_ROK", value=0x00000000, description="Success"
)
ESME_RINVMSGLEN: CommandStatus = CommandStatus(
code="ESME_RINVMSGLEN", value=0x00000001, description="Message Length is invalid"
)
ESME_RINVCMDLEN: CommandStatus = CommandStatus(
code="ESME_RINVCMDLEN", value=0x00000002, description="Command Length is invalid"
)
ESME_RINVCMDID: CommandStatus = CommandStatus(
code="ESME_RINVCMDID", value=0x00000003, description="Invalid Command ID"
)
ESME_RINVBNDSTS: CommandStatus = CommandStatus(
code="ESME_RINVBNDSTS",
value=0x00000004,
description="Incorrect BIND Status for given command",
)
ESME_RALYBND: CommandStatus = CommandStatus(
code="ESME_RALYBND", value=0x00000005, description="ESME Already in Bound State"
)
ESME_RINVPRTFLG: CommandStatus = CommandStatus(
code="ESME_RINVPRTFLG", value=0x00000006, description="Invalid Priority Flag"
)
ESME_RINVREGDLVFLG: CommandStatus = CommandStatus(
code="ESME_RINVREGDLVFLG", value=0x00000007, description="Invalid Registered Delivery Flag"
)
ESME_RSYSERR: CommandStatus = CommandStatus(
code="ESME_RSYSERR", value=0x00000008, description="System Error"
)
ESME_RINVSRCADR: CommandStatus = CommandStatus(
code="ESME_RINVSRCADR", value=0x0000000A, description="Invalid Source Address"
)
ESME_RINVDSTADR: CommandStatus = CommandStatus(
code="ESME_RINVDSTADR", value=0x0000000B, description="Invalid Dest Addr"
)
ESME_RINVMSGID: CommandStatus = CommandStatus(
code="ESME_RINVMSGID", value=0x0000000C, description="Message ID is invalid"
)
ESME_RBINDFAIL: CommandStatus = CommandStatus(
code="ESME_RBINDFAIL", value=0x0000000D, description="Bind Failed"
)
ESME_RINVPASWD: CommandStatus = CommandStatus(
code="ESME_RINVPASWD", value=0x0000000E, description="Invalid Password"
)
ESME_RINVSYSID: CommandStatus = CommandStatus(
code="ESME_RINVSYSID", value=0x0000000F, description="Invalid System ID"
)
ESME_RCANCELFAIL: CommandStatus = CommandStatus(
code="ESME_RCANCELFAIL", value=0x00000011, description="Cancel SM Failed"
)
ESME_RREPLACEFAIL: CommandStatus = CommandStatus(
code="ESME_RREPLACEFAIL", value=0x00000013, description="Replace SM Failed"
)
ESME_RMSGQFUL: CommandStatus = CommandStatus(
code="ESME_RMSGQFUL", value=0x00000014, description="Message Broker Full"
)
ESME_RINVSERTYP: CommandStatus = CommandStatus(
code="ESME_RINVSERTYP", value=0x00000015, description="Invalid Service Type"
)
ESME_RINVNUMDESTS: CommandStatus = CommandStatus(
code="ESME_RINVNUMDESTS", value=0x00000033, description="Invalid number of destinations"
)
ESME_RINVDLNAME: CommandStatus = CommandStatus(
code="ESME_RINVNUMDESTS", value=0x00000034, description="Invalid Distribution List name"
)
ESME_RINVDESTFLAG: CommandStatus = CommandStatus(
code="ESME_RINVDESTFLAG",
value=0x00000040,
description="Destination flag is invalid (submit_multi)",
)
ESME_RINVSUBREP: CommandStatus = CommandStatus(
code="ESME_RINVSUBREP",
value=0x00000042,
description="Invalid (submit with replace) request(i.e. submit_sm with replace_if_present_flag set)",
)
ESME_RINVESMCLASS: CommandStatus = CommandStatus(
code="ESME_RINVESMCLASS", value=0x00000043, description="Invalid esm_class field data"
)
ESME_RCNTSUBDL: CommandStatus = CommandStatus(
code="ESME_RCNTSUBDL", value=0x00000044, description="Cannot Submit to Distribution List"
)
ESME_RSUBMITFAIL: CommandStatus = CommandStatus(
code="ESME_RSUBMITFAIL", value=0x00000045, description="Submit_sm or submit_multi failed"
)
ESME_RINVSRCTON: CommandStatus = CommandStatus(
code="ESME_RINVSRCTON", value=0x00000048, description="Invalid Source address TON"
)
ESME_RINVSRCNPI: CommandStatus = CommandStatus(
code="ESME_RINVSRCNPI", value=0x00000049, description="Invalid Source address NPI"
)
ESME_RINVDSTTON: CommandStatus = CommandStatus(
code="ESME_RINVDSTTON", value=0x00000050, description="Invalid Destination address TON"
)
ESME_RINVDSTNPI: CommandStatus = CommandStatus(
code="ESME_RINVDSTNPI", value=0x00000051, description="Invalid Destination address NPI"
)
ESME_RINVSYSTYP: CommandStatus = CommandStatus(
code="ESME_RINVSYSTYP", value=0x00000053, description="Invalid system_type field"
)
ESME_RINVREPFLAG: CommandStatus = CommandStatus(
code="ESME_RINVREPFLAG", value=0x00000054, description="Invalid replace_if_present flag"
)
ESME_RINVNUMMSGS: CommandStatus = CommandStatus(
code="ESME_RINVNUMMSGS", value=0x00000055, description="Invalid number of messages"
)
ESME_RTHROTTLED: CommandStatus = CommandStatus(
code="ESME_RTHROTTLED",
value=0x00000058,
description="Throttling error (ESME has exceeded allowed message limits)",
)
ESME_RINVSCHED: CommandStatus = CommandStatus(
code="ESME_RINVSCHED", value=0x00000061, description="Invalid Scheduled Delivery Time"
)
ESME_RINVEXPIRY: CommandStatus = CommandStatus(
code="ESME_RINVEXPIRY",
value=0x00000062,
description="Invalid message validity period (Expiry time)",
)
ESME_RINVDFTMSGID: CommandStatus = CommandStatus(
code="ESME_RINVDFTMSGID",
value=0x00000063,
description="Predefined Message Invalid or Not Found",
)
ESME_RX_T_APPN: CommandStatus = CommandStatus(
code="ESME_RX_T_APPN",
value=0x00000064,
description="ESME Receiver Temporary App Error Code",
)
ESME_RX_P_APPN: CommandStatus = CommandStatus(
code="ESME_RX_P_APPN",
value=0x00000065,
description="ESME Receiver Permanent App Error Code",
)
ESME_RX_R_APPN: CommandStatus = CommandStatus(
code="ESME_RX_R_APPN",
value=0x00000066,
description="ESME Receiver Reject Message Error Code",
)
ESME_RQUERYFAIL: CommandStatus = CommandStatus(
code="ESME_RQUERYFAIL", value=0x00000067, description="query_sm request failed"
)
ESME_RINVOPTPARSTREAM: CommandStatus = CommandStatus(
code="ESME_RINVOPTPARSTREAM",
value=0x000000C0,
description="Error in the optional part of the PDU Body.",
)
ESME_ROPTPARNOTALLWD: CommandStatus = CommandStatus(
code="ESME_ROPTPARNOTALLWD", value=0x000000C1, description="Optional Parameter not allowed"
)
ESME_RINVPARLEN: CommandStatus = CommandStatus(
code="ESME_RINVPARLEN", value=0x000000C2, description="Invalid Parameter Length."
)
ESME_RMISSINGOPTPARAM: CommandStatus = CommandStatus(
code="ESME_RMISSINGOPTPARAM",
value=0x000000C3,
description="Expected Optional Parameter missing",
)
ESME_RINVOPTPARAMVAL: CommandStatus = CommandStatus(
code="ESME_RINVOPTPARAMVAL",
value=0x000000C4,
description="Invalid Optional Parameter Value",
)
ESME_RDELIVERYFAILURE: CommandStatus = CommandStatus(
code="ESME_RDELIVERYFAILURE",
value=0x000000FE,
description="Delivery Failure (used for data_sm_resp)",
)
ESME_RUNKNOWNERR: CommandStatus = CommandStatus(
code="ESME_RUNKNOWNERR", value=0x000000FF, description="Unknown Error"
)
RESERVED_A: CommandStatus = CommandStatus(
code="Reserved", value=0x00000009, description="Reserved"
)
RESERVED_B: CommandStatus = CommandStatus(
code="Reserved", value=0x00000010, description="Reserved"
)
RESERVED_C: CommandStatus = CommandStatus(
code="Reserved", value=0x00000012, description="Reserved"
)
RESERVED_D: CommandStatus = CommandStatus(
code="Reserved", value=0x00000041, description="Reserved"
)
RESERVED_E: CommandStatus = CommandStatus(
code="Reserved", value=0x00000052, description="Reserved"
)
RESERVED_LIST_A: CommandStatus = CommandStatus(
code="Reserved", value=[0x00000016, 0x00000032], description="Reserved"
)
RESERVED_LIST_B: CommandStatus = CommandStatus(
code="Reserved", value=[0x00000035, 0x0000003F], description="Reserved"
)
RESERVED_LIST_C: CommandStatus = CommandStatus(
code="Reserved", value=[0x00000046, 0x00000047], description="Reserved"
)
RESERVED_LIST_D: CommandStatus = CommandStatus(
code="Reserved", value=[0x00000056, 0x00000057], description="Reserved"
)
RESERVED_LIST_E: CommandStatus = CommandStatus(
code="Reserved", value=[0x00000059, 0x00000060], description="Reserved"
)
RESERVED_LIST_F: CommandStatus = CommandStatus(
code="Reserved", value=[0x00000068, 0x000000BF], description="Reserved"
)
RESERVED_LIST_G: CommandStatus = CommandStatus(
code="Reserved", value=[0x000000C5, 0x000000FD], description="Reserved"
)
RESERVED_LIST_H: CommandStatus = CommandStatus(
code="Reserved", value=[0x00000100, 0x000003FF], description="Reserved for SMPP extension"
)
RESERVED_LIST_I: CommandStatus = CommandStatus(
code="Reserved",
value=[0x00000400, 0x000004FF],
description="Reserved for SMSC vendor specific errors",
)
RESERVED_LIST_J: CommandStatus = CommandStatus(
code="Reserved", value=[0x00000500, 0xFFFFFFFF], description="Reserved"
)
class DataCoding(typing.NamedTuple):
"""
An SMPP data encoding.
"""
code: str
value: int
description: str
class SmppDataCoding:
"""
Represents the various SMPP data encodings.
"""
# see section 5.2.19 of smpp ver 3.4 spec document.
# also see:
# 1. https://github.com/praekelt/vumi/blob/767eac623c81cc4b2e6ea9fbd6a3645f121ef0aa/vumi/transports/smpp/processors/default.py#L260
# 2. https://docs.python.org/3/library/codecs.html
# 3. https://docs.python.org/3/library/codecs.html#standard-encodings
# The attributes of this class are equivalent to some of the names found in the python standard-encodings documentation
# ie; https://docs.python.org/3/library/codecs.html#standard-encodings
gsm0338: DataCoding = DataCoding(
code="gsm0338", value=0b00000000, description="SMSC Default Alphabet"
)
ascii: DataCoding = DataCoding(
code="ascii", value=0b00000001, description="IA5(CCITT T.50) / ASCII(ANSI X3.4)"
)
octet_unspecified_I: DataCoding = DataCoding(
code="octet_unspecified_I",
value=0b00000010,
description="Octet unspecified(8 - bit binary)",
)
latin_1: DataCoding = DataCoding(
code="latin_1", value=0b00000011, description="Latin 1 (ISO - 8859 - 1)"
)
octet_unspecified_II: DataCoding = DataCoding(
code="octet_unspecified_II",
value=0b00000100,
description="Octet unspecified(8 - bit binary)",
)
# iso2022_jp, iso2022jp and iso-2022-jp are aliases
# see: https://stackoverflow.com/a/43240579/2768067
iso2022_jp: DataCoding = DataCoding(
code="iso2022_jp", value=0b00000101, description="JIS(X 0208 - 1990)"
)
iso8859_5: DataCoding = DataCoding(
code="iso8859_5", value=0b00000110, description="Cyrllic(ISO - 8859 - 5)"
)
iso8859_8: DataCoding = DataCoding(
code="iso8859_8", value=0b00000111, description="Latin / Hebrew(ISO - 8859 - 8)"
)
# see: https://stackoverflow.com/a/14488478/2768067
utf_16_be: DataCoding = DataCoding(
code="utf_16_be", value=0b00001000, description="UCS2(ISO / IEC - 10646)"
)
ucs2: DataCoding = DataCoding(
code="ucs2", value=0b00001000, description="UCS2(ISO / IEC - 10646)"
)
shift_jis: DataCoding = DataCoding(
code="shift_jis", value=0b00001001, description="Pictogram |