prompt (stringlengths 15–655k) | completion (stringlengths 3–32.4k) | api (stringlengths 8–52) |
---|---|---|
import numpy as np
grille = np.array([
[0,0,0,0,0,0,0],
[0,0,1,0,5,0,0],
[0,0,1,1,0,0,0],
[0,0,1,0,0,0,0],
[0,1,2,0,0,1,0],
[1,1,2,2,2,1,1],
])
for k in range(-2,4):
print(
|
np.diag(grille, k=k)
|
numpy.diag
|
"""
Flux calibration code that looks at the red and blue data together.
This code replaces the original fluxcal module. The key functionality
(and the most difficult part to develop) is to infer the "total" observed
spectrum of a star based on the light in the fibres, i.e. to work out how
much light was lost between the fibres. This is done based on a model
that incorporates our understanding of how the atmosphere affects light,
both in terms of the PSF and the atmospheric refraction. A few different
models are available, with different amounts of freedom.
The allowed models are:
-- ref_centre_alpha_angle --
The centre position and alpha are fit for the reference wavelength,
and the positions and alpha values are then determined by the known
alpha dependence and the DAR, with the zenith distance and direction
also as free parameters.
-- ref_centre_alpha_angle_circ --
The same as ref_centre_alpha_angle, but with the Moffat function
constrained to be circular.
-- ref_centre_alpha_dist_circ --
The same as ref_centre_alpha_angle_circ, but with the zenith direction
fixed.
-- ref_centre_alpha_angle_circ_atm --
The same as ref_centre_alpha_angle_circ, but with atmospheric values
as free parameters too. Note, however, that the atmospheric parameters
are completely degenerate with each other and with ZD.
-- ref_centre_alpha_dist_circ_hdratm --
As ref_centre_alpha_dist_circ, but uses atmospheric values read from the
FITS header instead of the default values.
-- ref_centre_alpha_circ_hdratm --
Uses a circular Moffat function, fixed zenith distance and atmospheric
values from the FITS header.
Other than the functions for reading parameters in and out, the
functionality for doing the actual fitting is the same for all models,
so can be extended for further models quite straightforwardly.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import warnings
import numpy as np
from scipy.optimize import leastsq, curve_fit
from scipy.interpolate import LSQUnivariateSpline
from scipy.ndimage.filters import median_filter, gaussian_filter1d
from scipy.ndimage import zoom
from astropy import coordinates as coord
from astropy import units
from astropy import table
from astropy.io import fits as pf
from astropy.io import ascii
from astropy import __version__ as ASTROPY_VERSION
# extra astropy bits to calculate airmass
import astropy.units as u
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation, AltAz
from scipy.special import erfc
from scipy.stats import binned_statistic
from shutil import copyfile
# required for test plotting:
import pylab as py
from ..utils.ifu import IFU
from ..utils.mc_adr import parallactic_angle, adr_r
from ..utils.other import saturated_partial_pressure_water
from ..config import millibar_to_mmHg
from ..utils.fluxcal2_io import read_model_parameters, save_extracted_flux
from .telluric2 import TelluricCorrectPrimary as telluric_correct_primary
from . import dust
#from ..manager import read_stellar_mags
#from ..qc.fluxcal import measure_band
try:
from bottleneck import nansum, nanmean
except ImportError:
from numpy import nansum, nanmean
warnings.warn("Not Using bottleneck: Speed will be improved if you install bott leneck")
# import of ppxf for fitting of secondary stds:
import ppxf as ppxf_package
from ppxf.ppxf import ppxf
from ppxf.ppxf_util import log_rebin
# Get the astropy version as a tuple of integers
ASTROPY_VERSION = tuple(int(x) for x in ASTROPY_VERSION.split('.'))
STANDARD_CATALOGUES = ('./standards/ESO/ESOstandards.dat',
'./standards/Bessell/Bessellstandards.dat')
SSO_EXTINCTION_TABLE = './standards/extinsso.tab'
REFERENCE_WAVELENGTH = 5000.0
FIBRE_RADIUS = 0.798
TELLURIC_BANDS = np.array([[6850, 6960],
[7130, 7360],
[7560, 7770],
[8100, 8360]])
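# Illustrative sketch only, not part of the original module: the models named in
# the module docstring above fit a Moffat PSF to the fibre fluxes. A circular
# Moffat profile with core width alpha and power index beta, normalised so that
# it integrates to unity over the plane, can be written as below (the function
# name is made up for illustration; the module's own fitting code may differ).
def _example_circular_moffat(r, alpha, beta):
    """Normalised circular Moffat profile evaluated at radius r (illustrative)."""
    return (beta - 1.0) / (np.pi * alpha**2) * (1.0 + (r / alpha)**2)**(-beta)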
def generate_subgrid(fibre_radius, n_inner=6, n_rings=10, wt_profile=False):
"""Generate a subgrid of points within a fibre."""
radii = np.arange(0., n_rings) + 0.5
rot_angle = 0.0
radius = []
theta = []
for i_ring, radius_ring in enumerate(radii):
n_points = np.int(np.round(n_inner * radius_ring))
theta_ring = (np.linspace(0.0, 2.0*np.pi, n_points, endpoint=False) +
rot_angle)
radius = np.hstack((radius, np.ones(n_points) * radius_ring))
theta = np.hstack((theta, theta_ring))
rot_angle += theta_ring[1] / 2.0
radius *= fibre_radius / n_rings
xsub = radius * np.cos(theta)
ysub = radius * np.sin(theta)
# generate a weight for the points based on the radial profile. In this case
# we use an error function that goes to 0.5 at 0.8 of the radius of the fibre.
# this is just experimental, no evidence it makes much improvement:
if (wt_profile):
wsub = 0.5*erfc((radius-fibre_radius*0.8)*4.0)
wnorm = float(np.size(radius))/np.sum(wsub)
wsub = wsub * wnorm
else:
# or unit weighting:
wsub = np.ones(np.size(xsub))
return xsub, ysub, wsub
XSUB, YSUB, WSUB= generate_subgrid(FIBRE_RADIUS)
N_SUB = len(XSUB)
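# With the defaults above (n_inner=6, n_rings=10), ring i carries
# round(6 * (i + 0.5)) points (3, 9, 15, ..., 57), so N_SUB works out to 300
# subgrid points per fibre.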
def in_telluric_band(wavelength):
"""Return boolean array, True if in a telluric band."""
retarray = np.zeros(np.shape(wavelength), dtype='bool')
for band in TELLURIC_BANDS:
retarray = retarray | ((wavelength >= band[0]) &
(wavelength <= band[1]))
return retarray
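# Usage sketch (illustrative): with TELLURIC_BANDS as defined above, 7000 A lies
# outside every band while 7200 A falls in the 7130-7360 band, so
# in_telluric_band(np.array([7000.0, 7200.0])) returns array([False, True]).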
def read_chunked_data(path_list, probenum, n_drop=None, n_chunk=None,
sigma_clip=None):
"""Read flux from a list of files, chunk it and combine."""
if isinstance(path_list, str):
path_list = [path_list]
for i_file, path in enumerate(path_list):
ifu = IFU(path, probenum, flag_name=False)
remove_atmosphere(ifu)
data_i, variance_i, wavelength_i = chunk_data(
ifu, n_drop=n_drop, n_chunk=n_chunk, sigma_clip=sigma_clip)
if i_file == 0:
data = data_i
variance = variance_i
wavelength = wavelength_i
else:
data = np.hstack((data, data_i))
variance = np.hstack((variance, variance_i))
wavelength = np.hstack((wavelength, wavelength_i))
xfibre = ifu.xpos_rel * np.cos(np.deg2rad(np.mean(ifu.ypos)))
yfibre = ifu.ypos_rel
# Only keep unbroken fibres
good_fibre = (ifu.fib_type == 'P')
chunked_data = {'data': data[good_fibre, :],
'variance': variance[good_fibre, :],
'wavelength': wavelength,
'xfibre': xfibre[good_fibre],
'yfibre': yfibre[good_fibre]}
return chunked_data
def trim_chunked_data(chunked_data, n_trim):
"""Trim off the extreme blue end of the chunked data, because it's bad."""
chunked_data['data'] = chunked_data['data'][:, n_trim:]
chunked_data['variance'] = chunked_data['variance'][:, n_trim:]
chunked_data['wavelength'] = chunked_data['wavelength'][n_trim:]
return
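# Usage sketch (illustrative; the file names and probe number are made up):
#   chunked = read_chunked_data(['blue_frame.fits', 'red_frame.fits'], probenum=1)
#   trim_chunked_data(chunked, n_trim=3)
# which leaves a dict with 'data', 'variance', 'wavelength', 'xfibre' and
# 'yfibre' entries for the unbroken fibres of the requested probe.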
def chunk_data(ifu, n_drop=None, n_chunk=None, sigma_clip=None):
"""Condence a spectrum into a number of chunks."""
n_pixel = ifu.naxis1
n_fibre = len(ifu.data)
if n_drop is None:
n_drop = 24
if n_chunk is None:
n_chunk = round((n_pixel - 2*n_drop) / 100.0)
chunk_size = round((n_pixel - 2*n_drop) / n_chunk)
if sigma_clip:
good = np.isfinite(ifu.data)
data_smooth = ifu.data.copy()
data_smooth[~good] = np.median(ifu.data[good])
data_smooth = median_filter(data_smooth, size=(1, 51))
data_smooth[~good] = np.nan
# Estimate of variance; don't trust 2dfdr values
std_smooth = 1.4826 * np.median(np.abs(ifu.data[good] -
data_smooth[good]))
data = ifu.data
clip = abs(data - data_smooth) > (sigma_clip * std_smooth)
data[clip] = data_smooth[clip]
else:
data = ifu.data
# Convert to integer for future compatibility.
n_chunk = np.int(np.floor(n_chunk))
chunk_size = np.int(np.floor(chunk_size))
start = n_drop
end = n_drop + n_chunk * chunk_size
data = data[:, start:end].reshape(n_fibre, n_chunk, chunk_size)
variance = ifu.var[:, start:end].reshape(n_fibre, n_chunk, chunk_size)
wavelength = ifu.lambda_range[start:end].reshape(n_chunk, chunk_size)
data = nanmean(data, axis=2)
variance = (np.nansum(variance, axis=2) /
np.sum(np.isfinite(variance), axis=2)**2)
# Replace any remaining NaNs with 0.0; not ideal but should be very rare
bad_data = ~
|
np.isfinite(data)
|
numpy.isfinite
|
# -*- coding: utf-8 -*-
from collections import defaultdict
import os
import re
import numpy as np
from pyfr.shapes import BaseShape
from pyfr.util import memoize, subclass_where
from pyfr.writers import BaseWriter
class VTKWriter(BaseWriter):
# Supported file types and extensions
name = 'vtk'
extn = ['.vtu', '.pvtu']
vtk_types_ho = dict(tri=69, quad=70, tet=71, pri=73, hex=72)
# Mappings between the node ordering of PyFR and that of VTK
_nodemaps = {
('quad', 4): [0, 1, 3, 2],
('quad', 9): [0, 2, 8, 6, 1, 5, 7, 3, 4],
('quad', 16): [0, 3, 15, 12, 1, 2, 7, 11, 13, 14, 4, 8, 5, 6, 9, 10],
('quad', 25): [0, 4, 24, 20, 1, 2, 3, 9, 14, 19, 21, 22, 23, 5, 10, 15,
6, 7, 8, 11, 12, 13, 16, 17, 18],
('quad', 36): [0, 5, 35, 30, 1, 2, 3, 4, 11, 17, 23, 29, 31, 32, 33,
34, 6, 12, 18, 24, 7, 8, 9, 10, 13, 14, 15, 16, 19, 20,
21, 22, 25, 26, 27, 28],
('quad', 49): [0, 6, 48, 42, 1, 2, 3, 4, 5, 13, 20, 27, 34, 41, 43, 44,
45, 46, 47, 7, 14, 21, 28, 35, 8, 9, 10, 11, 12, 15, 16,
17, 18, 19, 22, 23, 24, 25, 26, 29, 30, 31, 32, 33, 36,
37, 38, 39, 40],
('quad', 64): [
0, 7, 63, 56, 1, 2, 3, 4, 5, 6, 15, 23, 31, 39, 47, 55, 57, 58, 59,
60, 61, 62, 8, 16, 24, 32, 40, 48, 9, 10, 11, 12, 13, 14, 17, 18,
19, 20, 21, 22, 25, 26, 27, 28, 29, 30, 33, 34, 35, 36, 37, 38, 41,
42, 43, 44, 45, 46, 49, 50, 51, 52, 53, 54
],
('quad', 81): [
0, 8, 80, 72, 1, 2, 3, 4, 5, 6, 7, 17, 26, 35, 44, 53, 62, 71, 73,
74, 75, 76, 77, 78, 79, 9, 18, 27, 36, 45, 54, 63, 10, 11, 12, 13,
14, 15, 16, 19, 20, 21, 22, 23, 24, 25, 28, 29, 30, 31, 32, 33, 34,
37, 38, 39, 40, 41, 42, 43, 46, 47, 48, 49, 50, 51, 52, 55, 56, 57,
58, 59, 60, 61, 64, 65, 66, 67, 68, 69, 70
],
('hex', 8): [0, 1, 3, 2, 4, 5, 7, 6],
('hex', 27): [0, 2, 8, 6, 18, 20, 26, 24, 1, 5, 7, 3, 19, 23, 25, 21,
9, 11, 17, 15, 12, 14, 10, 16, 4, 22, 13],
('hex', 64): [
0, 3, 15, 12, 48, 51, 63, 60, 1, 2, 7, 11, 13, 14, 4, 8, 49, 50,
55, 59, 61, 62, 52, 56, 16, 32, 19, 35, 31, 47, 28, 44, 20, 24, 36,
40, 23, 27, 39, 43, 17, 18, 33, 34, 29, 30, 45, 46, 5, 6, 9, 10,
53, 54, 57, 58, 21, 22, 25, 26, 37, 38, 41, 42
],
('hex', 125): [
0, 4, 24, 20, 100, 104, 124, 120, 1, 2, 3, 9, 14, 19, 21, 22, 23,
5, 10, 15, 101, 102, 103, 109, 114, 119, 121, 122, 123, 105, 110,
115, 25, 50, 75, 29, 54, 79, 49, 74, 99, 45, 70, 95, 30, 35, 40,
55, 60, 65, 80, 85, 90, 34, 39, 44, 59, 64, 69, 84, 89, 94, 26, 27,
28, 51, 52, 53, 76, 77, 78, 46, 47, 48, 71, 72, 73, 96, 97, 98, 6,
7, 8, 11, 12, 13, 16, 17, 18, 106, 107, 108, 111, 112, 113, 116,
117, 118, 31, 32, 33, 36, 37, 38, 41, 42, 43, 56, 57, 58, 61, 62,
63, 66, 67, 68, 81, 82, 83, 86, 87, 88, 91, 92, 93
],
('hex', 216): [
0, 5, 35, 30, 180, 185, 215, 210, 1, 2, 3, 4, 11, 17, 23, 29, 31,
32, 33, 34, 6, 12, 18, 24, 181, 182, 183, 184, 191, 197, 203, 209,
211, 212, 213, 214, 186, 192, 198, 204, 36, 72, 108, 144, 41, 77,
113, 149, 71, 107, 143, 179, 66, 102, 138, 174, 42, 48, 54, 60, 78,
84, 90, 96, 114, 120, 126, 132, 150, 156, 162, 168, 47, 53, 59, 65,
83, 89, 95, 101, 119, 125, 131, 137, 155, 161, 167, 173, 37, 38,
39, 40, 73, 74, 75, 76, 109, 110, 111, 112, 145, 146, 147, 148, 67,
68, 69, 70, 103, 104, 105, 106, 139, 140, 141, 142, 175, 176, 177,
178, 7, 8, 9, 10, 13, 14, 15, 16, 19, 20, 21, 22, 25, 26, 27, 28,
187, 188, 189, 190, 193, 194, 195, 196, 199, 200, 201, 202, 205,
206, 207, 208, 43, 44, 45, 46, 49, 50, 51, 52, 55, 56, 57, 58, 61,
62, 63, 64, 79, 80, 81, 82, 85, 86, 87, 88, 91, 92, 93, 94, 97, 98,
99, 100, 115, 116, 117, 118, 121, 122, 123, 124, 127, 128, 129,
130, 133, 134, 135, 136, 151, 152, 153, 154, 157, 158, 159, 160,
163, 164, 165, 166, 169, 170, 171, 172
],
('hex', 343): [
0, 6, 48, 42, 294, 300, 342, 336, 1, 2, 3, 4, 5, 13, 20, 27, 34,
41, 43, 44, 45, 46, 47, 7, 14, 21, 28, 35, 295, 296, 297, 298, 299,
307, 314, 321, 328, 335, 337, 338, 339, 340, 341, 301, 308, 315,
322, 329, 49, 98, 147, 196, 245, 55, 104, 153, 202, 251, 97, 146,
195, 244, 293, 91, 140, 189, 238, 287, 56, 63, 70, 77, 84, 105,
112, 119, 126, 133, 154, 161, 168, 175, 182, 203, 210, 217, 224,
231, 252, 259, 266, 273, 280, 62, 69, 76, 83, 90, 111, 118, 125,
132, 139, 160, 167, 174, 181, 188, 209, 216, 223, 230, 237, 258,
265, 272, 279, 286, 50, 51, 52, 53, 54, 99, 100, 101, 102, 103,
148, 149, 150, 151, 152, 197, 198, 199, 200, 201, 246, 247, 248,
249, 250, 92, 93, 94, 95, 96, 141, 142, 143, 144, 145, 190, 191,
192, 193, 194, 239, 240, 241, 242, 243, 288, 289, 290, 291, 292, 8,
9, 10, 11, 12, 15, 16, 17, 18, 19, 22, 23, 24, 25, 26, 29, 30, 31,
32, 33, 36, 37, 38, 39, 40, 302, 303, 304, 305, 306, 309, 310, 311,
312, 313, 316, 317, 318, 319, 320, 323, 324, 325, 326, 327, 330,
331, 332, 333, 334, 57, 58, 59, 60, 61, 64, 65, 66, 67, 68, 71, 72,
73, 74, 75, 78, 79, 80, 81, 82, 85, 86, 87, 88, 89, 106, 107, 108,
109, 110, 113, 114, 115, 116, 117, 120, 121, 122, 123, 124, 127,
128, 129, 130, 131, 134, 135, 136, 137, 138, 155, 156, 157, 158,
159, 162, 163, 164, 165, 166, 169, 170, 171, 172, 173, 176, 177,
178, 179, 180, 183, 184, 185, 186, 187, 204, 205, 206, 207, 208,
211, 212, 213, 214, 215, 218, 219, 220, 221, 222, 225, 226, 227,
228, 229, 232, 233, 234, 235, 236, 253, 254, 255, 256, 257, 260,
261, 262, 263, 264, 267, 268, 269, 270, 271, 274, 275, 276, 277,
278, 281, 282, 283, 284, 285
],
('hex', 512): [
0, 7, 63, 56, 448, 455, 511, 504, 1, 2, 3, 4, 5, 6, 15, 23, 31, 39,
47, 55, 57, 58, 59, 60, 61, 62, 8, 16, 24, 32, 40, 48, 449, 450,
451, 452, 453, 454, 463, 471, 479, 487, 495, 503, 505, 506, 507,
508, 509, 510, 456, 464, 472, 480, 488, 496, 64, 128, 192, 256,
320, 384, 71, 135, 199, 263, 327, 391, 127, 191, 255, 319, 383,
447, 120, 184, 248, 312, 376, 440, 72, 80, 88, 96, 104, 112, 136,
144, 152, 160, 168, 176, 200, 208, 216, 224, 232, 240, 264, 272,
280, 288, 296, 304, 328, 336, 344, 352, 360, 368, 392, 400, 408,
416, 424, 432, 79, 87, 95, 103, 111, 119, 143, 151, 159, 167, 175,
183, 207, 215, 223, 231, 239, 247, 271, 279, 287, 295, 303, 311,
335, 343, 351, 359, 367, 375, 399, 407, 415, 423, 431, 439, 65, 66,
67, 68, 69, 70, 129, 130, 131, 132, 133, 134, 193, 194, 195, 196,
197, 198, 257, 258, 259, 260, 261, 262, 321, 322, 323, 324, 325,
326, 385, 386, 387, 388, 389, 390, 121, 122, 123, 124, 125, 126,
185, 186, 187, 188, 189, 190, 249, 250, 251, 252, 253, 254, 313,
314, 315, 316, 317, 318, 377, 378, 379, 380, 381, 382, 441, 442,
443, 444, 445, 446, 9, 10, 11, 12, 13, 14, 17, 18, 19, 20, 21, 22,
25, 26, 27, 28, 29, 30, 33, 34, 35, 36, 37, 38, 41, 42, 43, 44, 45,
46, 49, 50, 51, 52, 53, 54, 457, 458, 459, 460, 461, 462, 465, 466,
467, 468, 469, 470, 473, 474, 475, 476, 477, 478, 481, 482, 483,
484, 485, 486, 489, 490, 491, 492, 493, 494, 497, 498, 499, 500,
501, 502, 73, 74, 75, 76, 77, 78, 81, 82, 83, 84, 85, 86, 89, 90,
91, 92, 93, 94, 97, 98, 99, 100, 101, 102, 105, 106, 107, 108, 109,
110, 113, 114, 115, 116, 117, 118, 137, 138, 139, 140, 141, 142,
145, 146, 147, 148, 149, 150, 153, 154, 155, 156, 157, 158, 161,
162, 163, 164, 165, 166, 169, 170, 171, 172, 173, 174, 177, 178,
179, 180, 181, 182, 201, 202, 203, 204, 205, 206, 209, 210, 211,
212, 213, 214, 217, 218, 219, 220, 221, 222, 225, 226, 227, 228,
229, 230, 233, 234, 235, 236, 237, 238, 241, 242, 243, 244, 245,
246, 265, 266, 267, 268, 269, 270, 273, 274, 275, 276, 277, 278,
281, 282, 283, 284, 285, 286, 289, 290, 291, 292, 293, 294, 297,
298, 299, 300, 301, 302, 305, 306, 307, 308, 309, 310, 329, 330,
331, 332, 333, 334, 337, 338, 339, 340, 341, 342, 345, 346, 347,
348, 349, 350, 353, 354, 355, 356, 357, 358, 361, 362, 363, 364,
365, 366, 369, 370, 371, 372, 373, 374, 393, 394, 395, 396, 397,
398, 401, 402, 403, 404, 405, 406, 409, 410, 411, 412, 413, 414,
417, 418, 419, 420, 421, 422, 425, 426, 427, 428, 429, 430, 433,
434, 435, 436, 437, 438
],
('hex', 729): [
0, 8, 80, 72, 648, 656, 728, 720, 1, 2, 3, 4, 5, 6, 7, 17, 26, 35,
44, 53, 62, 71, 73, 74, 75, 76, 77, 78, 79, 9, 18, 27, 36, 45, 54,
63, 649, 650, 651, 652, 653, 654, 655, 665, 674, 683, 692, 701,
710, 719, 721, 722, 723, 724, 725, 726, 727, 657, 666, 675, 684,
693, 702, 711, 81, 162, 243, 324, 405, 486, 567, 89, 170, 251, 332,
413, 494, 575, 161, 242, 323, 404, 485, 566, 647, 153, 234, 315,
396, 477, 558, 639, 90, 99, 108, 117, 126, 135, 144, 171, 180, 189,
198, 207, 216, 225, 252, 261, 270, 279, 288, 297, 306, 333, 342,
351, 360, 369, 378, 387, 414, 423, 432, 441, 450, 459, 468, 495,
504, 513, 522, 531, 540, 549, 576, 585, 594, 603, 612, 621, 630,
98, 107, 116, 125, 134, 143, 152, 179, 188, 197, 206, 215, 224,
233, 260, 269, 278, 287, 296, 305, 314, 341, 350, 359, 368, 377,
386, 395, 422, 431, 440, 449, 458, 467, 476, 503, 512, 521, 530,
539, 548, 557, 584, 593, 602, 611, 620, 629, 638, 82, 83, 84, 85,
86, 87, 88, 163, 164, 165, 166, 167, 168, 169, 244, 245, 246, 247,
248, 249, 250, 325, 326, 327, 328, 329, 330, 331, 406, 407, 408,
409, 410, 411, 412, 487, 488, 489, 490, 491, 492, 493, 568, 569,
570, 571, 572, 573, 574, 154, 155, 156, 157, 158, 159, 160, 235,
236, 237, 238, 239, 240, 241, 316, 317, 318, 319, 320, 321, 322,
397, 398, 399, 400, 401, 402, 403, 478, 479, 480, 481, 482, 483,
484, 559, 560, 561, 562, 563, 564, 565, 640, 641, 642, 643, 644,
645, 646, 10, 11, 12, 13, 14, 15, 16, 19, 20, 21, 22, 23, 24, 25,
28, 29, 30, 31, 32, 33, 34, 37, 38, 39, 40, 41, 42, 43, 46, 47, 48,
49, 50, 51, 52, 55, 56, 57, 58, 59, 60, 61, 64, 65, 66, 67, 68, 69,
70, 658, 659, 660, 661, 662, 663, 664, 667, 668, 669, 670, 671,
672, 673, 676, 677, 678, 679, 680, 681, 682, 685, 686, 687, 688,
689, 690, 691, 694, 695, 696, 697, 698, 699, 700, 703, 704, 705,
706, 707, 708, 709, 712, 713, 714, 715, 716, 717, 718, 91, 92, 93,
94, 95, 96, 97, 100, 101, 102, 103, 104, 105, 106, 109, 110, 111,
112, 113, 114, 115, 118, 119, 120, 121, 122, 123, 124, 127, 128,
129, 130, 131, 132, 133, 136, 137, 138, 139, 140, 141, 142, 145,
146, 147, 148, 149, 150, 151, 172, 173, 174, 175, 176, 177, 178,
181, 182, 183, 184, 185, 186, 187, 190, 191, 192, 193, 194, 195,
196, 199, 200, 201, 202, 203, 204, 205, 208, 209, 210, 211, 212,
213, 214, 217, 218, 219, 220, 221, 222, 223, 226, 227, 228, 229,
230, 231, 232, 253, 254, 255, 256, 257, 258, 259, 262, 263, 264,
265, 266, 267, 268, 271, 272, 273, 274, 275, 276, 277, 280, 281,
282, 283, 284, 285, 286, 289, 290, 291, 292, 293, 294, 295, 298,
299, 300, 301, 302, 303, 304, 307, 308, 309, 310, 311, 312, 313,
334, 335, 336, 337, 338, 339, 340, 343, 344, 345, 346, 347, 348,
349, 352, 353, 354, 355, 356, 357, 358, 361, 362, 363, 364, 365,
366, 367, 370, 371, 372, 373, 374, 375, 376, 379, 380, 381, 382,
383, 384, 385, 388, 389, 390, 391, 392, 393, 394, 415, 416, 417,
418, 419, 420, 421, 424, 425, 426, 427, 428, 429, 430, 433, 434,
435, 436, 437, 438, 439, 442, 443, 444, 445, 446, 447, 448, 451,
452, 453, 454, 455, 456, 457, 460, 461, 462, 463, 464, 465, 466,
469, 470, 471, 472, 473, 474, 475, 496, 497, 498, 499, 500, 501,
502, 505, 506, 507, 508, 509, 510, 511, 514, 515, 516, 517, 518,
519, 520, 523, 524, 525, 526, 527, 528, 529, 532, 533, 534, 535,
536, 537, 538, 541, 542, 543, 544, 545, 546, 547, 550, 551, 552,
553, 554, 555, 556, 577, 578, 579, 580, 581, 582, 583, 586, 587,
588, 589, 590, 591, 592, 595, 596, 597, 598, 599, 600, 601, 604,
605, 606, 607, 608, 609, 610, 613, 614, 615, 616, 617, 618, 619,
622, 623, 624, 625, 626, 627, 628, 631, 632, 633, 634, 635, 636,
637
],
('tri', 3): [2, 0, 1],
('tri', 6): [5, 0, 2, 3, 1, 4],
('tri', 10): [9, 0, 3, 7, 4, 1, 2, 6, 8, 5],
('tri', 15): [14, 0, 4, 12, 9, 5, 1, 2, 3, 8, 11, 13, 10, 6, 7],
('tri', 21): [20, 0, 5, 18, 15, 11, 6, 1, 2, 3, 4, 10, 14, 17, 19, 16,
7, 9, 12, 8, 13],
('tri', 28): [27, 0, 6, 25, 22, 18, 13, 7, 1, 2, 3, 4, 5, 12, 17, 21,
24, 26, 23, 8, 11, 19, 14, 9, 10, 16, 20, 15],
('tri', 36): [35, 0, 7, 33, 30, 26, 21, 15, 8, 1, 2, 3, 4, 5, 6, 14,
20, 25, 29, 32, 34, 31, 9, 13, 27, 22, 16, 10, 11, 12,
19, 24, 28, 23, 17, 18],
('tri', 45): [44, 0, 8, 42, 39, 35, 30, 24, 17, 9, 1, 2, 3, 4, 5, 6, 7,
16, 23, 29, 34, 38, 41, 43, 40, 10, 15, 36, 31, 25, 18,
11, 12, 13, 14, 22, 28, 33, 37, 32, 19, 21, 26, 20, 27],
('tet', 4): [3, 0, 1, 2],
('tet', 10): [9, 0, 2, 5, 6, 1, 7, 8, 3, 4],
('tet', 20): [19, 0, 3, 9, 16, 10, 1, 2, 12, 17, 18, 15, 4, 7, 6, 8,
13, 5, 14, 11],
('tet', 35): [34, 0, 4, 14, 31, 25, 15, 1, 2, 3, 18, 27, 32, 33, 30,
24, 5, 9, 12, 8, 11, 13, 28, 19, 22, 7, 10, 6, 29, 23,
21, 26, 17, 16, 20],
('tet', 56): [ 55, 0, 5, 20, 52, 46, 36, 21, 1, 2, 3, 4, 25, 39, 48,
53, 54, 51, 45, 35, 6, 11, 15, 18, 10, 14, 17, 19, 49,
26, 33, 40, 30, 43, 9, 16, 7, 13, 12, 8, 50, 34, 29, 44,
32, 42, 47, 24, 22, 38, 23, 37, 41, 27, 28, 31],
('tet', 84): [
83, 0, 6, 27, 80, 74, 64, 49, 28, 1, 2, 3, 4, 5, 33, 53, 67, 76,
81, 82, 79, 73, 63, 48, 7, 13, 18, 22, 25, 12, 17, 21, 24, 26, 77,
34, 46, 68, 54, 39, 43, 61, 71, 58, 11, 23, 8, 16, 20, 19, 14, 9,
10, 15, 78, 47, 38, 72, 62, 45, 42, 57, 70, 60, 75, 32, 29, 66, 52,
31, 30, 50, 65, 51, 69, 35, 37, 44, 55, 36, 56, 59, 40, 41
],
('tet', 120): [
119, 0, 7, 35, 116, 110, 100, 85, 64, 36, 1, 2, 3, 4, 5, 6, 42, 69,
89, 103, 112, 117, 118, 115, 109, 99, 84, 63, 8, 15, 21, 26, 30,
33, 14, 20, 25, 29, 32, 34, 113, 43, 61, 104, 90, 70, 49, 54, 58,
82, 97, 107, 94, 75, 79, 13, 31, 9, 19, 24, 28, 27, 22, 16, 10, 11,
12, 18, 23, 17, 114, 62, 48, 108, 98, 83, 60, 57, 53, 74, 93, 106,
96, 81, 78, 111, 41, 37, 102, 88, 68, 40, 39, 38, 65, 86, 101, 87,
67, 66, 105, 44, 47, 59, 91, 71, 45, 46, 73, 92, 95, 80, 50, 55,
52, 56, 76, 51, 77, 72
],
('tet', 165): [
164, 0, 8, 44, 161, 155, 145, 130, 109, 81, 45, 1, 2, 3, 4, 5, 6,
7, 52, 87, 114, 134, 148, 157, 162, 163, 160, 154, 144, 129, 108,
80, 9, 17, 24, 30, 35, 39, 42, 16, 23, 29, 34, 38, 41, 43, 158, 53,
78, 149, 135, 115, 88, 60, 66, 71, 75, 106, 127, 142, 152, 139, 94,
103, 120, 99, 124, 15, 40, 10, 22, 28, 33, 37, 36, 31, 25, 18, 11,
12, 13, 14, 21, 32, 19, 27, 26, 20, 159, 79, 59, 153, 143, 128,
107, 77, 74, 70, 65, 93, 119, 138, 151, 141, 105, 98, 126, 102,
123, 156, 51, 46, 147, 133, 113, 86, 50, 49, 48, 47, 82, 110, 131,
146, 132, 85, 83, 112, 84, 111, 150, 54, 58, 76, 136, 116, 89, 55,
56, 57, 92, 118, 137, 140, 125, 104, 61, 67, 72, 64, 69, 73, 121,
95, 100, 63, 68, 62, 122, 101, 97, 117, 91, 90, 96
],
('pri', 6): [0, 1, 2, 3, 4, 5],
('pri', 18): [0, 2, 5, 12, 14, 17, 1, 4, 3, 13, 16, 15, 6, 8, 11, 7,
10, 9],
('pri', 40): [0, 3, 9, 30, 33, 39, 1, 2, 6, 8, 7, 4, 31, 32, 36, 38,
37, 34, 10, 20, 13, 23, 19, 29, 5, 35, 11, 12, 21, 22,
16, 18, 26, 28, 17, 14, 27, 24, 15, 25],
('pri', 75): [
0, 4, 14, 60, 64, 74, 1, 2, 3, 8, 11, 13, 12, 9, 5, 61, 62, 63, 68,
71, 73, 72, 69, 65, 15, 30, 45, 19, 34, 49, 29, 44, 59, 6, 7, 10,
66, 67, 70, 16, 17, 18, 31, 32, 33, 46, 47, 48, 23, 26, 28, 38, 41,
43, 53, 56, 58, 27, 24, 20, 42, 39, 35, 57, 54, 50, 21, 22, 25, 36,
37, 40, 51, 52, 55
],
('pri', 126): [
0, 5, 20, 105, 110, 125, 1, 2, 3, 4, 10, 14, 17, 19, 18, 15, 11, 6,
106, 107, 108, 109, 115, 119, 122, 124, 123, 120, 116, 111, 21, 42,
63, 84, 26, 47, 68, 89, 41, 62, 83, 104, 7, 8, 9, 12, 13, 16, 112,
113, 114, 117, 118, 121, 22, 23, 24, 25, 43, 44, 45, 46, 64, 65,
66, 67, 85, 86, 87, 88, 31, 35, 38, 40, 52, 56, 59, 61, 73, 77, 80,
82, 94, 98, 101, 103, 39, 36, 32, 27, 60, 57, 53, 48, 81, 78, 74,
69, 102, 99, 95, 90, 28, 29, 30, 33, 34, 37, 49, 50, 51, 54, 55,
58, 70, 71, 72, 75, 76, 79, 91, 92, 93, 96, 97, 100
],
('pri', 196): [
0, 6, 27, 168, 174, 195, 1, 2, 3, 4, 5, 12, 17, 21, 24, 26, 25, 22,
18, 13, 7, 169, 170, 171, 172, 173, 180, 185, 189, 192, 194, 193,
190, 186, 181, 175, 28, 56, 84, 112, 140, 34, 62, 90, 118, 146, 55,
83, 111, 139, 167, 8, 9, 10, 11, 14, 15, 16, 19, 20, 23, 176, 177,
178, 179, 182, 183, 184, 187, 188, 191, 29, 30, 31, 32, 33, 57, 58,
59, 60, 61, 85, 86, 87, 88, 89, 113, 114, 115, 116, 117, 141, 142,
143, 144, 145, 40, 45, 49, 52, 54, 68, 73, 77, 80, 82, 96, 101,
105, 108, 110, 124, 129, 133, 136, 138, 152, 157, 161, 164, 166,
53, 50, 46, 41, 35, 81, 78, 74, 69, 63, 109, 106, 102, 97, 91, 137,
134, 130, 125, 119, 165, 162, 158, 153, 147, 36, 37, 38, 39, 42,
43, 44, 47, 48, 51, 64, 65, 66, 67, 70, 71, 72, 75, 76, 79, 92, 93,
94, 95, 98, 99, 100, 103, 104, 107, 120, 121, 122, 123, 126, 127,
128, 131, 132, 135, 148, 149, 150, 151, 154, 155, 156, 159, 160,
163
],
('pri', 288): [
0, 7, 35, 252, 259, 287, 1, 2, 3, 4, 5, 6, 14, 20, 25, 29, 32, 34,
33, 30, 26, 21, 15, 8, 253, 254, 255, 256, 257, 258, 266, 272, 277,
281, 284, 286, 285, 282, 278, 273, 267, 260, 36, 72, 108, 144, 180,
216, 43, 79, 115, 151, 187, 223, 71, 107, 143, 179, 215, 251, 9,
10, 11, 12, 13, 16, 17, 18, 19, 22, 23, 24, 27, 28, 31, 261, 262,
263, 264, 265, 268, 269, 270, 271, 274, 275, 276, 279, 280, 283,
37, 38, 39, 40, 41, 42, 73, 74, 75, 76, 77, 78, 109, 110, 111, 112,
113, 114, 145, 146, 147, 148, 149, 150, 181, 182, 183, 184, 185,
186, 217, 218, 219, 220, 221, 222, 50, 56, 61, 65, 68, 70, 86, 92,
97, 101, 104, 106, 122, 128, 133, 137, 140, 142, 158, 164, 169,
173, 176, 178, 194, 200, 205, 209, 212, 214, 230, 236, 241, 245,
248, 250, 69, 66, 62, 57, 51, 44, 105, 102, 98, 93, 87, 80, 141,
138, 134, 129, 123, 116, 177, 174, 170, 165, 159, 152, 213, 210,
206, 201, 195, 188, 249, 246, 242, 237, 231, 224, 45, 46, 47, 48,
49, 52, 53, 54, 55, 58, 59, 60, 63, 64, 67, 81, 82, 83, 84, 85, 88,
89, 90, 91, 94, 95, 96, 99, 100, 103, 117, 118, 119, 120, 121,
124, 125, 126, 127, 130, 131, 132, 135, 136, 139, 153, 154, 155,
156, 157, 160, 161, 162, 163, 166, 167, 168, 171, 172, 175, 189,
190, 191, 192, 193, 196, 197, 198, 199, 202, 203, 204, 207, 208,
211, 225, 226, 227, 228, 229, 232, 233, 234, 235, 238, 239, 240,
243, 244, 247
],
('pri', 405): [
0, 8, 44, 360, 368, 404, 1, 2, 3, 4, 5, 6, 7, 16, 23, 29, 34, 38,
41, 43, 42, 39, 35, 30, 24, 17, 9, 361, 362, 363, 364, 365, 366,
367, 376, 383, 389, 394, 398, 401, 403, 402, 399, 395, 390, 384,
377, 369, 45, 90, 135, 180, 225, 270, 315, 53, 98, 143, 188, 233,
278, 323, 89, 134, 179, 224, 269, 314, 359, 10, 11, 12, 13, 14, 15,
18, 19, 20, 21, 22, 25, 26, 27, 28, 31, 32, 33, 36, 37, 40, 370,
371, 372, 373, 374, 375, 378, 379, 380, 381, 382, 385, 386, 387,
388, 391, 392, 393, 396, 397, 400, 46, 47, 48, 49, 50, 51, 52, 91,
92, 93, 94, 95, 96, 97, 136, 137, 138, 139, 140, 141, 142, 181,
182, 183, 184, 185, 186, 187, 226, 227, 228, 229, 230, 231, 232,
271, 272, 273, 274, 275, 276, 277, 316, 317, 318, 319, 320, 321,
322, 61, 68, 74, 79, 83, 86, 88, 106, 113, 119, 124, 128, 131, 133,
151, 158, 164, 169, 173, 176, 178, 196, 203, 209, 214, 218, 221,
223, 241, 248, 254, 259, 263, 266, 268, 286, 293, 299, 304, 308,
311, 313, 331, 338, 344, 349, 353, 356, 358, 87, 84, 80, 75, 69,
62, 54, 132, 129, 125, 120, 114, 107, 99, 177, 174, 170, 165, 159,
152, 144, 222, 219, 215, 210, 204, 197, 189, 267, 264, 260, 255,
249, 242, 234, 312, 309, 305, 300, 294, 287, 279, 357, 354, 350,
345, 339, 332, 324, 55, 56, 57, 58, 59, 60, 63, 64, 65, 66, 67, 70,
71, 72, 73, 76, 77, 78, 81, 82, 85, 100, 101, 102, 103, 104, 105,
108, 109, 110, 111, 112, 115, 116, 117, 118, 121, 122, 123, 126,
127, 130, 145, 146, 147, 148, 149, 150, 153, 154, 155, 156, 157,
160, 161, 162, 163, 166, 167, 168, 171, 172, 175, 190, 191, 192,
193, 194, 195, 198, 199, 200, 201, 202, 205, 206, 207, 208, 211,
212, 213, 216, 217, 220, 235, 236, 237, 238, 239, 240, 243, 244,
245, 246, 247, 250, 251, 252, 253, 256, 257, 258, 261, 262, 265,
280, 281, 282, 283, 284, 285, 288, 289, 290, 291, 292, 295, 296,
297, 298, 301, 302, 303, 306, 307, 310, 325, 326, 327, 328, 329,
330, 333, 334, 335, 336, 337, 340, 341, 342, 343, 346, 347, 348,
351, 352, 355
]
}
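# Illustrative note, not part of the original writer: each map lists, for every
# node of the VTK cell, the index of the corresponding PyFR node (e.g. the
# linear quad's VTK corners are PyFR points 0, 1, 3, 2), so converting a set of
# points to VTK ordering is a single fancy-indexing step such as
# pts[self._nodemaps[etype, n]].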
def __init__(self, args):
super().__init__(args)
self.dtype = np.dtype(args.precision).type
# Divisor for each element type
self.etypes_div = defaultdict(lambda: self.divisor)
# Choose whether to output subdivided cells or high order VTK cells
if args.order or args.divisor is None:
self.ho_output = True
self.divisor = args.order or self.cfg.getint('solver', 'order')
self.vtkfile_version = '2.1'
self._get_npts_ncells_nnodes = self._get_npts_ncells_nnodes_ho
self.etypes_div['pyr'] += 2
else:
self.ho_output = False
self.divisor = args.divisor
self.vtkfile_version = '0.1'
self._get_npts_ncells_nnodes = self._get_npts_ncells_nnodes_lin
# Solutions need a separate processing pipeline to other data
if self.dataprefix == 'soln':
self._pre_proc_fields = self._pre_proc_fields_soln
self._post_proc_fields = self._post_proc_fields_soln
self._soln_fields = list(self.elementscls.privarmap[self.ndims])
self._vtk_vars = list(self.elementscls.visvarmap[self.ndims])
self.tcurr = self.stats.getfloat('solver-time-integrator', 'tcurr')
# Otherwise we're dealing with simple scalar data
else:
self._pre_proc_fields = self._pre_proc_fields_scal
self._post_proc_fields = self._post_proc_fields_scal
self._soln_fields = self.stats.get('data', 'fields').split(',')
self._vtk_vars = [(k, [k]) for k in self._soln_fields]
self.tcurr = None
def _pre_proc_fields_soln(self, name, mesh, soln):
# Convert from conservative to primitive variables
return np.array(self.elementscls.con_to_pri(soln, self.cfg))
def _pre_proc_fields_scal(self, name, mesh, soln):
return soln
def _post_proc_fields_soln(self, vsoln):
# Primitive and visualisation variable maps
privarmap = self.elementscls.privarmap[self.ndims]
visvarmap = self.elementscls.visvarmap[self.ndims]
# Prepare the fields
fields = []
for fnames, vnames in visvarmap:
ix = [privarmap.index(vn) for vn in vnames]
fields.append(vsoln[ix])
return fields
def _post_proc_fields_scal(self, vsoln):
return [vsoln[self._soln_fields.index(v)] for v, _ in self._vtk_vars]
def _get_npts_ncells_nnodes_lin(self, sk):
etype, neles = self.soln_inf[sk][0], self.soln_inf[sk][1][2]
# Get the shape and sub division classes
shapecls = subclass_where(BaseShape, name=etype)
subdvcls = subclass_where(BaseShapeSubDiv, name=etype)
# Number of vis points
npts = shapecls.nspts_from_order(self.etypes_div[etype] + 1)*neles
# Number of sub cells and nodes
ncells = len(subdvcls.subcells(self.etypes_div[etype]))*neles
nnodes = len(subdvcls.subnodes(self.etypes_div[etype]))*neles
return npts, ncells, nnodes
def _get_npts_ncells_nnodes_ho(self, sk):
etype, neles = self.soln_inf[sk][0], self.soln_inf[sk][1][2]
# Fallback to subdivision for pyramids
if etype == 'pyr':
return self._get_npts_ncells_nnodes_lin(sk)
# Get the shape and sub division classes
shapecls = subclass_where(BaseShape, name=etype)
# Total number of vis points
npts = neles*shapecls.nspts_from_order(self.etypes_div[etype] + 1)
return npts, neles, npts
def _get_array_attrs(self, sk=None):
dtype = 'Float32' if self.dtype == np.float32 else 'Float64'
dsize = np.dtype(self.dtype).itemsize
vvars = self._vtk_vars
names = ['', 'connectivity', 'offsets', 'types', 'Partition']
types = [dtype, 'Int32', 'Int32', 'UInt8', 'Int32']
comps = ['3', '', '', '', '1']
for fname, varnames in vvars:
names.append(fname.title())
types.append(dtype)
comps.append(str(len(varnames)))
# If a solution has been given then compute the sizes
if sk:
npts, ncells, nnodes = self._get_npts_ncells_nnodes(sk)
nb = npts*dsize
sizes = [3*nb, 4*nnodes, 4*ncells, ncells, 4*ncells]
sizes.extend(len(varnames)*nb for fname, varnames in vvars)
return names, types, comps, sizes
else:
return names, types, comps
@memoize
def _get_shape(self, name, nspts):
shapecls = subclass_where(BaseShape, name=name)
return shapecls(nspts, self.cfg)
@memoize
def _get_std_ele(self, name, nspts):
return self._get_shape(name, nspts).std_ele(self.etypes_div[name])
@memoize
def _get_mesh_op(self, name, nspts, svpts):
shape = self._get_shape(name, nspts)
return shape.sbasis.nodal_basis_at(svpts).astype(self.dtype)
@memoize
def _get_soln_op(self, name, nspts, svpts):
shape = self._get_shape(name, nspts)
return shape.ubasis.nodal_basis_at(svpts).astype(self.dtype)
def write_out(self):
name, extn = os.path.splitext(self.outf)
parallel = extn == '.pvtu'
parts = defaultdict(list)
for sk, (etype, shape) in self.soln_inf.items():
part = int(sk.split('_p')[-1])
pname = f'{name}_p{part}.vtu' if parallel else self.outf
parts[pname].append((part, f'spt_{etype}_p{part}', sk))
write_s_to_fh = lambda s: fh.write(s.encode())
for pfn, misil in parts.items():
with open(pfn, 'wb') as fh:
write_s_to_fh('<?xml version="1.0" ?>\n<VTKFile '
'byte_order="LittleEndian" '
'type="UnstructuredGrid" '
f'version="{self.vtkfile_version}">\n'
'<UnstructuredGrid>\n')
if self.tcurr is not None and not parallel:
self._write_time_value(write_s_to_fh)
# Running byte-offset for appended data
off = 0
# Header
for pn, mk, sk in misil:
off = self._write_serial_header(fh, sk, off)
write_s_to_fh('</UnstructuredGrid>\n'
'<AppendedData encoding="raw">\n_')
# Data
for pn, mk, sk in misil:
self._write_data(fh, pn, mk, sk)
write_s_to_fh('\n</AppendedData>\n</VTKFile>')
if parallel:
with open(self.outf, 'wb') as fh:
write_s_to_fh('<?xml version="1.0" ?>\n<VTKFile '
'byte_order="LittleEndian" '
'type="PUnstructuredGrid" '
f'version="{self.vtkfile_version}">\n'
'<PUnstructuredGrid>\n')
if self.tcurr is not None:
self._write_time_value(write_s_to_fh)
# Header
self._write_parallel_header(fh)
# Constituent pieces
for pfn in parts:
bname = os.path.basename(pfn)
write_s_to_fh(f'<Piece Source="{bname}"/>\n')
write_s_to_fh('</PUnstructuredGrid>\n</VTKFile>\n')
def _write_darray(self, array, vtuf, dtype):
array = array.astype(dtype)
|
np.uint32(array.nbytes)
|
numpy.uint32
|
from __future__ import division
import random
random.seed(1)
import inspect
from collections import namedtuple, defaultdict, OrderedDict
import numpy as np
np.random.seed(1)
from concise.utils.plot import seqlogo, seqlogo_fig
try:
from sklearn.model_selection import train_test_split # sklearn >= 0.18
except ImportError:
from sklearn.cross_validation import train_test_split # sklearn < 0.18
from simdna import simulations
from simdna.synthetic import StringEmbeddable
from dragonn.utils import get_motif_scores, one_hot_encode
from dragonn.models import SequenceDNN
from dragonn.plot import add_letters_to_axis, plot_motif
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
def plot_learning_curve(history):
train_losses=history.history['loss']
valid_losses=history.history['val_loss']
min_loss_indx = min(enumerate(valid_losses), key=lambda x: x[1])[0]
f = plt.figure(figsize=(10, 4))
ax = f.add_subplot(1, 1, 1)
ax.plot(range(len(train_losses)), train_losses, 'b', label='Training',lw=4)
ax.plot(range(len(train_losses)), valid_losses, 'r', label='Validation', lw=4)
ax.plot([min_loss_indx, min_loss_indx], [0, 1.0], 'k--', label='Early Stop')
ax.legend(loc="upper right")
ax.set_ylabel("Loss")
ax.set_ylim((min(train_losses+valid_losses),max(train_losses+valid_losses)))
ax.set_xlabel("Epoch")
plt.show()
def plot_ism(ism_mat,title,vmin=None,vmax=None):
# create discrete colormap of ISM scores
extent = [0, ism_mat.shape[0], 0, 100*ism_mat.shape[1]]
plt.figure(figsize=(20,3))
if vmin is None:
vmin=np.amin(ism_mat)
if vmax is None:
vmax=np.amax(ism_mat)
plt.imshow(ism_mat.T,extent=extent,vmin=vmin, vmax=vmax)
plt.xlabel("Sequence base")
plt.ylabel("ISM Score")
plt.title(title)
plt.yticks(np.arange(50,100*ism_mat.shape[1],100),("A","C","G","T"))
plt.set_cmap('RdBu')
plt.colorbar()
plt.show()
def plot_sequence_filters(model):
fig = plt.figure(figsize=(15, 8))
fig.subplots_adjust(hspace=0.1, wspace=0.1)
conv_filters=model.layers[0].get_weights()[0]
#transpose for plotting
conv_filters=np.transpose(conv_filters,(3,1,2,0)).squeeze(axis=-1)
num_plots_per_axis = int(len(conv_filters)**0.5) + 1
for i, conv_filter in enumerate(conv_filters):
ax = fig.add_subplot(num_plots_per_axis, num_plots_per_axis, i+1)
add_letters_to_axis(ax, conv_filter)
ax.axis("off")
ax.set_title("Filter %s" % (str(i+1)))
def plot_seq_importance(grads, x, xlim=None, ylim=None, layer_idx=-2, figsize=(25, 3),title=""):
"""Plot sequence importance score
Args:
grads: either deeplift or gradientxinput score matrix
x: one-hot encoded DNA sequence
xlim: restrict the plotted xrange
figsize: matplotlib figure size
"""
grads=grads.squeeze()
x=x.squeeze()
seq_len = x.shape[0]
vals_to_plot=grads*x
if xlim is None:
xlim = (0, seq_len)
if ylim is None:
ylim= (np.amin(vals_to_plot),np.amax(vals_to_plot))
seqlogo_fig(vals_to_plot, figsize=figsize)
plt.xticks(list(range(xlim[0], xlim[1], 5)))
plt.xlim(xlim)
plt.ylim(ylim)
plt.title(title)
Data = namedtuple('Data', ('X_train', 'X_valid', 'X_test',
'train_embeddings', 'valid_embeddings', 'test_embeddings',
'y_train', 'y_valid', 'y_test',
'motif_names'))
def get_available_simulations():
return [function_name for function_name in dir(simulations)
if "simulate" in function_name]
def print_available_simulations():
for function_name in get_available_simulations():
print(function_name)
def get_simulation_function(simulation_name):
if simulation_name in get_available_simulations():
return getattr(simulations, simulation_name)
else:
print("%s is not available. Available simulations are:" % (simulation_name))
print_available_simulations()
def print_simulation_info(simulation_name):
simulation_function = get_simulation_function(simulation_name)
if simulation_function is not None:
print(simulation_function.__doc__)
def get_simulation_data(simulation_name, simulation_parameters,
test_set_size=4000, validation_set_size=3200):
simulation_function = get_simulation_function(simulation_name)
sequences, y, embeddings = simulation_function(**simulation_parameters)
if simulation_name == "simulate_heterodimer_grammar":
motif_names = [simulation_parameters["motif1"],
simulation_parameters["motif2"]]
elif simulation_name == "simulate_multi_motif_embedding":
motif_names = simulation_parameters["motif_names"]
else:
motif_names = [simulation_parameters["motif_name"]]
train_sequences, test_sequences, train_embeddings, test_embeddings, y_train, y_test = \
train_test_split(sequences, embeddings, y, test_size=test_set_size)
train_sequences, valid_sequences, train_embeddings, valid_embeddings, y_train, y_valid = \
train_test_split(train_sequences, train_embeddings, y_train, test_size=validation_set_size)
X_train = one_hot_encode(train_sequences)
X_valid = one_hot_encode(valid_sequences)
X_test = one_hot_encode(test_sequences)
return Data(X_train, X_valid, X_test, train_embeddings, valid_embeddings, test_embeddings,
y_train, y_valid, y_test, motif_names)
def inspect_SequenceDNN():
print(inspect.getdoc(SequenceDNN))
print("\nAvailable methods:\n")
for (method_name, _) in inspect.getmembers(SequenceDNN, predicate=inspect.ismethod):
if method_name != "__init__":
print(method_name)
def get_SequenceDNN(SequenceDNN_parameters):
return SequenceDNN(**SequenceDNN_parameters)
def train_SequenceDNN(dnn, simulation_data):
assert issubclass(type(simulation_data), tuple)
random.seed(1)
|
np.random.seed(1)
|
numpy.random.seed
|
#!/usr/bin/env python
from __future__ import print_function
import roslib
roslib.load_manifest('mct_tracking_2d')
import rospy
import sys
import functools
import threading
import math
import cv
import numpy
import Image as PILImage
import ImageDraw as PILImageDraw
import ImageFont as PILImageFont
import time
import mct_introspection
from cv_bridge.cv_bridge import CvBridge
from mct_utilities import file_tools
from mct_transform_2d import transform_2d
from sensor_msgs.msg import Image
from mct_msg_and_srv.msg import Point2d
from mct_msg_and_srv.msg import ThreePointTracker
from mct_msg_and_srv.msg import ThreePointTrackerRaw
from std_srvs.srv import Empty
from std_srvs.srv import EmptyResponse
from mct_msg_and_srv.srv import GetRandSyncSignal
class ThreePointTracker_Synchronizer:
"""
Synchronization node for all three point trackers in a given tracking region.
"""
def __init__(self,region,max_seq_age=200):
self.lock = threading.Lock()
self.region = region
regions_dict = file_tools.read_tracking_2d_regions()
self.camera_list = regions_dict[region]
self.camera_list.sort()
self.create_camera_to_tracking_dict()
self.latest_seq = None
self.max_seq_age = max_seq_age
self.tracking_pts_pool = {}
self.tracking_pts_roi_size = rospy.get_param('/three_point_tracker_params/roi_size', (150,150))
# Color and font for tracking points image
self.magenta = (255,255,0)
self.cv_text_font = cv.InitFont(cv.CV_FONT_HERSHEY_TRIPLEX, 0.6, 0.6, thickness=0)
# Font for PIL tracking info image
self.info_image_size = (180,100)
self.font = PILImageFont.truetype("/usr/share/fonts/truetype/ubuntu-font-family/Ubuntu-B.ttf", 16)
# Get transforms from cameras to tracking and stitched image planes
self.tf2d = transform_2d.Transform2d()
self.bridge = CvBridge()
self.ready = False
rospy.init_node('three_point_tracker_synchronizer', log_level=rospy.DEBUG)
# Subscribe to raw tracking pts topics
self.tracking_pts_sub = {}
for camera, topic in self.camera_to_tracking.iteritems():
handler = functools.partial(self.tracking_pts_handler, camera)
self.tracking_pts_sub[camera] = rospy.Subscriber(
topic,
ThreePointTrackerRaw,
handler
)
# Create publishers
self.tracking_pts_pub = rospy.Publisher('tracking_pts', ThreePointTracker)
self.image_tracking_pts = None
self.image_tracking_pts_pub = rospy.Publisher('image_tracking_pts', Image)
self.image_tracking_info_pub = rospy.Publisher('image_tracking_info', Image)
# Setup rand sync service
rospy.wait_for_service('/get_rand_sync_signal')
self.rand_sync_srv = rospy.ServiceProxy('/get_rand_sync_signal',GetRandSyncSignal)
# Setup reset service - needs to be called anytime the camera trigger is
# stopped - before it is restarted. Empties buffers of images and sequences.
self.reset_srv = rospy.Service('reset_tracker_synchronizer', Empty, self.reset_handler)
self.ready = True
def create_camera_to_tracking_dict(self):
"""
Creates a dictionary relating the cameras in the tracking region to their
corresponding three point tracker nodes.
"""
self.camera_to_tracking = {}
for camera in self.camera_list:
camera_fullpath_topic = mct_introspection.get_camera_fullpath_topic(camera)
tracking_pts_topic = '{0}/tracking_pts'.format(camera_fullpath_topic)
self.camera_to_tracking[camera] = tracking_pts_topic
def reset_handler(self,req):
"""
Reset service handler. Empties the tracking_pts_pool.
"""
with self.lock:
self.latest_seq = None
self.tracking_pts_pool = {}
return EmptyResponse()
def tracking_pts_handler(self,camera,msg):
"""
Handler for messages from the individual tracker nodes. Sticks the tracking
point data into a dictionary by sequence number and camera.
"""
if not self.ready:
return
with self.lock:
self.latest_seq = msg.data.seq
try:
self.tracking_pts_pool[msg.data.seq][camera] = msg
except KeyError:
self.tracking_pts_pool[msg.data.seq] = {camera: msg}
def process_tracking_pts(self,tracking_pts_dict):
"""
Determines whether the object has been found in any of the three point
trackers for the region. If it has been found then the best tracking point
data is selected in a winner-takes-all fashion. The best tracking point
data is that which is nearest to the center of the image on the camera upon
which it was captured.
"""
# Get time stamp (secs, nsecs) - always from the same camera to avoid jumps due to
# possible system clock differences.
time_camera = self.camera_list[0]
time_camera_secs = tracking_pts_dict[time_camera].data.secs
time_camera_nsecs = tracking_pts_dict[time_camera].data.nsecs
# Get list of messages in which the object was found
found_list = [msg for msg in tracking_pts_dict.values() if msg.data.found]
tracking_pts_msg = ThreePointTracker()
if found_list:
# Object found - select object with largest ROI or if the ROIs are of equal size
# select the object whose distance to the center of the image is the smallest
found_list.sort(cmp=tracking_pts_sort_cmp)
best = found_list[0]
camera = best.data.camera
# Get coordinates of points in tracking and stitching planes
best_points_array = numpy.array([(p.x,p.y) for p in best.data.points])
pts_anchor_plane = self.tf2d.camera_pts_to_anchor_plane(camera, best_points_array)
pts_stitching_plane = self.tf2d.camera_pts_to_stitching_plane(camera, best_points_array)
pts_anchor_plane = [Point2d(p[0],p[1]) for p in list(pts_anchor_plane)]
pts_stitching_plane = [Point2d(p[0],p[1]) for p in list(pts_stitching_plane)]
# Get orientation angle and mid point of object in anchor and stitching planes
angle = get_angle(pts_anchor_plane)
midpt_anchor_plane = get_midpoint(pts_anchor_plane)
midpt_stitching_plane = get_midpoint(pts_stitching_plane)
#self.camera_fail = max([(err,cam) for cam, err in self.max_error_by_camera.items()])[1]
# Get size of tracking points image in the anchor (tracking) plane
roi = best.data.roi
x0, x1 = roi[0], roi[0] + roi[2]
y0, y1 = roi[1], roi[1] + roi[3]
bndry_camera = [(x0,y0), (x1,y0), (x1,y1), (x0,y1)]
bndry_camera_array = numpy.array(bndry_camera)
bndry_anchor = self.tf2d.camera_pts_to_anchor_plane(camera,bndry_camera_array)
bndry_stitching = self.tf2d.camera_pts_to_stitching_plane(camera,bndry_camera_array)
bndry_anchor = [tuple(x) for x in list(bndry_anchor)]
bndry_stitching = [tuple(x) for x in list(bndry_stitching)]
dx1 = abs(bndry_anchor[1][0] - bndry_anchor[0][0])
dx2 = abs(bndry_anchor[3][0] - bndry_anchor[2][0])
dy1 = abs(bndry_anchor[2][1] - bndry_anchor[1][1])
dy2 = abs(bndry_anchor[3][1] - bndry_anchor[0][1])
dx_max = max([dx1, dx2])
dy_max = max([dy1, dy2])
dim_max = max([dx_max, dy_max])
# Convert tracking points image to opencv image.
image_tracking_pts = self.bridge.imgmsg_to_cv(best.image,desired_encoding="passthrough")
image_tracking_pts = cv.GetImage(image_tracking_pts)
image_size = cv.GetSize(image_tracking_pts)
image_dim_max = max(image_size)
# Get matrix for homography from camera to anchor plane
tf_matrix = self.tf2d.get_camera_to_anchor_plane_tf(camera)
# Shift for local ROI
tf_shift = numpy.array([
[1.0, 0.0, roi[0]],
[0.0, 1.0, roi[1]],
[0.0, 0.0, 1.0],
])
tf_matrix = numpy.dot(tf_matrix, tf_shift)
# Get scaling factor
shift_x = -min([x for x,y in bndry_anchor])
shift_y = -min([y for x,y in bndry_anchor])
scale_factor = float(image_dim_max)/dim_max
# Scale and shift transform so that homography maps the tracking points
# sub-image into an image_size image starting at coord. (0,0)
tf_shift_and_scale = numpy.array([
[scale_factor, 0.0, scale_factor*shift_x],
[ 0.0, scale_factor, scale_factor*shift_y],
[ 0.0, 0.0, 1.0],
])
tf_matrix =
|
numpy.dot(tf_shift_and_scale, tf_matrix)
|
numpy.dot
|
#Standard Libraries
from math import acos, pi, ceil, sin,cos,sqrt
import numpy as np
import re
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
plt.style.use("ggplot")
#PyXtal imports
from pyxtal.database.element import Element
def angle(a,b):
""" calculate the angle between vector a and b """
return acos(np.dot(a,b)/np.linalg.norm(a)/np.linalg.norm(b))
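# Quick check (illustrative): two orthogonal unit vectors give pi/2, e.g.
# angle(np.array([1, 0, 0]), np.array([0, 1, 0])) returns about 1.5708.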
class crystal(object):
"""a class of crystal structure.
Attributes:
cell_para: a,b,c, alpha, beta, gamma
cell_matrix: 3*3 matrix
rec_matrix: reciprocal of cell matrix
atom_type: elemental type (e.g. Na Cl)
composition: chemical composition (e.g., [1,1])
coordinate: atomic positions (e.g., [[0,0,0],[0.5,0.5,0.5]])
"""
def __init__(self, fileformat='POSCAR', filename=None, \
lattice=None, atom_type=None, composition=None, coordinate=None):
"""Return a structure object with the proper structures info"""
if fileformat == 'POSCAR':
self.from_POSCAR(filename)
elif fileformat == 'cif':
self.from_cif(filename)
else:
self.from_dict(lattice, atom_type, composition, coordinate)
def from_cif(self, filename):
cif_struc = cif(filename)
lattice = self.para2matrix(cif_struc.cell_para)
composition = cif_struc.composition
coordinate = cif_struc.coordinate
atom_type = cif_struc.atom_type
self.from_dict(lattice, atom_type, composition, coordinate)
def from_POSCAR(self, filename):
f = open(filename)
tag = f.readline()
lattice_constant = float(f.readline().split()[0])
# Now the lattice vectors
a = []
for ii in range(3):
s = f.readline().split()
floatvect = float(s[0]), float(s[1]), float(s[2])
a.append(floatvect)
lattice = np.array(a) * lattice_constant
# Number of atoms.
atom_type = f.readline().split()
comp = f.readline().split()
composition = []
if len(atom_type)==len(comp):
for num in comp:
composition.append(int(num))
else:
print('ValueError: POSCAR symbols and composition are inconsistent')
ac_type = f.readline().split()
# Check if atom coordinates are cartesian or direct
cartesian = ac_type[0].lower() == "c" or ac_type[0].lower() == "k"
tot_natoms = sum(composition)
coordinate = np.empty((tot_natoms, 3))
for atom in range(tot_natoms):
ac = f.readline().split()
coordinate[atom] = (float(ac[0]), float(ac[1]), float(ac[2]))
# Done with all reading
f.close()
if cartesian:
coordinate *= lattice_constant
cell_para = []
self.from_dict(lattice, atom_type, composition, coordinate)
def from_dict(self, lattice, atom_type, composition, coordinate):
self.cell_matrix = np.array(lattice)
self.atom_type = atom_type
self.composition = np.array(composition)
self.coordinate = np.array(coordinate)
self.cell_para = self.matrix2para(self.cell_matrix)
self.rec_matrix = self.rec_lat(self.cell_matrix)
self.name = ''
for ele, num in zip(self.atom_type, self.composition):
self.name += ele
if num > 1:
self.name += str(num)
@staticmethod
def rec_lat(matrix):
""" calculate the reciprocal lattice """
rec_lat = np.zeros([3,3])
V = np.linalg.det(matrix)
rec_lat[0] = np.cross(matrix[1], matrix[2])/V
rec_lat[1] = np.cross(matrix[2], matrix[0])/V
rec_lat[2] = np.cross(matrix[0], matrix[1])/V
return rec_lat #* 2 * pi
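# Illustrative check, not part of the class: with the identity as the cell
# matrix, V = 1 and each cross product returns the remaining basis vector, so
# rec_lat(np.eye(3)) is again the identity (up to the commented-out 2*pi factor).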
@staticmethod
def matrix2para(matrix):
""" 3x3 representation -> 1x6 (a, b, c, alpha, beta, gamma)"""
cell_para = np.zeros(6)
cell_para[0] = np.linalg.norm(matrix[0])
cell_para[1] = np.linalg.norm(matrix[1])
cell_para[2] = np.linalg.norm(matrix[2])
cell_para[5] = angle(matrix[0], matrix[1])
cell_para[4] = angle(matrix[0], matrix[2])
cell_para[3] = angle(matrix[1], matrix[2])
return cell_para
@staticmethod
def para2matrix(cell_para):
""" 1x6 (a, b, c, alpha, beta, gamma) -> 3x3 representation -> """
matrix = np.zeros([3,3])
matrix[0][0] = cell_para[0]
matrix[1][0] = cell_para[1]*cos(cell_para[5])
matrix[1][1] = cell_para[1]*sin(cell_para[5])
matrix[2][0] = cell_para[2]*cos(cell_para[4])
matrix[2][1] = cell_para[2]*cos(cell_para[3])*sin(cell_para[4])
matrix[2][2] = sqrt(cell_para[2]**2 - matrix[2][0]**2 - matrix[2][1]**2)
return matrix
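# Usage sketch (illustrative): matrix2para and para2matrix are mutual inverses
# up to rounding, e.g. for a cubic cell
#   para = np.array([4.0, 4.0, 4.0, pi/2, pi/2, pi/2])
#   np.allclose(crystal.matrix2para(crystal.para2matrix(para)), para)  # True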
class cif(object):
"""a class of cif reader
Attributes:
wavelength: default 1.54181 A (Cu-Ka)
max2theta: the range of 2theta angle
intensity: intensities for all hkl planes
pxrd: powder diffraction data
"""
def __init__(self, filename):
"""Return a XRD object with the proper info"""
self.from_file(filename)
self.parse_cell()
self.parse_atom()
self.apply_symops()
def from_file(self, filename):
cif = np.genfromtxt(filename, dtype=str, delimiter='\n')
# 3 modes in each flag:
# 0: not started;
# 1: reading;
# 2: done
flags = {'cell':0, 'symops':0, 'atom':0}
atom = {}
cell = {}
symops = {'string':[], 'matrix':[]}
for lines in cif:
if 'loop_' in lines:
#if a loop_ line starts, any flag currently set to reading (1) switches to done (2)
for item in flags.keys():
if flags[item] == 1:
flags[item] = 2
elif '_cell_length_' in lines or '_cell_angle_' in lines:
#_cell_length_a 4.77985
flags['cell'] = 1
cell_str = lines.split()
item = cell_str[0].replace(' ','')
value = float(cell_str[1].split("(")[0])
cell[item] = value
elif '_symmetry_equiv_pos_as_xyz' in lines:
#_symmetry_equiv_pos_as_xyz
flags['symops'] = 1
elif '_space_group_symop_operation_xyz' in lines:
#_space_group_symop_operation_xyz
flags['symops'] = 1
elif flags['symops'] == 1:
#1, 'x, y, z'
# x, -y, z
raw_line = lines.strip().strip("'").split(' ', 1)
if raw_line[0].isdigit():
sym_str = raw_line[1].strip("'")
else:
sym_str = lines.strip().strip("'").replace(' ', '')
sym_str = sym_str.replace("'","")
symops['string'].append(sym_str)
symops['matrix'].append(self.xyz2sym_ops(sym_str))
elif '_atom_site' in lines:
flags['atom'] = 1
atom_str = lines.replace(' ','')
item = atom_str
atom[item] = []
elif flags['atom'] == 1:
raw_line = lines.split()
for i, item in enumerate(atom.keys()):
raw_text = raw_line[i]
if item.find('fract')>0:
value = float(raw_text.split("(")[0])
elif item.find('symbol')>0:
m_symbol = re.compile("([A-Z]+[a-z]*)")
value = str(m_symbol.findall(raw_text)).strip("[]").strip("''")
#print(raw_text, value)
else:
value = raw_text
atom[item].append(value)
elif flags['cell'] + flags['symops'] + flags['atom'] == 6:
break
self.cell = cell
self.atom = atom
self.symops = symops
def parse_cell(self):
cell_para = np.zeros(6)
cell = self.cell
for item in cell.keys():
if item.find('_length_a') > 0:
cell_para[0] = cell[item]
elif item.find('_length_b') > 0:
cell_para[1] = cell[item]
elif item.find('_length_c') > 0:
cell_para[2] = cell[item]
elif item.find('_angle_alpha') > 0:
cell_para[3] = np.radians(cell[item])
elif item.find('_angle_beta') > 0:
cell_para[4] = np.radians(cell[item])
elif item.find('_angle_gamma') > 0:
cell_para[5] = np.radians(cell[item])
self.cell_para = cell_para
def parse_atom(self):
atom = self.atom
N_atom = len(atom['_atom_site_fract_x'])
cif_xyz = np.zeros([N_atom, 3])
for item in atom.keys():
if item.find('_fract_x') > 0:
cif_xyz[:,0] = np.array(atom[item])
elif item.find('_fract_y') > 0:
cif_xyz[:,1] = np.array(atom[item])
elif item.find('_fract_z') > 0:
cif_xyz[:,2] = np.array(atom[item])
self.cif_xyz = cif_xyz
#generates all coordinates from rotation matrices and translation vectors
def apply_symops(self):
fract_xyz = self.cif_xyz
symops_matrix = self.symops['matrix']
atom_type = self.atom['_atom_site_type_symbol']
sym_coordinates = {}
for item in atom_type:
sym_coordinates[item] = []
for ii,item in enumerate(atom_type):
for mat_vec in symops_matrix:
sym_temp = np.dot(mat_vec[0], fract_xyz[ii].transpose()) + mat_vec[1]
sym_coordinates[item].append(sym_temp)
self.coordinate, self.composition, self.atom_type = \
self.remove_duplicate(sym_coordinates)
#remove equivalent points and keep the unique ones
#get the numbers of atoms per species
@staticmethod
def remove_duplicate(sym_coordinates):
coordinate = []
composition = []
atom_type = []
for item in sym_coordinates.keys():
atom_type.append(item)
raw_equiv =
|
np.array(sym_coordinates[item])
|
numpy.array
|
import numpy
import ctypes
def find_dyn_parm_deps(dof, parm_num, regressor_func):
'''
Find dynamic parameter dependencies (i.e., regressor column dependencies).
'''
samples = 10000
round = 10
pi = numpy.pi
Z = numpy.zeros((dof * samples, parm_num))
for i in range(samples):
q = [float(
|
numpy.random.random()
|
numpy.random.random
|
"""
This library collects a bunch of Optimizers inspired by the paper
The older optimizers are stored in Optimizer.py. Those classes are equipped with a `step_simple` function taking in
scores and codes to generate the next batch of codes.
"""
# from matplotlib import use as use_backend
# use_backend("Agg")
import matplotlib.pylab as plt
# plt.ioff()
#
import os
import time
import sys
# import utils
import numpy as np
from numpy.linalg import norm
from numpy.random import randn
from numpy import sqrt, zeros, abs, floor, log, log2, eye, exp
from geometry_utils import ExpMap, VecTransport, radial_proj, orthogonalize, renormalize
orig_stdout = sys.stdout
#%% Classic Optimizers as Reference
class CholeskyCMAES:
""" Note this is a variant of CMAES Cholesky suitable for high dimensional optimization"""
def __init__(self, space_dimen, population_size=None, init_sigma=3.0, init_code=None, Aupdate_freq=10,
maximize=True, random_seed=None, optim_params={}):
N = space_dimen
self.space_dimen = space_dimen
# Overall control parameter
self.maximize = maximize # if the program is to maximize or to minimize
# Strategy parameter setting: Selection
if population_size is None:
self.lambda_ = int(4 + floor(3 * log2(N))) # population size, offspring number
# the relation between dimension and population size.
else:
self.lambda_ = population_size # use custom specified population size
mu = self.lambda_ / 2 # number of parents/points for recombination
# Select half the population size as parents
weights = log(mu + 1 / 2) - (log(np.arange(1, 1 + floor(mu)))) # muXone array for weighted recombination
self.mu = int(floor(mu))
self.weights = weights / sum(weights) # normalize recombination weights array
mueff = self.weights.sum() ** 2 / sum(self.weights ** 2) # variance-effectiveness of sum w_i x_i
self.weights.shape = (1, -1) # Add the 1st dim 1 to the weights mat
self.mueff = mueff # add to class variable
self.sigma = init_sigma # Note by default, sigma is None here.
print("Space dimension: %d, Population size: %d, Select size:%d, Optimization Parameters:\nInitial sigma: %.3f"
% (self.space_dimen, self.lambda_, self.mu, self.sigma))
# Strategy parameter setting: Adaptation
self.cc = 4 / (N + 4) # 0.0009756 by default
self.cs = sqrt(mueff) / (sqrt(mueff) + sqrt(N)) # 0.0499
self.c1 = 2 / (N + sqrt(2)) ** 2 # 1.1912701410022985e-07
if "cc" in optim_params.keys(): # if there is outside value for these parameter, overwrite them
self.cc = optim_params["cc"]
if "cs" in optim_params.keys():
self.cs = optim_params["cs"]
if "c1" in optim_params.keys():
self.c1 = optim_params["c1"]
self.damps = 1 + self.cs + 2 * max(0, sqrt((mueff - 1) / (N + 1)) - 1) # damping for sigma usually close to 1
print("cc=%.3f, cs=%.3f, c1=%.3f damps=%.3f" % (self.cc, self.cs, self.c1, self.damps))
if init_code is not None:
self.init_x = np.asarray(init_code)
self.init_x.shape = (1, N)
else:
self.init_x = None # FIXED Nov. 1st
self.xmean = zeros((1, N))
self.xold = zeros((1, N))
# Initialize dynamic (internal) strategy parameters and constants
self.pc = zeros((1, N))
self.ps = zeros((1, N)) # evolution paths for C and sigma
self.A = eye(N, N) # covariant matrix is represent by the factors A * A '=C
self.Ainv = eye(N, N)
self.eigeneval = 0 # track update of B and D
self.counteval = 0
if Aupdate_freq is None:
self.update_crit = self.lambda_ / self.c1 / N / 10
else:
self.update_crit = Aupdate_freq * self.lambda_
self.chiN = sqrt(N) * (1 - 1 / (4 * N) + 1 / (21 * N ** 2))
# expectation of ||N(0,I)|| == norm(randn(N,1)) in 1/N expansion formula
self._istep = 0
def step_simple(self, scores, codes):
""" Taking scores and codes to return new codes, without generating images
Used in cases when the images are better handled in outer objects like Experiment object
"""
# Note it's important to decide which variable is to be saved in the `Optimizer` object
# Note to confirm with other code, this part is transposed.
# set short name for everything to simplify equations
N = self.space_dimen
lambda_, mu, mueff, chiN = self.lambda_, self.mu, self.mueff, self.chiN
cc, cs, c1, damps = self.cc, self.cs, self.c1, self.damps
sigma, A, Ainv, ps, pc, = self.sigma, self.A, self.Ainv, self.ps, self.pc,
# Sort by fitness and compute weighted mean into xmean
if self.maximize is False:
code_sort_index = np.argsort(scores) # ascending order -> minimization; negate scores to maximize
else:
code_sort_index = np.argsort(-scores)
# scores = scores[code_sort_index] # Ascending order. minimization
if self._istep == 0:
# Population Initialization: if without initialization, the first xmean is evaluated from weighted average all the natural images
if self.init_x is None:
select_n = len(code_sort_index[0:mu])
temp_weight = self.weights[:, :select_n] / np.sum(self.weights[:, :select_n]) # in case the codes is not enough
self.xmean = temp_weight @ codes[code_sort_index[0:mu], :]
else:
self.xmean = self.init_x
else:
self.xold = self.xmean
self.xmean = self.weights @ codes[code_sort_index[0:mu], :] # Weighted recombination, new mean value
# Cumulation statistics through steps: Update evolution paths
randzw = self.weights @ self.randz[code_sort_index[0:mu], :]
ps = (1 - cs) * ps + sqrt(cs * (2 - cs) * mueff) * randzw
pc = (1 - cc) * pc + sqrt(cc * (2 - cc) * mueff) * randzw @ A
# Adapt step size sigma
sigma = sigma * exp((cs / damps) * (norm(ps) / chiN - 1))
# self.sigma = self.sigma * exp((self.cs / self.damps) * (norm(ps) / self.chiN - 1))
print("sigma: %.2f" % sigma)
# Update A and Ainv with search path
if self.counteval - self.eigeneval > self.update_crit: # to achieve O(N ^ 2) do decomposition less frequently
self.eigeneval = self.counteval
t1 = time.time()
v = pc @ Ainv
normv = v @ v.T
# Directly update the A Ainv instead of C itself
A = sqrt(1 - c1) * A + sqrt(1 - c1) / normv * (
sqrt(1 + normv * c1 / (1 - c1)) - 1) * v.T @ pc # FIXME, dimension error, # FIXED aug.13th
Ainv = 1 / sqrt(1 - c1) * Ainv - 1 / sqrt(1 - c1) / normv * (
1 - 1 / sqrt(1 + normv * c1 / (1 - c1))) * Ainv @ v.T @ v
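# The two lines above are the rank-one Cholesky-factor update: with row-vector
# sampling z @ A (so C = A.T @ A) and target update C' = (1 - c1)*C + c1 * pc.T @ pc,
# setting v = pc @ Ainv and normv = v @ v.T lets A and Ainv be updated directly,
# avoiding any O(N^3) eigendecomposition of C (cf. Cholesky-CMA-ES).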
t2 = time.time()
print("A, Ainv update! Time cost: %.2f s" % (t2 - t1))
# Generate new sample by sampling from Gaussian distribution
new_samples = zeros((self.lambda_, N))
self.randz = randn(self.lambda_, N) # save the random number for generating the code.
for k in range(self.lambda_):
new_samples[k:k + 1, :] = self.xmean + sigma * (self.randz[k, :] @ A) # m + sig * Normal(0,C)
# Clever way to generate multivariate Gaussian samples:
# new_samples ~ N(xmean, sigma^2 * A.T @ A), i.e. the unit Gaussian is stretched
# directly through the factor A instead of the explicit B, D eigendecomposition.
self.counteval += 1
self.sigma, self.A, self.Ainv, self.ps, self.pc = sigma, A, Ainv, ps, pc,
self._istep += 1
return new_samples
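# --- Added usage sketch (not part of the original library) ---------------------------
# Minimal example of driving a `step_simple`-style optimizer. `score_fn` is an assumed
# stand-in for whatever scores a batch of codes (e.g. a CNN unit activation) and must
# return a 1-D numpy array of length popsize; all names here are illustrative only.
def _example_ask_tell_loop(score_fn, n_gens=10, dimen=4096, popsize=40):
    optim = CholeskyCMAES(space_dimen=dimen, population_size=popsize)
    codes = randn(popsize, dimen)                 # initial batch of codes
    for _ in range(n_gens):
        scores = score_fn(codes)                  # evaluate the current batch
        codes = optim.step_simple(scores, codes)  # propose the next batch
    return codes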
#%% Optimizers that use pre-computed Hessian information
class HessCMAES:
""" Note this is a variant of CMAES Cholesky suitable for high dimensional optimization"""
def __init__(self, space_dimen, population_size=None, cutoff=None, init_sigma=3.0, init_code=None, Aupdate_freq=10, maximize=True, random_seed=None, optim_params={}):
if cutoff is None: cutoff = space_dimen
N = cutoff
self.code_len = space_dimen
self.space_dimen = cutoff # Overall control parameter
self.maximize = maximize # if the program is to maximize or to minimize
# Strategy parameter setting: Selection
if population_size is None:
self.lambda_ = int(4 + floor(3 * log2(N))) # population size, offspring number
# the relation between dimension and population size.
else:
self.lambda_ = population_size # use custom specified population size
mu = self.lambda_ / 2 # number of parents/points for recombination
# Select half the population size as parents
weights = log(mu + 1 / 2) - (log(np.arange(1, 1 + floor(mu)))) # muXone array for weighted recombination
self.mu = int(floor(mu))
self.weights = weights / sum(weights) # normalize recombination weights array
mueff = self.weights.sum() ** 2 / sum(self.weights ** 2) # variance-effectiveness of sum w_i x_i
self.weights.shape = (1, -1) # Add the 1st dim 1 to the weights mat
self.mueff = mueff # add to class variable
self.sigma = init_sigma # initial global step size (default 3.0)
print("Space dimension: %d, Population size: %d, Select size:%d, Optimization Parameters:\nInitial sigma: %.3f"
% (self.space_dimen, self.lambda_, self.mu, self.sigma))
# Strategy parameter setting: Adaptation
self.cc = 4 / (N + 4) # e.g. 0.0009756 for N = 4096
self.cs = sqrt(mueff) / (sqrt(mueff) + sqrt(N)) # 0.0499
self.c1 = 2 / (N + sqrt(2)) ** 2 # 1.1912701410022985e-07
if "cc" in optim_params.keys(): # if there is outside value for these parameter, overwrite them
self.cc = optim_params["cc"]
if "cs" in optim_params.keys():
self.cs = optim_params["cs"]
if "c1" in optim_params.keys():
self.c1 = optim_params["c1"]
self.damps = 1 + self.cs + 2 * max(0, sqrt((mueff - 1) / (N + 1)) - 1) # damping for sigma usually close to 1
print("cc=%.3f, cs=%.3f, c1=%.3f damps=%.3f" % (self.cc, self.cs, self.c1, self.damps))
if init_code is not None:
self.init_x = np.asarray(init_code).reshape(1,-1)
# if self.init_x.shape[1] == space_dimen:
# self.projection = True
# elif self.init_x.shape[1] == cutoff:
# self.projection = False
# else:
# raise ValueError
else:
self.init_x = None # FIXED Nov. 1st
self.xmean = zeros((1, N))
self.xold = zeros((1, N))
# Initialize dynamic (internal) strategy parameters and constants
self.pc = zeros((1, space_dimen))
self.ps = zeros((1, N)) # evolution paths for C and sigma
self.A = eye(N, space_dimen, ) # covariant matrix is represent by the factors A * A '=C
self.Ainv = eye(space_dimen, N, )
self.eigeneval = 0 # track update of B and D
self.counteval = 0
if Aupdate_freq is None:
self.update_crit = self.lambda_ / self.c1 / N / 10
else:
self.update_crit = Aupdate_freq * self.lambda_
self.chiN = sqrt(N) * (1 - 1 / (4 * N) + 1 / (21 * N ** 2))
# expectation of ||N(0,I)|| == norm(randn(N,1)) in 1/N expansion formula
self._istep = 0
def set_Hessian(self, eigvals, eigvects, cutoff=None, expon=1/2.5):
cutoff = self.space_dimen
self.eigvals = eigvals[:cutoff]
self.eigvects = eigvects[:, :cutoff]
self.scaling = self.eigvals ** (-expon)
self.A = self.scaling[:,np.newaxis] * self.eigvects.T # cutoff by spacedimen
self.Ainv = (1 / self.scaling[np.newaxis,:]) * self.eigvects # spacedimen by cutoff
# if self.projection:
# self.init_x = self.init_x @ self.Ainv
def step_simple(self, scores, codes):
""" Taking scores and codes to return new codes, without generating images
Used in cases when the images are better handled in outer objects like Experiment object
"""
# Note it's important to decide which variable is to be saved in the `Optimizer` object
# Note to confirm with other code, this part is transposed.
# set short name for everything to simplify equations
N = self.space_dimen
lambda_, mu, mueff, chiN = self.lambda_, self.mu, self.mueff, self.chiN
cc, cs, c1, damps = self.cc, self.cs, self.c1, self.damps
sigma, A, Ainv, ps, pc, = self.sigma, self.A, self.Ainv, self.ps, self.pc,
# Sort by fitness and compute weighted mean into xmean
if self.maximize is False:
code_sort_index = np.argsort(scores) # ascending order -> minimization; negate scores to maximize
else:
code_sort_index = np.argsort(-scores)
# scores = scores[code_sort_index] # Ascending order. minimization
if self._istep == 0:
# Population Initialization: if without initialization, the first xmean is evaluated from weighted average all the natural images
if self.init_x is None:
select_n = len(code_sort_index[0:mu])
temp_weight = self.weights[:, :select_n] / np.sum(self.weights[:, :select_n]) # in case the codes is not enough
self.xmean = temp_weight @ codes[code_sort_index[0:mu], :]
else:
self.xmean = self.init_x
else:
self.xold = self.xmean
self.xmean = self.weights @ codes[code_sort_index[0:mu], :] # Weighted recombination, new mean value
# Cumulation statistics through steps: Update evolution paths
randzw = self.weights @ self.randz[code_sort_index[0:mu], :]
ps = (1 - cs) * ps + sqrt(cs * (2 - cs) * mueff) * randzw
pc = (1 - cc) * pc + sqrt(cc * (2 - cc) * mueff) * randzw @ A
# Adapt step size sigma
sigma = sigma * exp((cs / damps) * (norm(ps) / chiN - 1))
# self.sigma = self.sigma * exp((self.cs / self.damps) * (norm(ps) / self.chiN - 1))
print("sigma: %.2f" % sigma)
# Update A and Ainv with search path
if self.counteval - self.eigeneval > self.update_crit: # to achieve O(N ^ 2) do decomposition less frequently
self.eigeneval = self.counteval
t1 = time.time()
v = pc @ Ainv # (1, spacedimen) * (spacedimen, N) -> (1,N)
normv = v @ v.T
# Directly update the A Ainv instead of C itself
A = sqrt(1 - c1) * A + sqrt(1 - c1) / normv * (
sqrt(1 + normv * c1 / (1 - c1)) - 1) * v.T @ pc # FIXME, dimension error
Ainv = 1 / sqrt(1 - c1) * Ainv - 1 / sqrt(1 - c1) / normv * (
1 - 1 / sqrt(1 + normv * c1 / (1 - c1))) * Ainv @ v.T @ v
t2 = time.time()
print("A, Ainv update! Time cost: %.2f s" % (t2 - t1))
# Generate new sample by sampling from Gaussian distribution
new_samples = zeros((self.lambda_, N))
self.randz = randn(self.lambda_, N) # save the random number for generating the code.
new_samples = self.xmean + sigma * self.randz @ A
self.counteval += self.lambda_
# Clever way to generate multivariate Gaussian samples:
# new_samples ~ N(xmean, sigma^2 * A.T @ A), i.e. the unit Gaussian is mapped
# through the factor A instead of the explicit B, D eigendecomposition.
self.sigma, self.A, self.Ainv, self.ps, self.pc = sigma, A, Ainv, ps, pc,
self._istep += 1
return new_samples
#%% New Optimizers from the paper.
class HessAware_ADAM:
def __init__(self, space_dimen, population_size=40, lr=0.1, mu=1, nu=0.9, maximize=True):
self.dimen = space_dimen # dimension of input space
self.B = population_size # population batch size
self.mu = mu # scale of estimating gradient
self.nu = nu # update rate for D
self.lr = lr # learning rate (step size) of moving along gradient
self.grad = np.zeros((1, self.dimen)) # estimated gradient
self.D = np.ones((1, self.dimen)) # running average of gradient square
self.Hdiag = np.ones((1, self.dimen)) # Diagonal of estimated Hessian
self.innerU = np.zeros((self.B, self.dimen)) # inner random vectors with covariance matrix Id
self.outerV = np.zeros((self.B, self.dimen)) # outer random vectors with covariance matrix H^{-1}
self.xcur = np.zeros((1, self.dimen)) # current base point
self.xnew = np.zeros((1, self.dimen)) # new base point
self.fcur = 0 # f(xcur)
self.fnew = 0 # f(xnew)
self._istep = 0 # step counter
self.maximize = maximize # maximize / minimize the function
def step_simple(self, scores, codes):
''' Assume the 1st row of codes is the xnew new starting point '''
# set short name for everything to simplify equations
N = self.dimen
if self._istep == 0:
# Population Initialization: if without initialization, the first xmean is evaluated from weighted average all the natural images
self.xcur = codes[0:1, :]
self.xnew = codes[0:1, :]
else:
# self.xcur = self.xnew # should be same as following
self.xcur = codes[0:1, :]
self.weights = (scores - scores[0]) / self.mu
HAgrad = self.weights[1:] @ (codes[1:] - self.xcur) / self.B # it doesn't matter if it includes the 0 row!
if self.maximize is True:
self.xnew = self.xcur + self.lr * HAgrad # add - operator it will do maximization.
else:
self.xnew = self.xcur - self.lr * HAgrad
self.D = self.nu * self.D + (1 - self.nu) * HAgrad ** 2 # running average of gradient square # Missing square before
self.Hdiag = self.D / (1 - self.nu ** self._istep) # Diagonal of estimated Hessian
# Generate new sample by sampling from Gaussian distribution
new_samples = zeros((self.B + 1, N))
self.innerU = randn(self.B, N) # save the random number for generating the code.
self.outerV = self.innerU / sqrt(self.Hdiag) # H^{-1/2}U
new_samples[0:1, :] = self.xnew
new_samples[1: , :] = self.xnew + self.mu * self.outerV # m + sig * Normal(0,C)
self._istep += 1
return new_samples
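# Note on HessAware_ADAM above (added): D is a running average of the squared gradient
# estimate and Hdiag its bias-corrected version, analogous to Adam's second moment, so
# new candidates are drawn from N(xnew, mu^2 * diag(Hdiag)^-1) -- exploration is widened
# along directions of small estimated curvature.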
#%%
class HessAware_Gauss:
"""Gaussian Sampling method for estimating Hessian"""
def __init__(self, space_dimen, population_size=40, lr=0.1, mu=1, Lambda=0.9, Hupdate_freq=5, maximize=True):
self.dimen = space_dimen # dimension of input space
self.B = population_size # population batch size
self.mu = mu # scale of the Gaussian distribution to estimate gradient
assert Lambda > 0
self.Lambda = Lambda # diagonal regularizer for Hessian matrix
self.lr = lr # learning rate (step size) of moving along gradient
self.grad = np.zeros((1, self.dimen)) # estimated gradient
self.innerU = np.zeros((self.B, self.dimen)) # inner random vectors with covariance matrix Id
self.outerV = np.zeros((self.B, self.dimen)) # outer random vectors with covariance matrix H^{-1}, equals self.innerU @ H^{-1/2}
self.xcur = np.zeros((1, self.dimen)) # current base point
self.xnew = np.zeros((1, self.dimen)) # new base point
self.fcur = 0 # f(xcur)
self.fnew = 0 # f(xnew)
self.Hupdate_freq = int(Hupdate_freq) # Update Hessian (add additional samples every how many generations)
self.HB = population_size # Batch size of samples to estimate Hessian, can be different from self.B
self.HinnerU = np.zeros((self.HB, self.dimen)) # sample deviation vectors for Hessian construction
# SVD of the weighted HinnerU for Hessian construction
self.HessUC = np.zeros((self.HB, self.dimen)) # Basis vector for the linear subspace defined by the samples
self.HessD = np.zeros(self.HB) # diagonal values of the Lambda matrix
self.HessV = np.zeros((self.HB, self.HB)) # seems not used....
self.HUDiag = np.zeros(self.HB)
self.hess_comp = False
self._istep = 0 # step counter
self.maximize = maximize # maximize / minimize the function
def step_hessian(self, scores):
'''Currently only use part of the samples to estimate hessian, maybe need more '''
fbasis = scores[0]
fpos = scores[-2*self.HB:-self.HB]
fneg = scores[-self.HB:]
weights = abs((fpos + fneg - 2 * fbasis) / 2 / self.mu ** 2 / self.HB) # use abs to enforce positive definiteness
C = sqrt(weights[:, np.newaxis]) * self.HinnerU # or the sqrt may not work.
# H = C^TC + Lambda * I
self.HessV, self.HessD, self.HessUC = np.linalg.svd(C, full_matrices=False)
self.HUDiag = 1 / sqrt(self.HessD ** 2 + self.Lambda) - 1 / sqrt(self.Lambda)
print("Hessian Samples Spectrum", self.HessD)
print("Hessian Samples Full Power:%f \nLambda:%f" % ((self.HessD ** 2).sum(), self.Lambda) )
def step_simple(self, scores, codes):
''' Assume the 1st row of codes is the xnew new starting point '''
# set short name for everything to simplify equations
N = self.dimen
if self.hess_comp: # if this flag is True then more samples have been added to the trial
self.step_hessian(scores)
# you should only get images for gradient estimation, get rid of the Hessian samples, or make use of it to estimate gradient
codes = codes[:self.B+1, :]
scores = scores[:self.B+1]
self.hess_comp = False
if self._istep == 0:
# Population Initialization: if without initialization, the first xmean is evaluated from weighted average all the natural images
self.xcur = codes[0:1, :]
self.xnew = codes[0:1, :]
else:
# self.xcur = self.xnew # should be same as following line
self.xcur = codes[0:1, :]
self.weights = (scores - scores[0]) / self.mu
# estimate gradient from the codes and scores
HAgrad = self.weights[1:] @ (codes[1:] - self.xcur) / self.B # it doesn't matter if it includes the 0 row!
print("Estimated Gradient Norm %f"%np.linalg.norm(HAgrad))
if self.maximize is True:
self.xnew = self.xcur + self.lr * HAgrad # add - operator it will do maximization.
else:
self.xnew = self.xcur - self.lr * HAgrad
# Generate new sample by sampling from Gaussian distribution
new_samples = zeros((self.B + 1, N))
self.innerU = randn(self.B, N) # Isotropic gaussian distributions
self.outerV = self.innerU / sqrt(self.Lambda) + ((self.innerU @ self.HessUC.T) * self.HUDiag) @ self.HessUC # H^{-1/2}U
new_samples[0:1, :] = self.xnew
new_samples[1: , :] = self.xnew + self.mu * self.outerV # m + sig * Normal(0,C)
if self._istep % self.Hupdate_freq == 0:
# add more samples to next batch for hessian computation
self.hess_comp = True
self.HinnerU = randn(self.HB, N)
H_pos_samples = self.xnew + self.mu * self.HinnerU
H_neg_samples = self.xnew - self.mu * self.HinnerU
new_samples = np.concatenate((new_samples, H_pos_samples, H_neg_samples), axis=0)
self._istep += 1
return new_samples
def rankweight(lambda_, mu=None):
""" Rank weight inspired by CMA-ES code
mu is the cutoff: the best `mu` samples receive non-zero weight while the remaining `lambda_ - mu` are ignored
"""
if mu is None:
mu = lambda_ / 2 # number of parents/points for recombination
# Defaultly Select half the population size as parents
weights = zeros(int(lambda_))
mu_int = int(floor(mu))
weights[:mu_int] = log(mu + 1 / 2) - (log(np.arange(1, 1 + floor(mu)))) # muXone array for weighted recombination
weights = weights / sum(weights)
return weights
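# Example (added, approximate): rankweight(6, mu=3) gives non-zero weight only to the
# 3 best ranks, proportional to log(3.5) - log([1, 2, 3]), i.e. roughly
# [0.64, 0.28, 0.08, 0, 0, 0].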
# Major Classes.
class HessAware_Gauss_Spherical:
"""Gaussian Sampling method for estimating Hessian"""
def __init__(self, space_dimen, population_size=40, lr=0.1, mu=1, Lambda=0.9, Hupdate_freq=5,
sphere_norm=300, maximize=True, rankweight=False, select_cutoff=None):  # select_cutoff added; it is used in the rankweight branch below
self.dimen = space_dimen # dimension of input space
self.B = population_size # population batch size
self.mu = mu # scale of the Gaussian distribution to estimate gradient
assert Lambda > 0
self.Lambda = Lambda # diagonal regularizer for Hessian matrix
self.lr = lr # learning rate (step size) of moving along gradient
self.sphere_norm = sphere_norm
self.tang_codes = zeros((self.B, self.dimen))
self.grad = np.zeros((1, self.dimen)) # estimated gradient
self.innerU = np.zeros((self.B, self.dimen)) # inner random vectors with covariance matrix Id
self.outerV = np.zeros(
(self.B, self.dimen)) # outer random vectors with covariance matrix H^{-1}, equals self.innerU @ H^{-1/2}
self.xcur = np.zeros((1, self.dimen)) # current base point
self.xnew = np.zeros((1, self.dimen)) # new base point
self.fcur = 0 # f(xcur)
self.fnew = 0 # f(xnew)
self.Hupdate_freq = int(Hupdate_freq) # Update Hessian (add additional samples every how many generations)
self.HB = population_size # Batch size of samples to estimate Hessian, can be different from self.B
self.HinnerU = np.zeros((self.HB, self.dimen)) # sample deviation vectors for Hessian construction
# SVD of the weighted HinnerU for Hessian construction
self.HessUC = np.zeros((self.HB, self.dimen)) # Basis vector for the linear subspace defined by the samples
self.HessD = np.zeros(self.HB) # diagonal values of the Lambda matrix
self.HessV = np.zeros((self.HB, self.HB)) # seems not used....
self.HUDiag = np.zeros(self.HB)
self.hess_comp = False
self._istep = 0 # step counter
self.maximize = maximize # maximize / minimize the function
self.rankweight = rankweight # Switch between using raw score as weight VS use rank weight as score
print(
"Spereical Space dimension: %d, Population size: %d, Optimization Parameters:\n Exploration: %.3f\n Learning rate: %.3f"
% (self.dimen, self.B, self.mu, self.lr))
if self.rankweight:
if select_cutoff is None:
self.select_cutoff = int(population_size / 2)
else:
self.select_cutoff = select_cutoff
print("Using rank weight, selection size: %d\n" % self.select_cutoff)
def step_hessian(self, scores):
'''Currently not implemented in Spherical Version.'''
fbasis = scores[0]
fpos = scores[-2 * self.HB:-self.HB]
fneg = scores[-self.HB:]
weights = abs(
(fpos + fneg - 2 * fbasis) / 2 / self.mu ** 2 / self.HB) # use abs to enforce positive definiteness
C = sqrt(weights[:, np.newaxis]) * self.HinnerU # or the sqrt may not work.
# H = C^TC + Lambda * I
self.HessV, self.HessD, self.HessUC = np.linalg.svd(C, full_matrices=False)
self.HUDiag = 1 / sqrt(self.HessD ** 2 + self.Lambda) - 1 / sqrt(self.Lambda)
print("Hessian Samples Spectrum", self.HessD)
print("Hessian Samples Full Power:%f \nLambda:%f" % ((self.HessD ** 2).sum(), self.Lambda))
def step_simple(self, scores, codes):
''' Assume the 1st row of codes is the xnew new starting point '''
# set short name for everything to simplify equations
N = self.dimen
if self.hess_comp: # if this flag is True then more samples have been added to the trial
self.step_hessian(scores)
# you should only get images for gradient estimation, get rid of the Hessian samples, or make use of it to estimate gradient
codes = codes[:self.B + 1, :]
scores = scores[:self.B + 1]
self.hess_comp = False
if self._istep == 0:
# Population Initialization: if without initialization, the first xmean is evaluated from weighted average all the natural images
print('First generation\n')
self.xcur = codes[0:1, :]
self.xnew = codes[0:1, :]
# No reweighting as there should be a single code
else:
# self.xcur = self.xnew # should be same as following line
self.xcur = codes[0:1, :]
if self.rankweight is False: # use the score difference as weight
# normalized by B: a larger cohort of codes gives more gradient estimates
self.weights = (scores[1:] - scores[0]) / self.B # / self.mu
else: # use a function of rank as weight, not really gradient.
if self.maximize is False: # note for weighted recombination, the maximization flag is here.
code_rank = np.argsort(np.argsort(scores[1:])) # add - operator it will do maximization.
else:
code_rank = np.argsort(np.argsort(-scores[1:]))
# Consider do we need to consider the basis code and score here? Or no?
# Note the weights here are internally normalized s.t. sum up to 1, no need to normalize more.
self.weights = rankweight(len(scores) - 1, mu=self.select_cutoff)[
code_rank] # map the rank to the corresponding weight of recombination
# estimate gradient from the codes and scores
# HAgrad = self.weights[1:] @ (codes[1:] - self.xcur) / self.B # it doesn't matter if it includes the 0 row!
HAgrad = self.weights[np.newaxis, :] @ self.tang_codes
print("Estimated Gradient Norm %f" % np.linalg.norm(HAgrad))
if self.rankweight is False:
if self.maximize is True:
self.xnew = ExpMap(self.xcur, self.lr * HAgrad) # add - operator it will do maximization.
else:
self.xnew = ExpMap(self.xcur, - self.lr * HAgrad)
else:
self.xnew = ExpMap(self.xcur, self.lr * HAgrad)
# vtan_new = VecTransport(self.xcur, self.xnew, vtan_old)
# uni_vtan_old = vtan_old / np.linalg.norm(vtan_old);
# uni_vtan_new = vtan_new / np.linalg.norm(vtan_new); # uniform the tangent vector
# Generate new sample by sampling from Gaussian distribution
self.tang_codes = zeros((self.B, N)) # Tangent vectors of exploration
new_samples = zeros((self.B + 1, N))
self.innerU = randn(self.B, N) # Isotropic gaussian distributions
self.outerV = self.innerU / sqrt(self.Lambda) + (
(self.innerU @ self.HessUC.T) * self.HUDiag) @ self.HessUC # H^{-1/2}U
new_samples[0:1, :] = self.xnew
self.tang_codes[:, :] = self.mu * self.outerV # m + sig * Normal(0,C)
new_samples[1:, ] = ExpMap(self.xnew, self.tang_codes)
if (self._istep + 1) % self.Hupdate_freq == 0:
# add more samples to next batch for hessian computation
self.hess_comp = True
self.HinnerU = randn(self.HB, N)
H_pos_samples = self.xnew + self.mu * self.HinnerU
H_neg_samples = self.xnew - self.mu * self.HinnerU
new_samples = np.concatenate((new_samples, H_pos_samples, H_neg_samples), axis=0)
self._istep += 1
self._curr_samples = new_samples / norm(new_samples, axis=1)[:, np.newaxis] * self.sphere_norm
return self._curr_samples
class HessAware_Gauss_Cylind:
""" Cylindrical Evolution, Both angular and radial. """
def __init__(self, space_dimen, population_size=40, population_kept=None, lr_norm=0.5, mu_norm=5, lr_sph=2,
mu_sph=0.005,
Lambda=1, Hupdate_freq=201, max_norm=300, maximize=True, rankweight=False):
self.dimen = space_dimen # dimension of input space
self.B = population_size # population batch size
assert Lambda > 0
self.Lambda = Lambda # diagonal regularizer for Hessian matrix
self.lr_norm = lr_norm # learning rate (step size) of moving along gradient
self.mu_norm = mu_norm # scale of the Gaussian distribution to estimate gradient
self.lr_sph = lr_sph
self.mu_sph = mu_sph
self.sphere_flag = True # initialize the whole system as linear?
self.max_norm = max_norm
self.tang_codes = zeros((self.B, self.dimen))
self.grad = np.zeros((1, self.dimen)) # estimated gradient
self.innerU = np.zeros((self.B, self.dimen)) # inner random vectors with covariance matrix Id
self.outerV = np.zeros(
(self.B, self.dimen)) # outer random vectors with covariance matrix H^{-1}, equals self.innerU @ H^{-1/2}
self.xcur = np.zeros((1, self.dimen)) # current base point
self.xnew = np.zeros((1, self.dimen)) # new base point
self.fcur = 0 # f(xcur)
self.fnew = 0 # f(xnew)
self.Hupdate_freq = int(Hupdate_freq) # Update Hessian (add additional samples every how many generations)
self.HB = population_size # Batch size of samples to estimate Hessian, can be different from self.B
self.HinnerU = np.zeros((self.HB, self.dimen)) # sample deviation vectors for Hessian construction
# SVD of the weighted HinnerU for Hessian construction
self.HessUC = np.zeros((self.HB, self.dimen)) # Basis vector for the linear subspace defined by the samples
self.HessD = np.zeros(self.HB) # diagonal values of the Lambda matrix
self.HessV = np.zeros((self.HB, self.HB)) # seems not used....
self.HUDiag = np.zeros(self.HB)
self.hess_comp = False
self._istep = 0 # step counter
self.maximize = maximize # maximize / minimize the function
self.rankweight = rankweight # Switch between using raw score as weight VS use rank weight as score
print("Spereical Space dimension: %d, Population size: %d, Optimization Parameters:\n"
"Norm Exploration Range %.3f Learning rate: %.3f\n Angular Exploration Range:%.3f Learning Rate: %.3f"
% (self.dimen, self.B, self.mu_norm, self.lr_norm, self.mu_sph, self.lr_sph))
if rankweight:
self.BKeep = population_kept if population_kept is not None else int(self.B // 2)
print("Using rank based weights. Keep population size: %d" % (self.BKeep))
def step_hessian(self, scores):
''' Currently not implemented in Spherical Version. '''
raise NotImplementedError
# fbasis = scores[0]
# fpos = scores[-2 * self.HB:-self.HB]
# fneg = scores[-self.HB:]
# weights = abs(
# (fpos + fneg - 2 * fbasis) / 2 / self.mu ** 2 / self.HB) # use abs to enforce positive definiteness
# C = sqrt(weights[:, np.newaxis]) * self.HinnerU # or the sqrt may not work.
# # H = C^TC + Lambda * I
# self.HessV, self.HessD, self.HessUC = np.linalg.svd(C, full_matrices=False)
# self.HUDiag = 1 / sqrt(self.HessD ** 2 + self.Lambda) - 1 / sqrt(self.Lambda)
# print("Hessian Samples Spectrum", self.HessD)
# print("Hessian Samples Full Power:%f \nLambda:%f" % ((self.HessD ** 2).sum(), self.Lambda))
def step_simple(self, scores, codes):
''' Assume the 1st row of codes is the xnew new starting point '''
# set short name for everything to simplify equations
N = self.dimen
if self.hess_comp: # if this flag is True then more samples have been added to the trial
raise NotImplementedError
self.step_hessian(scores)
# you should only get images for gradient estimation, get rid of the Hessian samples, or make use of it to estimate gradient
codes = codes[:self.B + 1, :]
scores = scores[:self.B + 1]
self.hess_comp = False
if self._istep == 0:
# Population Initialization: if without initialization, the first xmean is evaluated from weighted average all the natural images
print('First generation\n')
self.xcur = codes[0:1, :]
self.xnew = codes[0:1, :]
# No reweighting as there should be a single code
else:
# self.xcur = self.xnew # should be same as following line
self.xcur = codes[0:1, :]
if self.rankweight is False: # use the score difference as weight
# normalized by B: a larger cohort of codes gives more gradient estimates
self.weights = (scores[:] - scores[0]) / self.B # / self.mu
else: # use a function of rank as weight, not really gradient.
if self.maximize is False: # note for weighted recombination, the maximization flag is here.
code_rank = np.argsort(np.argsort(scores[:])) # add - operator it will do maximization.
else:
code_rank = np.argsort(np.argsort(-scores[:]))
# Consider do we need to consider the basis code and score here? Or no?
# Note the weights here are internally normalized s.t. sum up to 1, no need to normalize more.
self.weights = rankweight(len(scores), mu=self.BKeep)[code_rank]
# map the rank to the corresponding weight of recombination
# estimate gradient from the codes and scores
# HAgrad = self.weights[1:] @ (codes[1:] - self.xcur) / self.B # it doesn't matter if it includes the 0 row!
tang_codes_aug = np.concatenate((np.zeros((1, self.tang_codes.shape[1])), self.tang_codes), axis=0)
HAgrad = self.weights[np.newaxis,
:] @ tang_codes_aug # self.tang_codes # Changed to take the current location into account.
normgrad = self.weights[np.newaxis, 1:] @ (self.code_norms - norm(self.xcur)) # Recombine norms to get,
print("Estimated Angular Gradient Norm %f" % norm(HAgrad))
print("Estimated Radial Gradient Norm %f" % normgrad)
mov_sign = -1 if (not self.maximize) and (not self.rankweight) else 1
normnew = np.minimum(self.max_norm, norm(
self.xcur) + mov_sign * self.lr_norm * normgrad) # use the new norm to normalize ynew
self.xnew = ExpMap(self.xcur, mov_sign * self.lr_sph * HAgrad) # add - operator it will do maximization.
self.xnew = renormalize(self.xnew, normnew)
# Generate new sample by sampling from Gaussian distribution
self.innerU = randn(self.B, N) # Isotropic gaussian distributions
self.outerV = self.innerU / sqrt(self.Lambda) + (
(self.innerU @ self.HessUC.T) * self.HUDiag) @ self.HessUC # H^{-1/2}U
self.tang_codes = self.mu_sph * self.outerV # m + sig * Normal(0,C)
self.tang_codes = orthogonalize(self.xnew, self.tang_codes) # Tangent vectors of exploration
new_norms = norm(self.xnew) + self.mu_norm * randn(self.B)
new_norms = np.minimum(self.max_norm, new_norms)
new_samples = zeros((self.B + 1, N))
new_samples[0:1, :] = self.xnew
new_samples[1:, ] = ExpMap(self.xnew, self.tang_codes)
new_samples[1:, ] = renormalize(new_samples[1:, ], new_norms)
print("norm of new samples", norm(new_samples, axis=1))
self.code_norms = new_norms # doesn't include the norm of the basis vector.
if (self._istep + 1) % self.Hupdate_freq == 0:
# add more samples to next batch for hessian computation
self.hess_comp = True
self.HinnerU = randn(self.HB, N)
H_pos_samples = self.xnew + self.mu_sph * self.HinnerU # this class defines mu_sph (not mu)
H_neg_samples = self.xnew - self.mu_sph * self.HinnerU
new_samples = np.concatenate((new_samples, H_pos_samples, H_neg_samples), axis=0)
self._istep += 1
return new_samples
#%
class HessEstim_Gauss:
"""Code to generate samples and estimate Hessian from it"""
def __init__(self, space_dimen):
self.dimen = space_dimen
self.HB = 0
self.std = 2
def GaussSampling(self, xmean, batch=100, std=2):
xmean = xmean.reshape(1, -1)
self.std = std
self.HB = batch
self.HinnerU = randn(self.HB, self.dimen) # / sqrt(self.dimen) # make it unit var along the code vector dimension
H_pos_samples = xmean + self.std * self.HinnerU
H_neg_samples = xmean - self.std * self.HinnerU
new_samples = np.concatenate((xmean, H_pos_samples, H_neg_samples), axis=0)
return new_samples
def HessEstim(self, scores):
fbasis = scores[0]
fpos = scores[-2 * self.HB:-self.HB]
fneg = scores[-self.HB:]
weights = abs(
(fpos + fneg - 2 * fbasis) / 2 / self.std ** 2 / self.HB) # use abs to enforce positive definiteness
C = sqrt(weights[:, np.newaxis]) * self.HinnerU # or the sqrt may not work.
# H = C^TC + Lambda * I
self.HessV, self.HessD, self.HessUC = np.linalg.svd(C, full_matrices=False)
# self.HessV.shape = (HB, HB); self.HessD.shape = (HB,), self.HessUC.shape = (HB, dimen)
# self.HUDiag = 1 / sqrt(self.HessD ** 2 + self.Lambda) - 1 / sqrt(self.Lambda)
print("Hessian Samples Spectrum", self.HessD)
print("Hessian Samples Full Power:%f" % ((self.HessD ** 2).sum()))
return self.HessV, self.HessD, self.HessUC
#%
class HessAware_Gauss_DC:
"""Gaussian Sampling method for estimating Hessian"""
def __init__(self, space_dimen, population_size=40, lr=0.1, mu=1, Lambda=0.9, Hupdate_freq=5,
maximize=True, max_norm=300, rankweight=False, nat_grad=False):
self.dimen = space_dimen # dimension of input space
self.B = population_size # population batch size
self.mu = mu # scale of the Gaussian distribution to estimate gradient
assert Lambda > 0
self.Lambda = Lambda # diagonal regularizer for Hessian matrix
self.lr = lr # learning rate (step size) of moving along gradient
self.grad = np.zeros((1, self.dimen)) # estimated gradient
self.innerU = np.zeros((self.B, self.dimen)) # inner random vectors with covariance matrix Id
self.outerV = np.zeros((self.B, self.dimen)) # outer random vectors with covariance matrix H^{-1}, equals self.innerU @ H^{-1/2}
self.xnew = np.zeros((1, self.dimen)) # new base point
self.xscore = 0
self.Hupdate_freq = int(Hupdate_freq) # Update Hessian (add additional samples every how many generations)
self.HB = population_size # Batch size of samples to estimate Hessian, can be different from self.B
self.HinnerU = np.zeros((self.HB, self.dimen)) # sample deviation vectors for Hessian construction
# SVD of the weighted HinnerU for Hessian construction
self.HessUC = np.zeros((self.HB, self.dimen)) # Basis vector for the linear subspace defined by the samples
self.HessD = np.zeros(self.HB) # diagonal values of the Lambda matrix
self.HessV = np.zeros((self.HB, self.HB)) # seems not used....
self.HUDiag = np.zeros(self.HB)
self.hess_comp = False
self._istep = 0 # step counter
self.maximize = maximize # maximize / minimize the function
self.code_stored = np.array([]).reshape((0, self.dimen))
self.score_stored = np.array([])
self.N_in_samp = 0
self.max_norm = max_norm
self.nat_grad = nat_grad # use the natural gradient definition, or normal gradient.
self.rankweight = rankweight
def new_generation(self, init_score, init_code):
self.xscore = init_score
self.score_stored = np.array([])
self.xnew = init_code
self.code_stored = np.array([]).reshape((0, self.dimen))
self.N_in_samp = 0
def compute_hess(self, scores, Lambda_Frac=100):
'''Currently only use part of the samples to estimate hessian, maybe need more '''
fbasis = self.xscore
fpos = scores[:self.HB]
fneg = scores[-self.HB:]
weights = abs((fpos + fneg - 2 * fbasis) / 2 / self.mu ** 2 / self.HB) # use abs to enforce positive definiteness
C = sqrt(weights[:, np.newaxis]) * self.HinnerU # or the sqrt may not work.
# H = C^TC + Lambda * I
self.HessV, self.HessD, self.HessUC = np.linalg.svd(C, full_matrices=False)
self.Lambda = (self.HessD ** 2).sum() / Lambda_Frac
self.HUDiag = 1 / sqrt(self.HessD ** 2 + self.Lambda) - 1 / sqrt(self.Lambda)
print("Hessian Samples Spectrum", self.HessD)
print("Hessian Samples Full Power:%f \nLambda:%f" % ((self.HessD ** 2).sum(), self.Lambda) )
def compute_grad(self, scores):
# add the new scores to storage
self.score_stored = np.concatenate((self.score_stored, scores), axis=0) if self.score_stored.size else scores
if self.rankweight is False: # use the score difference as weight
# normalized by B: a larger cohort of codes gives more gradient estimates
self.weights = (self.score_stored - self.xscore) / self.score_stored.size # / self.mu
# assert(self.N_in_samp == self.score_stored.size)
else: # use a function of rank as weight, not really gradient.
# Note descent check **could be** built into ranking weight?
# If not better just don't give weights to that sample
if self.maximize is False: # note for weighted recombination, the maximization flag is here.
code_rank = np.argsort(np.argsort( self.score_stored)) # add - operator it will do maximization.
else:
code_rank = np.argsort(np.argsort(-self.score_stored))
# Consider do we need to consider the basis code and score here? Or no?
# Note the weights here are internally normalized s.t. sum up to 1, no need to normalize more.
self.weights = rankweight(len(self.score_stored), mu=20)[code_rank] # map the rank to the corresponding weight of recombination
# only keep the top 20 codes and recombine them.
if self.nat_grad: # if or not using the Hessian to rescale the codes
hagrad = self.weights @ (self.code_stored - self.xnew) # /self.mu
else:
Hdcode = self.Lambda * (self.code_stored - self.xnew) + (
((self.code_stored - self.xnew) @ self.HessUC.T) * self.HessD **2) @ self.HessUC
hagrad = self.weights @ Hdcode # /self.mu
print("Gradient Norm %.2f" % (np.linalg.norm(hagrad)))
# if self.rankweight is False:
# if self.maximize:
# ynew = radial_proj(self.xnew + self.lr * hagrad, max_norm=self.max_norm)
# else:
# ynew = radial_proj(self.xnew - self.lr * hagrad, max_norm=self.max_norm)
# else: # if using rankweight, then the maximization if performed in the recombination step.
# ynew = radial_proj(self.xnew + self.lr * hagrad, max_norm=self.max_norm)
mov_sign = -1 if (not self.maximize) and (not self.rankweight) else 1
ynew = radial_proj(self.xnew + mov_sign * self.lr * hagrad, max_norm=self.max_norm)
return ynew
def generate_sample(self, samp_num=None, hess_comp=False):
''' Assume the 1st row of codes is the xnew new starting point '''
N = self.dimen
# Generate new sample by sampling from Gaussian distribution
if hess_comp:
# self.hess_comp = True
self.HinnerU = randn(self.HB, N)
H_pos_samples = self.xnew + self.mu * self.HinnerU
H_neg_samples = self.xnew - self.mu * self.HinnerU
new_samples = np.concatenate((H_pos_samples, H_neg_samples), axis=0)
# new_samples = radial_proj(new_samples, self.max_norm)
else:
new_samples = zeros((samp_num, N))
self.innerU = randn(samp_num, N) # Isotropic gaussian distributions
self.outerV = self.innerU / sqrt(self.Lambda)
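# (sample truncated; by analogy with HessAware_Gauss.step_simple earlier in this file,
# this line is typically followed by the low-rank correction
# + ((self.innerU @ self.HessUC.T) * self.HUDiag) @ self.HessUC, i.e. applying H^(-1/2))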
import os
import sys
import argparse
import numpy as np
import open3d as o3d
from graspnetAPI.utils.config import get_config
from graspnetAPI.utils.eval_utils import get_scene_name, create_table_points, voxel_sample_points, transform_points, eval_grasp
from graspnetAPI.utils.utils import generate_scene_model
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(ROOT_DIR)
from around_view.utils.evaluation import AroundViewGraspEval
from around_view.utils.grasp import AroundViewGraspGroup
class TestTransformGraspNetEval(AroundViewGraspEval):
def eval_all(self, dump_folder, proc = 2):
_scene_ids = list(range(101, 190))
res = np.array(self.parallel_eval_scenes(scene_ids = _scene_ids, dump_folder = dump_folder, proc = proc))
ap = [np.mean(res), np.mean(res[0:30]), np.mean(res[30:60]), np.mean(res[60:90])]
import logging
import os.path
import time
import shutil
import tensorflow as tf
import numpy as np
import cv2
import sys
sys.path.append('/home/cany/tensorflow/models/research')
sys.path.append('/home/cany/tensorflow/models/research/slim')
from PIL import Image
from deeplab import common
import utils
import mem_net
from multiprocessing.dummy import Pool as ThreadPool
from argoverse.data_loading.argoverse_tracking_loader \
import ArgoverseTrackingLoader
import argoverse_token_splits as token_splits
from experiments import argoverse_objects_val_exp as exp_config
means_image = np.array([123.68, 116.779, 103.939], dtype=np.single)
total_label_slices = exp_config.num_classes + 2
train_path = os.path.expandvars(exp_config.argo_track_path)
train_loader = ArgoverseTrackingLoader(train_path)
target_dir = exp_config.argo_labels_path
exp_config.batch_size=1
use_deeplab = True
starting_from_cityscapes =False
starting_from_imagenet =False
frame_interval = exp_config.frame_interval
num_frames=exp_config.num_frames
single_frame_experiment=exp_config.single_frame_experiment
reference_frame_index = exp_config.reference_frame_index
n_frames_per_seq = exp_config.num_frames
n_seqs = n_frames_per_seq-num_frames+1
use_occlusion=exp_config.use_occlusion
BATCH_SIZE = exp_config.batch_size
"""
If inception pre-process is used, the inputs to query encoders are corrected through vgg processing in the tensorflow part.
Query encoders do not use masks, thus they can be simply propagated through the Resnet. Memory encoders need to be handled
differently since if the image's range is 0-255 and mask is 0-1 then the mask is not effective through simple addition before
batch norm.
If root block is included, inception_preprocess should be set to False.
"""
use_inception_preprocess = False
freeze_batch_norm_layers = True
multiply_labels=True
include_root_block=True
logging.error('REFERENCE FRAME ' + str(reference_frame_index))
log_dir = exp_config.log_dir
train_results_path = os.path.join(log_dir,'train_results')
#log_dir = os.path.join('/raid/cany/mapmaker/logdir/', exp_config.experiment_name)
validation_res_path = os.path.join(log_dir,'val_results')
if not os.path.exists(train_results_path):
os.makedirs(train_results_path, exist_ok=True)
if not os.path.exists(validation_res_path):
os.makedirs(validation_res_path, exist_ok=True)
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
logging.error('EXPERIMENT : ' + str(exp_config.experiment_name))
logging.error('THIS IS ' + str(log_dir))
def decode_binary_labels(labels, nclass):
bits = np.power(2, np.arange(nclass))
return np.uint8((np.expand_dims(labels,axis=-1) & np.reshape(bits,(1, 1,-1))) > 0)
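# Example (added): with nclass=3 a pixel value of 5 (binary 101) decodes to [1, 0, 1],
# i.e. classes 0 and 2 are marked present at that pixel.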
def list_directories(path):
return [ name for name in os.listdir(path) if os.path.isdir(os.path.join(path, name)) ]
def get_clipped_grads(gvs):
capped_gvs = []
for grad, var in gvs:
if grad is None:
logging.error('VAR ' + str(var) + ' NONE GRAD')
else:
capped_gvs.append((tf.clip_by_value(grad, -10., 10.), var))
return capped_gvs
def single_process(pair):
camera_channel = 'ring_front_center'
image_path,label_path, vis_mask,calib_cur,pose_ref, pose_cur , is_reference_sample= pair
calib_ref = calib_cur
cam_intrinsic = calib_cur.K[:,:3]
img = Image.open(image_path)
img.load()
encoded_labels = np.flipud(np.array(Image.open(label_path),np.int32))
num_class = exp_config.num_bev_classes
bev_labels = decode_binary_labels(encoded_labels, num_class+ 1)
# vis_mask[:-100,:] = 0
bev_labels = np.concatenate([np.copy(bev_labels[...,:exp_config.num_bev_classes]),np.copy(vis_mask),vis_mask*(1-np.copy(np.expand_dims(bev_labels[...,exp_config.num_bev_classes],axis=-1)))],axis=-1)
to_image_transform = utils.project_to_image(exp_config, np.zeros_like(bev_labels),calib_ref)
image_labels = cv2.warpPerspective(bev_labels,to_image_transform,exp_config.original_image_size,flags=cv2.INTER_NEAREST)
# image_objects= cv2.resize(image_objects,(int(exp_config.camera_image_patch_size[1]/4),int(exp_config.camera_image_patch_size[0]/4)), interpolation = cv2.INTER_LINEAR)
image_labels = np.uint8(image_labels > 0.3)
image=np.array(img, dtype=np.uint8)
warp_trans1 = utils.tensorflow_project_to_ground(image,np.zeros((int(exp_config.camera_image_patch_size[0]/(4*exp_config.downsample_ratio)),int(exp_config.camera_image_patch_size[1]/(4*exp_config.downsample_ratio)))),pose_ref, calib_ref,pose_cur,calib_cur, cam_intrinsic,reference_frame=is_reference_sample)
warp_trans2 = utils.tensorflow_project_to_ground(image,np.zeros((int(exp_config.camera_image_patch_size[0]/(8*exp_config.downsample_ratio)),int(exp_config.camera_image_patch_size[1]/(8*exp_config.downsample_ratio)))),pose_ref, calib_ref,pose_cur,calib_cur, cam_intrinsic,reference_frame=is_reference_sample)
warp_trans3 = utils.tensorflow_project_to_ground(image,np.zeros((int(exp_config.camera_image_patch_size[0]/(16*exp_config.downsample_ratio)),int(exp_config.camera_image_patch_size[1]/(16*exp_config.downsample_ratio)))),pose_ref, calib_ref,pose_cur,calib_cur, cam_intrinsic,reference_frame=is_reference_sample)
warped_img, warped_cover, coordinate_transform = utils.argoverse_project_to_ground(exp_config, image,image_labels[...,exp_config.num_bev_classes],calib_ref,pose_ref,calib_cur,pose_cur,cam_intrinsic,reference_frame=is_reference_sample)
if is_reference_sample:
padded_vis_mask = np.zeros((exp_config.project_patch_size[1],exp_config.project_patch_size[0]))
padded_vis_mask[50:-50,48:-48] = np.squeeze(vis_mask)
warped_cover = padded_vis_mask
# save_array(np.expand_dims(image_labels,axis=0),'pre_resize',is_rgb=False)
new_sizes = (exp_config.camera_image_patch_size[1],exp_config.camera_image_patch_size[0])
cropped_label = np.uint8(cv2.resize(image_labels, (int(exp_config.camera_image_patch_size[1]/4),int(exp_config.camera_image_patch_size[0]/4)), interpolation = cv2.INTER_LINEAR)>0.5)
cropped_img = cv2.resize(image,new_sizes, interpolation = cv2.INTER_LINEAR)
# save_array(np.expand_dims(cropped_label,axis=0),'temp_res',is_rgb=False)
return (cropped_img, cropped_label,np.float32(warped_cover),warped_img, coordinate_transform,np.reshape(warp_trans1,[-1])[0:8],np.reshape(warp_trans2,[-1])[0:8],np.reshape(warp_trans3,[-1])[0:8],bev_labels)
def inception_preprocess(image):
image=np.float32(image)/255
image = image - 0.5
image = image*2
return image
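# (added note) inception_preprocess maps uint8 pixel values in [0, 255] to float32 in
# [-1.0, 1.0]: 0 -> -1.0, 127.5 -> 0.0, 255 -> +1.0.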
def inverse_inception_preprocess(image):
image=np.float32(image)/2
image = image + 0.5
image = image*255
return image
def write_to_txt_file(path, strings_list):
file1 = open(path,"a")
for L in strings_list:
file1.write(L)
file1.write("\n")
file1.close()
def write_variables_to_txt_file(path, strings_list):
file1 = open(path,"a")
for L in strings_list:
file1.write(str(L))
file1.write("\n")
file1.close()
def read_from_txt_file(path):
with open(path) as t:
txt = t.readlines()
for k in range(len(txt)):
if '\n' in txt[k]:
txt[k] = txt[k][0:-1]
return txt
def run_training(continue_run):
# train_file ='C:\\winpython\\WPy-3670\\codes\\davis2017\\DAVIS\\ImageSets\\2017\\train.txt'
# data_images_path ='C:\\winpython\\WPy-3670\\codes\\davis2017\\DAVIS\\JPEGImages\\480p\\drone'
logging.error('EXPERIMENT : ' + str(exp_config.experiment_name))
logging.error('THIS IS : ' + str(log_dir))
val_tokens = token_splits.VAL_LOGS
logging.info('EXPERIMENT NAME: %s' % exp_config.experiment_name)
# Tell TensorFlow that the model will be built into the default Graph.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
# with tf.Graph().as_default():
with tf.Session(config = config) as sess:
# Generate placeholders for the images and labels.
training_time_placeholder = tf.placeholder(tf.bool, shape=[])
my_training_placeholder = tf.placeholder(tf.bool, shape=[])
num_frames = None
n_frames_per_seq = None
reference_frame_index_pl = tf.placeholder(tf.int32, shape=[])
# Build a Graph that computes predictions from the inference model.
my_model_options = common.ModelOptions({common.OUTPUT_TYPE:10},crop_size=exp_config.camera_image_patch_size,atrous_rates=[6, 12, 18])
image_tensor_shape = [n_frames_per_seq,exp_config.camera_image_patch_size[0],exp_config.camera_image_patch_size[1],3]
image_mask_tensor_shape = [n_frames_per_seq,int(exp_config.camera_image_patch_size[0]/4),int(exp_config.camera_image_patch_size[1]/4),total_label_slices]
# mask_tensor_shape = [n_seqs,exp_config.patch_size[1],exp_config.patch_size[0],exp_config.num_bev_classes + 1]
images_placeholder = tf.placeholder(tf.float32, shape=image_tensor_shape, name='images')
image_labels_placeholder = tf.placeholder(tf.float32, shape=image_mask_tensor_shape, name='image_labels')
separate_covers_placeholder = tf.placeholder(tf.float32, shape=[n_seqs,num_frames,exp_config.patch_size[1],exp_config.patch_size[0],1], name='separate_covers')
bev_transforms_placeholder = tf.placeholder(tf.float32, shape=[np.max([1,n_seqs-1]),8], name='bev_transforms')
ground_transforms_placeholder1 = tf.placeholder(tf.float32, shape=[n_seqs,num_frames,8], name='ground_transforms1')
ground_transforms_placeholder2 = tf.placeholder(tf.float32, shape=[n_seqs,num_frames,8], name='ground_transforms2')
ground_transforms_placeholder3 = tf.placeholder(tf.float32, shape=[n_seqs,num_frames,8], name='ground_transforms3')
coordinate_ground_transforms_placeholder = tf.placeholder(tf.float32, shape=[n_seqs,num_frames,3,3], name='coordinate_ground_transforms')
no_mask_tensor = tf.constant(-np.ones((1,int(exp_config.patch_size[1]/exp_config.feature_downsample),int(exp_config.patch_size[0]/exp_config.feature_downsample),int(exp_config.num_classes+1)),np.float32))
'''
Extract features from the CAMERA IMAGE
'''
image_total_backbone_out, image_total_relative_endpoints, image_total_end_points =mem_net.image_encoder(images_placeholder,no_mask_tensor,my_model_options,downsample_stages=4,use_deeplab=use_deeplab,is_training=training_time_placeholder, reuse=False)
# image_total_backbone_out = mem_net.my_image_decoder(image_total_relative_endpoints,image_total_backbone_out,reuse=False)
total_input_image = image_total_backbone_out
side_mask_logits,side_occ_est_logits, side_masks, side_occ_softmaxed = mem_net.compat_my_side_decoder(image_total_relative_endpoints,total_input_image,num_classes=1,reuse=False)
reference_image_endpoints=[]
for endi in range(len(image_total_relative_endpoints)):
reference_image_endpoints.append(tf.slice(image_total_relative_endpoints[endi],[reference_frame_index_pl,0,0,0],[1,-1,-1,-1]))
side_obj_logits, side_obj_softmaxed = mem_net.my_object_side_decoder(reference_image_endpoints,tf.slice(total_input_image,[reference_frame_index_pl,0,0,0],[1,-1,-1,-1]),exp_config,reuse=False)
# logging.error('SIDE OCC LOGITS ' + str(side_obj_))
# logging.error('SIDE OCC LABELS ' + str(tf.squeeze(tf.slice(image_labels_placeholder,[0,0,0,exp_config.num_classes+1],[-1,-1,-1,-1]),axis=-1)))
projected_estimates = tf.contrib.image.transform(
tf.concat([side_masks,side_occ_softmaxed],axis=-1),
tf.squeeze(tf.slice(ground_transforms_placeholder1,[0,0,0],[1,-1,-1]),axis=0),
interpolation='BILINEAR',
output_shape=(exp_config.project_patch_size[1],exp_config.project_patch_size[0]),
name='tensorflow_ground_transform'
)
cur_separate_covers = tf.squeeze(tf.slice(separate_covers_placeholder,[0,0,0,0,0],[1,-1,-1,-1,-1]),axis=0)
combined_projected_estimates = tf.reduce_max(projected_estimates*cur_separate_covers,axis=0,keepdims=True)
projected_obj_estimates = tf.contrib.image.transform(
side_obj_softmaxed,
tf.squeeze(tf.slice(ground_transforms_placeholder1,[0,reference_frame_index_pl,0],[1,1,-1]),axis=0),
interpolation='BILINEAR',
output_shape=(exp_config.project_patch_size[1],exp_config.project_patch_size[0]),
name='tensorflow_ground_transform'
)
projected_obj_estimates = projected_obj_estimates*tf.squeeze(tf.slice(separate_covers_placeholder,[0,reference_frame_index_pl,0,0,0],[1,1,-1,-1,-1]),axis=0)
combined_projected_estimates = tf.concat([combined_projected_estimates,projected_obj_estimates],axis=-1)
resized_combined_projected_estimates = tf.image.resize(
combined_projected_estimates, [int(exp_config.patch_size[1]/8),int(exp_config.patch_size[0]/8)] ,method='bilinear',name='projected_estimates_resize' )
bigger_resized_combined_projected_estimates = tf.image.resize(
combined_projected_estimates, [int(exp_config.patch_size[1]/4),int(exp_config.patch_size[0]/4)] ,method='bilinear',name='bigger_projected_estimates_resize' )
logging.error('BIGGER PROJ ' + str(bigger_resized_combined_projected_estimates))
'''
Scale the coordinates to the original image so that the transformation is compatible
'''
all_bev_total_backbone_out = tf.contrib.image.transform(
image_total_relative_endpoints[0],
tf.squeeze(tf.slice(ground_transforms_placeholder2,[0,0,0],[1,-1,-1]),axis=0),
interpolation='BILINEAR',
output_shape=(exp_config.project_patch_size[1],exp_config.project_patch_size[0]),
name='tensorflow_ground_transform_end1'
)
cur_separate_covers = tf.squeeze(tf.slice(separate_covers_placeholder,[0,0,0,0,0],[1,-1,-1,-1,-1]),axis=0)
# combined_back_out = tf.reduce_max(tf.slice(all_bev_total_backbone_out,[0,0,0,0],[-1,-1,-1,128])*cur_separate_covers,axis=0,keepdims=True)
# combined_back_out = tf.concat([tf.tile(combined_back_out,[num_frames,1,1,1]),tf.slice(all_bev_total_backbone_out,[0,0,0,128],[-1,-1,-1,-1])],axis=-1)
combined_back_out = tf.reduce_max(all_bev_total_backbone_out*cur_separate_covers,axis=0,keepdims=True)
combined_back_out = tf.concat([combined_back_out,tf.slice(all_bev_total_backbone_out,[reference_frame_index_pl,0,0,0],[1,-1,-1,-1])],axis=-1)
bev_total_backbone_out = tf.image.resize(
combined_back_out, [int(exp_config.patch_size[1]/8),int(exp_config.patch_size[0]/8)] ,method='bilinear',name='projected_estimates_resize' )
all_bev_end2 = tf.contrib.image.transform(
image_total_relative_endpoints[1],
tf.squeeze(tf.slice(ground_transforms_placeholder1,[0,0,0],[1,-1,-1]),axis=0),
interpolation='BILINEAR',
output_shape=(exp_config.project_patch_size[1],exp_config.project_patch_size[0]),
name='tensorflow_ground_transform_end2'
)
logging.error('ENDPOINT WARPED ' + str(all_bev_end2))
cur_separate_covers = tf.squeeze(tf.slice(separate_covers_placeholder,[0,0,0,0,0],[1,-1,-1,-1,-1]),axis=0)
# combined_end = tf.reduce_max(tf.slice(all_bev_end2,[0,0,0,0],[-1,-1,-1,128])*cur_separate_covers,axis=0,keepdims=True)
# combined_end = tf.concat([tf.tile(combined_end,[num_frames,1,1,1]),tf.slice(all_bev_end2,[0,0,0,128],[-1,-1,-1,-1])],axis=-1)
combined_end = tf.reduce_max(all_bev_end2*cur_separate_covers,axis=0,keepdims=True)
combined_end = tf.concat([combined_end,tf.slice(all_bev_end2,[reference_frame_index_pl,0,0,0],[1,-1,-1,-1])],axis=-1)
# combined_end = tf.reduce_max( all_bev_end2*cur_separate_covers,axis=0,keepdims=True)
combined_end = tf.image.resize(
combined_end, [int(exp_config.patch_size[1]/4),int(exp_config.patch_size[0]/4)] ,method='bilinear',name='projected_estimates_resize' )
bev_total_relative_endpoints = [tf.concat([combined_end,bigger_resized_combined_projected_estimates],axis=-1)]
total_input = tf.concat([ resized_combined_projected_estimates,bev_total_backbone_out],axis=-1)
static_logits, static_masks,object_logits, object_masks = mem_net.my_bev_object_decoder(bev_total_relative_endpoints,total_input,exp_config,reuse=False)
masks = tf.concat([static_masks,object_masks],axis=-1)
saver = tf.train.Saver(max_to_keep=2)
# saver_best_loss = tf.train.Saver(max_to_keep=2)
init = tf.global_variables_initializer()
sess.run(init)
load_path = exp_config.load_path
saver.restore(sess,load_path)
# to_load_saver.restore(sess,load_path)
sess.run(mem_net.interp_surgery(tf.global_variables()))
val_res=do_eval(sess,val_tokens,
my_training_placeholder,
images_placeholder,
image_labels_placeholder,
bev_transforms_placeholder,
separate_covers_placeholder,
ground_transforms_placeholder1,
ground_transforms_placeholder2,
ground_transforms_placeholder3,
coordinate_ground_transforms_placeholder,
projected_obj_estimates,
masks,
side_masks,side_occ_softmaxed,side_obj_softmaxed,
projected_estimates,
reference_frame_index_pl,
combined_projected_estimates,
0,training_time_placeholder,val_folder_path=validation_res_path)
overall_mean = np.mean(np.array(val_res))
logging.error('Overall mean : ' + str(overall_mean))
def eval_iterator(ref_id,frame_interval,n_frames_per_seq,my_scene,cur_index,single_frame=False,apply_interval=False):
num_frames = n_frames_per_seq
n_seqs = 1
camera = "ring_front_center"
scene = train_loader.get(my_scene)
pool = ThreadPool(n_seqs*num_frames)
frame_ids=[]
first_frame = cur_index
frame_ids.append(first_frame)
# logging.error('LEN IMAGES ' + str(len(all_images_list)))
if single_frame:
for frame_number in range(1,n_frames_per_seq):
frame_ids.append(first_frame )
# logging.error('ENTERED SINGLE FRAME ' + str(frame_ids))
else:
# if apply_interval:
for frame_number in range(1,n_frames_per_seq):
frame_ids.append(first_frame + frame_interval*frame_number)
# else:
# for frame_number in range(1,n_frames_per_seq):
# frame_ids.append(first_frame + frame_number)
pairs = []
# logging.error('SCENE ' + my_scene)
# logging.error('FRAMES ' + str(frame_ids))
timestamp = str(np.copy(train_loader._image_timestamp_list_sync[my_scene][camera][frame_ids[ref_id]]))
# logging.error('TIME S ' + timestamp)
output_path = os.path.join(exp_config.argo_labels_path,
my_scene, camera,
str(camera)+'_'+str(timestamp)+'.png')
encoded_labels = np.flipud(np.array(Image.open(output_path),np.int32))
# logging.error('ENCODED LABELS SHAPE ' + str(encoded_labels.shape))
num_class = exp_config.num_bev_classes
bev_labels = decode_binary_labels(encoded_labels, num_class+ 1)
# mask = ~labels[...,-1]
# labels = labels[...,:-1]
calib_cur = train_loader.get_calibration(camera, my_scene)
    calib_ref = calib_cur
to_image_transform = utils.argoverse_project_to_image(exp_config, np.zeros_like(bev_labels),calib_ref)
# image_objects = cv2.warpPerspective(np.squeeze(bev_labels[...,-1]),to_image_transform,exp_config.original_image_size,flags=cv2.INTER_LINEAR)
# image_objects= cv2.resize(image_objects,(int(exp_config.camera_image_patch_size[1]/4),int(exp_config.camera_image_patch_size[0]/4)), interpolation = cv2.INTER_LINEAR)
# vis_mask = np.float32(image_objects > 0.5)
vis_mask = np.copy(np.uint8(np.flipud(utils.get_visible_mask(calib_cur.K, calib_cur.camera_config.img_width,
exp_config.map_extents, exp_config.resolution))))
# logging.error('VIS MASK SHAPE ' + str(vis_mask.shape))
# logging.error('BEV LABELS SHAPE ' + str(bev_labels.shape))
vis_mask = np.expand_dims(vis_mask,axis=-1)
bev_labels = np.concatenate([bev_labels[...,:exp_config.num_bev_classes],vis_mask,vis_mask*(1-np.expand_dims(bev_labels[...,exp_config.num_bev_classes],axis=-1))],axis=-1)
pose_ref = np.copy(scene.get_pose(frame_ids[ref_id]).transform_matrix)
for k in range(n_seqs):
for m in range(num_frames):
timestamp = str(np.copy(train_loader._image_timestamp_list_sync[my_scene][camera][frame_ids[m]]))
# logging.error('TIME ' + str(m) + ' ' + timestamp)
pose_cur = np.copy(scene.get_pose(frame_ids[m]).transform_matrix)
output_path = os.path.join(exp_config.argo_labels_path,
my_scene, camera,
str(camera)+'_'+str(timestamp)+'.png')
image_string = os.path.join(exp_config.argo_track_path,my_scene,'ring_front_center','ring_front_center_'+str(timestamp)+'.jpg')
pairs.append((image_string,output_path,vis_mask,calib_cur,pose_ref,pose_cur,m==ref_id))
results = pool.map(single_process,pairs)
pool.close()
pool.join()
# logging.error('Results shape : ' + str(len(results)))
seq_images_ar=np.zeros((n_frames_per_seq,exp_config.camera_image_patch_size[0],exp_config.camera_image_patch_size[1],3),np.float32)
seq_labels_ar=np.ones((n_frames_per_seq,int(exp_config.camera_image_patch_size[0]/4),int(exp_config.camera_image_patch_size[1]/4),exp_config.num_bev_classes+2),np.float32)
bev_transforms_ar1=np.ones((n_seqs,num_frames,8),np.float32)
bev_transforms_ar2=np.ones((n_seqs,num_frames,8),np.float32)
bev_transforms_ar3=np.ones((n_seqs,num_frames,8),np.float32)
coordinate_transforms_ar=np.ones((n_seqs,num_frames,3,3),np.float32)
bev_images_ar=np.zeros((n_seqs,num_frames,exp_config.patch_size[1],exp_config.patch_size[0],3),np.float32)
bev_covers_ar=np.ones((n_seqs,num_frames,exp_config.patch_size[1],exp_config.patch_size[0],1),np.float32)
all_bev_labels_ar = np.zeros((num_frames,196,200,exp_config.num_bev_classes+2),np.float32)
# logging.error('PROJECT TO GROUND ENDED')
for k in range(len(results)):
temp_res = results[k]
if k < num_frames:
seq_images_ar[k,...] = np.copy(temp_res[0])
seq_labels_ar[k,...] = np.copy(temp_res[1])
elif k >= (n_seqs*num_frames - (n_seqs - 1)):
seq_images_ar[k - (num_frames-1),...] = np.copy(temp_res[0])
            seq_labels_ar[k - (num_frames-1),...] = np.copy(temp_res[1])
### Script for hyperparameter training of LSTM
#Setup
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import keras_tuner as kt
import json
from sklearn.metrics import accuracy_score, roc_auc_score, mean_squared_error, mean_absolute_error, auc, confusion_matrix, roc_curve, precision_score, recall_score, f1_score
#Read in the data
data = np.load('/store/DAMTP/dfs28/PICU_data/np_arrays.npz')
array3d = data['d3']
array2d = data['d2']
outcomes = data['outcomes']
def test_trainsplit(array, split):
"""
Function to split up 3d slices into test, train, validate
split is an np.ndarray
"""
    #Use the longest dimension to size the splits (note: the slicing below is along axis 0)
    shape = array.shape
    z_dim = np.max(shape)
    #Ensure splits add up to 1, get indices based on splits
split = split/sum(split)
indices = np.floor(z_dim*split)
#Get cumulative indices, ensure start from beginning and end at end
cumulative_indices = np.cumsum(indices).astype(int)
cumulative_indices = np.insert(cumulative_indices, 0, 0)
cumulative_indices[-1] = z_dim
split_array = list()
for i in range(len(split)):
start = cumulative_indices[i]
finish = cumulative_indices[i + 1]
temp_array = array[start:finish, ]
split_array.append(temp_array)
return split_array
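#Worked example (illustrative values, not from the data below): for an array with 100 slices and
#split = np.array([70, 15, 15]), the fractions become [0.7, 0.15, 0.15], the cumulative boundaries
#[0, 70, 85, 100], and the function returns array[0:70], array[70:85] and array[85:100] along axis 0.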
#Split up testing and outcomes
array3d = np.transpose(array3d, (0, 2, 1))
split_array3d = test_trainsplit(array3d, np.array([70, 15, 15]))
split_array2d = test_trainsplit(array2d, np.array([70, 15, 15]))
split_outcomes = test_trainsplit(outcomes, np.array([70, 15, 15]))
split_array3d2 = test_trainsplit(array3d, np.array([85, 15]))
split_array2d2 = test_trainsplit(array2d, np.array([85, 15]))
split_outcomes2 = test_trainsplit(outcomes, np.array([85, 15]))
train_array3d = split_array3d[0]
train_array2d = split_array2d[0]
train_outcomes = split_outcomes[0]
test_array3d = split_array3d[1]
test_array2d = split_array2d[1]
test_outcomes = split_outcomes[1]
validate_array3d = split_array3d[2]
validate_array2d = split_array2d[2]
validate_outcomes = split_outcomes[2]
all_train_array3d = split_array3d2[0]
all_train_array2d = split_array2d2[0]
all_train_outcomes = split_outcomes2[0]
all_test_array3d = split_array3d2[1]
all_test_array2d = split_array2d2[1]
all_test_outcomes = split_outcomes2[1]
def make_LSTM(model_type):
#Other things to think about - momentum, dropout, sparsity, more layers
    #Set regularisers (assigned so the 0.001 penalties are actually applied)
    kernal_regulariser = tf.keras.regularizers.l1(0.001)
    bias_regulariser = tf.keras.regularizers.l1(0.001)
    activity_regulariser = tf.keras.regularizers.l2(0.001)
    init = tf.keras.initializers.GlorotUniform()
#Set the input shape
input_shape3d = train_array3d.shape
input_timeseries = keras.Input(shape = input_shape3d[1:])
input_flat = keras.Input(shape = train_array2d.shape[1:])
x = tf.keras.layers.Bidirectional(layers.LSTM(100, return_sequences= True))(input_timeseries)
x = tf.keras.layers.Bidirectional(layers.LSTM(150, return_sequences= True))(x)
x = tf.keras.layers.Bidirectional(layers.LSTM(150, return_sequences= False))(x)
##Now make the other head with input
y = layers.Dense(45, activation = 'relu', kernel_initializer = init,
kernel_regularizer= kernal_regulariser,
bias_regularizer= bias_regulariser,
activity_regularizer= activity_regulariser)(input_flat)
    #Second dense layer of the flat-input head, chained on the 45-unit layer above
    y = layers.Dense(20, activation = 'relu', kernel_initializer = init,
                     kernel_regularizer= kernal_regulariser,
                     bias_regularizer= bias_regulariser)(y)
    #Flatten the LSTM output so it can be concatenated with the flat-input head
flattened = layers.Flatten()(x)
concatted = layers.Concatenate()([y, flattened])
    #With dropout
concatted = layers.Dropout(0.5)(concatted)
dense2 = layers.Dense(40, activation = 'relu', use_bias = True, kernel_initializer = init,
kernel_regularizer= kernal_regulariser,
bias_regularizer= bias_regulariser)(concatted)
#Make this a multihead output
death_head = layers.Dense(20, activation = 'relu', use_bias = True, kernel_initializer = init,
kernel_regularizer= kernal_regulariser,
bias_regularizer= bias_regulariser)(dense2)
death_head = layers.Dense(3, activation = 'softmax', use_bias = True, kernel_initializer = init,
kernel_regularizer= kernal_regulariser,
bias_regularizer= bias_regulariser)(death_head)
time_head = layers.Dense(20, activation = 'relu', use_bias = True, kernel_initializer = init,
kernel_regularizer= kernal_regulariser,
bias_regularizer= bias_regulariser)(dense2)
time_head = layers.Dense(3, activation = 'softmax', use_bias = True, kernel_initializer = init,
kernel_regularizer= kernal_regulariser,
bias_regularizer= bias_regulariser)(time_head)
PEWS_head = layers.Dense(20, activation = 'relu', use_bias = True, kernel_initializer = init,
kernel_regularizer= kernal_regulariser,
bias_regularizer= bias_regulariser)(dense2)
PEWS_head = layers.Dense(3, activation = 'softmax', use_bias = True, kernel_initializer = init,
kernel_regularizer= kernal_regulariser,
bias_regularizer= bias_regulariser)(PEWS_head)
#This is the full model with death and LOS as the outcome
full_model = keras.Model([input_timeseries, input_flat], [death_head, time_head, PEWS_head])
death_model = keras.Model([input_timeseries, input_flat], death_head)
discharge_model = keras.Model([input_timeseries, input_flat], [time_head])
PEWS_model = keras.Model([input_timeseries, input_flat], [PEWS_head])
    #Allow this to return one of 4 different model structures (full, death, discharge or PEWS)
if model_type == 'full':
return full_model
elif model_type == 'death':
return death_model
elif model_type == 'discharge':
return discharge_model
elif model_type == 'PEWS':
return PEWS_model
#Set up some storage for the different metrics
AUC_death_full = list()
AUC_LOS_full = list()
AUC_PEWS_full = list()
acc_death_full = list()
acc_LOS_full = list()
acc_PEWS_full= list()
MSE_death_full = list()
MSE_LOS_full = list()
MSE_PEWS_full = list()
MAE_death_full = list()
MAE_LOS_full = list()
MAE_PEWS_full = list()
recall_death_full = list()
recall_LOS_full = list()
recall_PEWS_full = list()
precision_death_full = list()
precision_LOS_full = list()
precision_PEWS_full = list()
F1_death_full = list()
F1_LOS_full = list()
F1_PEWS_full = list()
#Run this 10 times
for i in range(10):
full_model = make_LSTM('full')
full_model.compile(optimizer = 'adam', loss='categorical_crossentropy', metrics=['accuracy',
'mse', tf.keras.metrics.MeanAbsoluteError(),
tf.keras.metrics.AUC()])
    #Don't forget to add batch size back in (160)
#Now fit the model
full_model_history = full_model.fit([all_train_array3d, all_train_array2d], [all_train_outcomes[:, 2:5], all_train_outcomes[:, 5:8], all_train_outcomes[:, 8:11]],
epochs = 20,
batch_size = 160,
shuffle = True,
validation_data = ([all_test_array3d, all_test_array2d], [all_test_outcomes[:, 2:5], all_test_outcomes[:, 5:8], all_test_outcomes[:, 8:11]]),
callbacks = [tf.keras.callbacks.EarlyStopping(patience=1)])
y_pred1, y_pred2, y_pred3 = full_model.predict([all_test_array3d, all_test_array2d])
recall_death_full.append(recall_score(np.argmax(y_pred1, axis = 1), np.argmax(all_test_outcomes[:, 2:5], axis = 1), average = 'macro'))
recall_LOS_full.append(recall_score(np.argmax(y_pred2, axis = 1), np.argmax(all_test_outcomes[:, 5:8], axis = 1), average = 'macro'))
recall_PEWS_full.append(recall_score(np.argmax(y_pred3, axis = 1), np.argmax(all_test_outcomes[:, 8:11], axis = 1), average = 'macro'))
precision_death_full.append(precision_score(np.argmax(y_pred1, axis = 1), np.argmax(all_test_outcomes[:, 2:5], axis = 1), average = 'macro'))
precision_LOS_full.append(precision_score(np.argmax(y_pred2, axis = 1), np.argmax(all_test_outcomes[:, 5:8], axis = 1), average = 'macro'))
precision_PEWS_full.append(precision_score(np.argmax(y_pred3, axis = 1), np.argmax(all_test_outcomes[:, 8:11], axis = 1), average = 'macro'))
F1_death_full.append(f1_score(np.argmax(y_pred1, axis = 1), np.argmax(all_test_outcomes[:, 2:5], axis = 1), average = 'macro'))
F1_LOS_full.append(f1_score(np.argmax(y_pred2, axis = 1), np.argmax(all_test_outcomes[:, 5:8], axis = 1), average = 'macro'))
F1_PEWS_full.append(f1_score(np.argmax(y_pred3, axis = 1), np.argmax(all_test_outcomes[:, 8:11], axis = 1), average = 'macro'))
keys = [i for i in full_model_history.history.keys()]
AUC_death_full.append(full_model_history.history[keys[23]][-1])
AUC_LOS_full.append(full_model_history.history[keys[27]][-1])
AUC_PEWS_full.append(full_model_history.history[keys[31]][-1])
acc_death_full.append(full_model_history.history[keys[20]][-1])
acc_LOS_full.append(full_model_history.history[keys[24]][-1])
acc_PEWS_full.append(full_model_history.history[keys[28]][-1])
MSE_death_full.append(full_model_history.history[keys[21]][-1])
MSE_LOS_full.append(full_model_history.history[keys[25]][-1])
MSE_PEWS_full.append(full_model_history.history[keys[29]][-1])
MAE_death_full.append(full_model_history.history[keys[22]][-1])
MAE_LOS_full.append(full_model_history.history[keys[26]][-1])
MAE_PEWS_full.append(full_model_history.history[keys[30]][-1])
conf_mat1 = confusion_matrix(np.argmax(y_pred1, axis = 1), np.argmax(all_test_outcomes[:, 2:5], axis = 1))
conf_mat2 = confusion_matrix(np.argmax(y_pred2, axis = 1), np.argmax(all_test_outcomes[:, 5:8], axis = 1))
conf_mat3 = confusion_matrix(np.argmax(y_pred3, axis = 1), np.argmax(all_test_outcomes[:, 8:11], axis = 1))
#Storage for individual models
AUC_death_individual = list()
AUC_LOS_individual = list()
AUC_PEWS_individual = list()
acc_death_individual = list()
acc_LOS_individual = list()
acc_PEWS_individual= list()
MSE_death_individual = list()
MSE_LOS_individual = list()
MSE_PEWS_individual = list()
MAE_death_individual = list()
MAE_LOS_individual = list()
MAE_PEWS_individual = list()
recall_death_individual = list()
recall_LOS_individual = list()
recall_PEWS_individual = list()
precision_death_individual = list()
precision_LOS_individual = list()
precision_PEWS_individual = list()
F1_death_individual = list()
F1_LOS_individual = list()
F1_PEWS_individual = list()
#Mortality prediction
for i in range(10):
full_model = make_LSTM('death')
full_model.compile(optimizer = 'adam', loss='categorical_crossentropy', metrics = ['mse', tf.keras.metrics.MeanAbsoluteError(),
tf.keras.metrics.AUC()])
    #Don't forget to add batch size back in (160)
#Now fit the model
full_model_history = full_model.fit([all_train_array3d, all_train_array2d], [all_train_outcomes[:, 2:5]],
epochs = 20,
batch_size = 160,
shuffle = True,
validation_data = ([all_test_array3d, all_test_array2d], [all_test_outcomes[:, 2:5]]),
callbacks = [tf.keras.callbacks.EarlyStopping(patience=1)])
y_pred1 = full_model.predict([all_test_array3d, all_test_array2d])
acc_death_individual.append(accuracy_score(np.argmax(y_pred1, axis = 1), np.argmax(all_test_outcomes[:, 2:5], axis = 1)))
recall_death_individual.append(recall_score(np.argmax(y_pred1, axis = 1), np.argmax(all_test_outcomes[:, 2:5], axis = 1), average = 'macro'))
precision_death_individual.append(precision_score(np.argmax(y_pred1, axis = 1), np.argmax(all_test_outcomes[:, 2:5], axis = 1), average = 'macro'))
F1_death_individual.append(f1_score(np.argmax(y_pred1, axis = 1), np.argmax(all_test_outcomes[:, 2:5], axis = 1), average = 'macro'))
keys = [i for i in full_model_history.history.keys()]
AUC_death_individual.append(full_model_history.history[keys[7]][-1])
MSE_death_individual.append(full_model_history.history[keys[5]][-1])
MAE_death_individual.append(full_model_history.history[keys[6]][-1])
#LOS prediction
for i in range(10):
full_model = make_LSTM('discharge')
full_model.compile(optimizer = 'adam', loss='categorical_crossentropy', metrics = ['mse', tf.keras.metrics.MeanAbsoluteError(),
tf.keras.metrics.AUC()])
    #Don't forget to add batch size back in (160)
#Now fit the model
full_model_history = full_model.fit([all_train_array3d, all_train_array2d], [all_train_outcomes[:, 5:8]],
epochs = 20,
batch_size = 160,
shuffle = True,
validation_data = ([all_test_array3d, all_test_array2d], [all_test_outcomes[:, 5:8]]),
callbacks = [tf.keras.callbacks.EarlyStopping(patience=1)])
y_pred1 = full_model.predict([all_test_array3d, all_test_array2d])
acc_LOS_individual.append(accuracy_score(np.argmax(y_pred1, axis = 1), np.argmax(all_test_outcomes[:, 5:8], axis = 1)))
recall_LOS_individual.append(recall_score(np.argmax(y_pred1, axis = 1), np.argmax(all_test_outcomes[:, 5:8], axis = 1), average = 'macro'))
precision_LOS_individual.append(precision_score(np.argmax(y_pred1, axis = 1), np.argmax(all_test_outcomes[:, 5:8], axis = 1), average = 'macro'))
F1_LOS_individual.append(f1_score(np.argmax(y_pred1, axis = 1), np.argmax(all_test_outcomes[:, 5:8], axis = 1), average = 'macro'))
keys = [i for i in full_model_history.history.keys()]
AUC_LOS_individual.append(full_model_history.history[keys[7]][-1])
MSE_LOS_individual.append(full_model_history.history[keys[5]][-1])
MAE_LOS_individual.append(full_model_history.history[keys[6]][-1])
#Deterioration prediction
for i in range(10):
full_model = make_LSTM('PEWS')
full_model.compile(optimizer = 'adam', loss='categorical_crossentropy', metrics = ['mse', tf.keras.metrics.MeanAbsoluteError(),
tf.keras.metrics.AUC()])
    #Don't forget to add batch size back in (160)
#Now fit the model
full_model_history = full_model.fit([all_train_array3d, all_train_array2d], [all_train_outcomes[:, 8:11]],
epochs = 20,
batch_size = 160,
shuffle = True,
validation_data = ([all_test_array3d, all_test_array2d], [all_test_outcomes[:, 8:11]]),
callbacks = [tf.keras.callbacks.EarlyStopping(patience=1)])
y_pred1 = full_model.predict([all_test_array3d, all_test_array2d])
acc_PEWS_individual.append(accuracy_score(np.argmax(y_pred1, axis = 1), np.argmax(all_test_outcomes[:, 8:11], axis = 1)))
recall_PEWS_individual.append(recall_score(np.argmax(y_pred1, axis = 1), np.argmax(all_test_outcomes[:, 8:11], axis = 1), average = 'macro'))
precision_PEWS_individual.append(precision_score(np.argmax(y_pred1, axis = 1), np.argmax(all_test_outcomes[:,8:11], axis = 1), average = 'macro'))
F1_PEWS_individual.append(f1_score(np.argmax(y_pred1, axis = 1), np.argmax(all_test_outcomes[:, 8:11], axis = 1), average = 'macro'))
keys = [i for i in full_model_history.history.keys()]
AUC_PEWS_individual.append(full_model_history.history[keys[7]][-1])
MSE_PEWS_individual.append(full_model_history.history[keys[5]][-1])
MAE_PEWS_individual.append(full_model_history.history[keys[6]][-1])
results = {#'acc_death_individual_mean' : np.mean(acc_death_individual),
#'acc_death_individual_std' : np.std(acc_death_individual),
'acc_death_full_mean' : np.mean(acc_death_full),
'acc_death_full_std' : np.std(acc_death_full),
#'acc_LOS_individual_mean' : np.mean(acc_LOS_individual),
#'acc_LOS_individual_std' : np.std(acc_LOS_individual),
'acc_LOS_full_mean' : np.mean(acc_LOS_full),
'acc_LOS_full_std' : np.std(acc_LOS_full),
#'acc_PEWS_individual_mean' : np.mean(acc_PEWS_individual),
#'acc_PEWS_individual_std' : np.std(acc_PEWS_individual),
'acc_PEWS_full_mean' : np.mean(acc_PEWS_full),
'acc_PEWS_full_std' : np.std(acc_PEWS_full),
#'AUC_death_individual_mean' : np.mean(AUC_death_individual),
#'AUC_death_individual_std' : np.std(AUC_death_individual),
           'AUC_death_full_mean' : np.mean(AUC_death_full),
           'AUC_death_full_std' : np.std(AUC_death_full)}
"""Neural network position reconstruction"""
import numpy as np
from pax import plugin, utils
class PosRecNeuralNet(plugin.PosRecPlugin):
"""Reconstruct S2 x,y positions from top pmt hit pattern using a feed-foward neural net
See <NAME>' thesis for details:
http://www.physik.uzh.ch/groups/groupbaudis/darkmatter/theses/xenon/Kish_THESISelectronic.pdf
"""
def startup(self):
""" Initialize the neural net.
"""
if self.config['pmt_0_is_fake']:
self.input_channels = self.pmts[1:]
else:
self.input_channels = self.pmts
self.nn_output_unit = self.config['nn_output_unit']
# Possibly scale the input of the activation function by a supplied value (float)
activation_scale = self.config['activation_function_scale']
# Apply the activation function to the output layer (bool)
output_layer_function = self.config['output_layer_function']
# Load the file defining the structure (number of nodes per layer)
# as well as the weights and biases of the neural network
data = np.load(utils.data_file_name(self.config['neural_net_file']))
self.nn = NeuralNet(structure=data['structure'],
weights=data['weights'],
biases=data['biases'],
activation_scale=activation_scale,
output_layer_function=output_layer_function)
data.close()
def reconstruct_position(self, peak):
input_areas = peak.area_per_channel[self.input_channels]
# Run the neural net
# Input is fraction of top area (see Xerawdp, PositionReconstruction.cpp, line 246)
# Convert from neural net's units to pax units
return self.nn.run(input_areas/np.sum(input_areas)) * self.nn_output_unit
class NeuralNet():
"""Feed-forward neural net with an arbitrary number of hidden layers
- Input layer without activation function or bias
- Hidden layers with tanh(sum + bias) activation function
- Output layer with sum + bias or tanh(sum + bias) activation function
All neurons in a layer are connected to all neurons in the previous layer.
"""
def __init__(self, structure, weights, biases, activation_scale, output_layer_function):
self.n_inputs = structure[0]
self.n_output = structure[-1]
self.n_layers = len(structure)
self.structure = np.array(structure)
self.weights = np.array(weights)
self.biases = np.array(biases)
self.activation_scale = activation_scale
self.output_layer_function = output_layer_function
# Calculate the number of connections in the network, per-layer
self.n_connections_per_layer = [structure[i] * structure[i+1] for i in range(self.n_layers - 1)]
# Create lists of the first weights and bias indices in each layer
self.weight_indices = np.insert(np.cumsum(self.n_connections_per_layer), 0, 0)
self.bias_indices = np.insert(np.cumsum(self.structure[1:]), 0, 0)
# Sanity checks
if not len(structure) > 2:
raise ValueError("There are not enough layers in the network, need at least 2+1")
if not len(biases) == np.sum(structure[1:]):
raise ValueError("Each hidden and output neuron must have a bias!")
        if not len(weights) == np.sum(self.n_connections_per_layer):
            raise ValueError("Each connection between neurons must have a weight!")
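    # The network evaluation method is not included in this excerpt. The sketch below is an assumed
    # implementation of the forward pass described in the class docstring (tanh(scale * (W.x + b)) on
    # the hidden layers, optional tanh on the output layer); the flat, layer-major (n_out, n_in)
    # weight layout is an assumption, not taken from the original source.
    def run(self, input_values):
        values = np.asarray(input_values, dtype=float)
        for layer in range(self.n_layers - 1):
            # Slice this layer's weights out of the flat array and reshape to (n_out, n_in)
            w = self.weights[self.weight_indices[layer]:self.weight_indices[layer + 1]]
            w = w.reshape(self.structure[layer + 1], self.structure[layer])
            b = self.biases[self.bias_indices[layer]:self.bias_indices[layer + 1]]
            values = np.dot(w, values) + b
            # Activation on every hidden layer; on the output layer only if requested
            if layer < self.n_layers - 2 or self.output_layer_function:
                values = np.tanh(self.activation_scale * values)
        return values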
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This Module contains a collection of Mutation operators to be used in the ES-Framework
A Mutation operator mutates an Individual's genotype inline, thus returning nothing.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
__author__ = '<NAME> <<EMAIL>>'
import numpy as np
import random
from numpy import add, bitwise_and, dot, exp, floor, mod, shape, zeros
from numpy.linalg import norm
from random import gauss
from math import sqrt
'''-----------------------------------------------------------------------------
# Mutation Helper Functions #
-----------------------------------------------------------------------------'''
def _keepInBounds(x, l_bound, u_bound):
"""
This function transforms x to t w.r.t. the low and high
boundaries lb and ub. It implements the function T^{r}_{[a,b]} as
described in Rui Li's PhD thesis "Mixed-Integer Evolution Strategies
for Parameter Optimization and Their Applications to Medical Image
Analysis" as alorithm 6.
:param x: Column vector to be kept in bounds
:param l_bound: Lower bound column vector
:param u_bound: Upper bound column vector
:returns: An in-bounds kept version of the column vector ``x``
"""
y = (x - l_bound) / (u_bound - l_bound)
floor_y = floor(y) # Local storage to prevent double calls
I = mod(floor_y, 2) == 0
yprime = zeros(shape(y))
yprime[I] = np.abs(y[I] - floor_y[I])
yprime[~I] = 1.0 - np.abs(y[~I] - floor_y[~I])
x = l_bound + (u_bound - l_bound) * yprime
return x
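#Worked example (illustrative): with bounds [0, 1] an out-of-bounds value is reflected back inside,
#e.g. _keepInBounds(np.array([1.2, -0.3]), np.array([0., 0.]), np.array([1., 1.])) -> array([0.8, 0.3])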
def adaptStepSize(individual):
"""
Given the current individual, randomly determine a new step size offset
that can be no greater than maxStepSize - baseStepSize
:param individual: The :class:`~modea.Individual.FloatIndividual` object whose step size should be adapted
"""
# Empirically determined, see paper
gamma = 0.22
offset = individual.stepSizeOffset
offset = 1 + ((1 - offset) / offset)
offset = 1 / (offset * exp(gamma * gauss(0, 1)))
individual.stepSizeOffset = min(offset, (individual.maxStepSize - individual.baseStepSize))
def _scaleWithThreshold(mutation_vector, threshold):
"""
Checks if norm(mutation_vector) is at least the given threshold.
If not, the vector is mirrored to the other side of the threshold,
i.e. scaled to be length: threshold + (threshold - norm(mutation_vector))
:param mutation_vector: Mutation vector to be scaled
:param threshold: Minimum length threshold. Vector is scaled if length does not reach threshold
:returns: The threshold-compliant mutation vector
"""
length = norm(mutation_vector)
if length < threshold:
new_length = threshold + (threshold - length)
mutation_vector *= (new_length / length)
return mutation_vector
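#Worked example (illustrative): a mutation vector of length 3 with threshold 5 is mirrored to
#length 7 (= 5 + (5 - 3)), i.e. scaled by a factor 7/3; vectors already at or above the threshold
#are returned unchanged.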
def _adaptSigma(sigma, p_s, c=0.817):
"""
Adapt parameter sigma based on the 1/5th success rule
:param sigma: Sigma value to be adapted
:param p_s: Recent success rate, determines whether sigma is increased or decreased
:param c: Factor c that is used to increase or decrease sigma
:returns: New value sigma
"""
if p_s < 1/5:
sigma *= c
elif p_s > 1/5:
sigma /= c
return sigma
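#Worked example (illustrative): with the default c = 0.817, a success rate below 1/5 shrinks sigma
#(_adaptSigma(1.0, 0.1) -> 0.817), a rate above 1/5 grows it (_adaptSigma(1.0, 0.3) -> ~1.224), and
#at exactly 1/5 sigma is returned unchanged.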
def _getXi():
"""
Randomly returns 5/7 or 7/5 with equal probability
:return: float Xi
"""
if bool(random.getrandbits(1)):
return 5/7
else:
return 7/5
'''-----------------------------------------------------------------------------
# ES Mutations #
-----------------------------------------------------------------------------'''
def addRandomOffset(individual, param, sampler):
"""
Mutation 1: x = x + sigma*N(0,I)
:param individual: :class:`~modea.Individual.FloatIndividual` to be mutated
:param param: :class:`~modea.Parameters.Parameters` object to store settings
:param sampler: :mod:`~modea.Sampling` module from which the random values should be drawn
"""
individual.genotype += param.sigma * sampler.next()
def CMAMutation(individual, param, sampler, threshold_convergence=False):
"""
CMA mutation: x = x + (sigma * B*D*N(0,I))
:param individual: :class:`~modea.Individual.FloatIndividual` to be mutated
:param param: :class:`~modea.Parameters.Parameters` object to store settings
:param sampler: :mod:`~modea.Sampling` module from which the random values should be drawn
:param threshold_convergence: Boolean: Should threshold convergence be applied. Default: False
"""
individual.last_z = sampler.next()
if threshold_convergence:
individual.last_z = _scaleWithThreshold(individual.last_z, param.threshold)
individual.mutation_vector = dot(param.B, (param.D * individual.last_z)) # y_k in cmatutorial.pdf)
mutation_vector = individual.mutation_vector * param.sigma
individual.genotype = _keepInBounds(add(individual.genotype, mutation_vector), param.l_bound, param.u_bound)
'''-----------------------------------------------------------------------------
# GA Mutations #
-----------------------------------------------------------------------------'''
def mutateBitstring(individual):
"""
Simple 1/n bit-flip mutation
:param individual: :mod:`~modea.Individual` with a bit-string as genotype to undergo p=1/n mutation
"""
bitstring = individual.genotype
n = len(bitstring)
p = 1/n
for i in range(n):
if np.random.random() < p:
bitstring[i] = 1-bitstring[i]
def mutateIntList(individual, param, num_options_per_module):
"""
Self-adaptive random integer mutation to mutate the structure of an ES
:param individual: :class:`~modea.Individual.MixedIntegerIndividual` whose integer-part will be mutated
:param param: :class:`~modea.Parameters.Parameters` object
:param num_options_per_module: List :data:`~modea.num_options` with the number of available modules per module
position that are available to choose from
"""
p = individual.baseStepSize + individual.stepSizeOffset
num_ints = individual.num_ints
int_list = individual.genotype[:num_ints-1] # Get the relevant slice
for i, val in enumerate(num_options_per_module):
if np.random.random() < p:
# -1 as random_integers is [1, val], -1 to simulate leaving out the current value
new_int = np.random.random_integers(val-1)-1
if int_list[i] == new_int:
new_int = val - 1 # If we randomly selected the same value, pick the value we left out
int_list[i] = new_int
if np.random.random() < p:
new_lambda = np.random.random_integers(param.l_bound[num_ints-1], param.u_bound[num_ints-1])
individual.genotype[num_ints-1] = new_lambda
def mutateFloatList(individual, param, options):
"""
Self-adaptive, uniformly random floating point mutation on the tunable parameters of an ES
:param individual: :class:`~modea.Individual.MixedIntegerIndividual` whose integer-part will be mutated
:param param: :class:`~modea.Parameters.Parameters` object
:param options: List of tuples :data:`~modea.options` with the number of tunable parameters per module
"""
# Setup of values
p = individual.baseStepSize + individual.stepSizeOffset
float_part = individual.genotype[individual.num_ints:]
int_part = individual.genotype[:individual.num_ints-1]
l_bound = param.l_bound[individual.num_ints:].flatten()
u_bound = param.u_bound[individual.num_ints:].flatten()
search_space = u_bound - l_bound
# Create the mask: which float values will actually be mutated?
cond_mask = [True,True,True,True,True,True,True] # TODO FIXME: these are default CMA parameters, make this dynamic!
for i, val in enumerate(options):
cond_mask.extend([bool(int_part[i] * 1)] * val[2])
    mutate_mask = np.random.random_sample(float_part.shape) < p
    combined_mask = bitwise_and(cond_mask, mutate_mask)
    # Scale the random values to the search space, then start at the lower bound
    float_part[combined_mask] = np.random.random_sample(float_part.shape)[combined_mask] * search_space[combined_mask]
float_part[combined_mask] += l_bound[combined_mask]
def mutateMixedInteger(individual, param, options, num_options_per_module):
"""
Self-adaptive mixed-integer mutation of the structure of an ES
:param individual: :class:`~modea.Individual.MixedIntegerIndividual` whose integer-part will be mutated
:param param: :class:`~modea.Parameters.Parameters` object
:param options: List of tuples :data:`~modea.options` with the number of tunable parameters per module
:param num_options_per_module: List :data:`~modea.num_options` with the number of available modules per module position
that are available to choose from
"""
adaptStepSize(individual)
mutateIntList(individual, param, num_options_per_module)
mutateFloatList(individual, param, options)
'''-----------------------------------------------------------------------------
# MIES Mutations #
-----------------------------------------------------------------------------'''
def MIES_MutateDiscrete(individual, begin, end, u, num_options, options):
"""
Mutate the discrete part of a Mixed-Integer representation
:param individual: The individual to mutate
:param begin: Start index of the discrete part of the individual's representation
:param end: End index of the discrete part of the individual's representation
:param u: A pre-determined random value from a Gaussian distribution
:param num_options: List :data:`~modea.num_options` with the number of available modules per module position
that are available to choose from
:param options: List of tuples :data:`~modea.options` with the number of tunable parameters per module
:return: A boolean mask array to be used for further conditional mutations based on which modules
are active
"""
conditional_mask = [True,True,True,True,True,True,True]
for x in range(begin, end):
if individual.genotype[x] is not None:
# set stepsize
tau = 1 / sqrt(2 * individual.num_discrete)
tau_prime = 1 / sqrt(2 * sqrt(individual.num_discrete))
individual.stepSizeOffsetMIES[x] = 1 / (
1 + ((1 - individual.stepSizeOffsetMIES[x]) / individual.stepSizeOffsetMIES[x]) * exp(
(-tau) * u - tau_prime * gauss(0.5, 1)))
# Keep stepsize within the bounds
baseMIESstep = 1 / (3 * num_options[x]) # p'_i = T[ 1 / (3n_d) , 0.5]
individual.stepSizeOffsetMIES[x] = _keepInBounds(individual.stepSizeOffsetMIES[x], baseMIESstep, 0.5)
threshold = np.random.random_sample()
# change discrete
if threshold < individual.stepSizeMIES(x):
temparray = []
for i in range(num_options[x]):
temparray.append(i)
temparray.remove(individual.genotype[x])
individual.genotype[x] = random.choice(temparray)
for i in range(options[x][2]):
conditional_mask.append(individual.genotype[x])
return conditional_mask
def MIES_MutateIntegers(individual, begin, end, u, param):
"""
Mutate the integer part of a Mixed-Integer representation
:param individual: The individual to mutate
:param begin: Start index of the integer part of the individual's representation
:param end: End index of the integer part of the individual's representation
:param u: A pre-determined random value from a Gaussian distribution
:param param: :class:`~modea.Parameters.Parameters` object
"""
for x in range(begin, end):
if individual.genotype[x] is not None:
# Adapt stepsize
tau = 1 / sqrt(2 * individual.num_ints)
tau_prime = 1 / sqrt(2 * sqrt(individual.num_ints))
individual.stepSizeOffsetMIES[x] = max(1,
individual.stepSizeOffsetMIES[x] * exp(tau * u + tau_prime * gauss(0.5, 1)))
            # Mutate integer
psi = 1 - (individual.stepSizeMIES(x) / individual.num_ints) / (
1 + sqrt(1 + pow(individual.stepSizeMIES(x) / individual.num_ints, 2)))
u1, u2 = np.random.random_sample(2)
            # Geometric integer mutation: difference of two geometrically distributed steps, kept in bounds
            G1 = int(floor(np.log(1 - u1) / np.log(1 - psi)))
            G2 = int(floor(np.log(1 - u2) / np.log(1 - psi)))
            individual.genotype[x] = _keepInBounds(individual.genotype[x] + (G1 - G2),
                                                   param.l_bound[x], param.u_bound[x])
# imports
import numpy as np
import pandas as pd
from scipy.interpolate import griddata, Akima1DInterpolator
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
from sklearn.utils.fixes import parse_version
from utils import fit, modify
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from matplotlib.pyplot import cm
from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import collections, colors, transforms
# formatting
plt.rcParams['legend.title_fontsize'] = 'large'
plt.rcParams['legend.fontsize'] = 'medium'
fontP = FontProperties()
fontP.set_size('medium')
plt.style.use(['science', 'ieee', 'std-colors'])
# plt.style.use(['science', 'scatter'])
fig, ax = plt.subplots()
size_x_inches, size_y_inches = fig.get_size_inches()
plt.close(fig)
def plot_scatter(dficts, xparameter='y', yparameter='z', min_cm=0.5, z0=0, take_abs=False,
figsize=(6, 4), scattersize=2):
"""
Plot all data (xparameter, yparameter) as scatter points with different colors.
:param dficts:
:param xparameter:
:param yparameter:
:param min_cm:
:param z0:
:return:
"""
fig, ax = plt.subplots(figsize=figsize)
#cscatter = iter(cm.Spectral(np.linspace(0.95, 0.2, len(dficts.keys()))))
for name, df in dficts.items():
# filter dataframe
if min_cm:
df = df[df['cm'] > min_cm]
# sort by x-parameter and get x- and y-arrays for plotting
if xparameter is None or xparameter == 'index':
x = df.index
else:
df = df.sort_values(by=xparameter)
x = df[xparameter]
y = df[yparameter]
if z0:
y = y - z0
# take absolute value
if take_abs:
y = np.abs(y)
# plot
#cs = next(cscatter)
ax.scatter(x, y, s=scattersize)
# ax.set_xlabel(xparameter, fontsize=18)
# ax.set_ylabel(yparameter, fontsize=18)
# ax.grid(alpha=0.125)
# ax.legend(dficts.keys(), prop=fontP, title=r'$dz$ (mm)', loc='upper right', fancybox=True, shadow=False)
return fig, ax
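# Minimal usage sketch (illustrative; the key and column values are made up, but each DataFrame
# needs a 'cm' column plus the plotted x/y columns):
#   dficts = {1.0: pd.DataFrame({'y': [0, 1, 2], 'z': [10.0, 11.0, 12.0], 'cm': [0.9, 0.8, 0.95]})}
#   fig, ax = plot_scatter(dficts, xparameter='y', yparameter='z', min_cm=0.5)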
def plot_mean(dficts, xparameter='y', yparameter='z', min_cm=0.5, z0=0, take_abs=False, fit_function=None):
"""
Plot all data (xparameter, yparameter) as scatter points with different colors.
:param dficts:
:param xparameter:
:param yparameter:
:param min_cm:
:param z0:
:return:
"""
cscatter = iter(cm.Spectral(np.linspace(0.95, 0.2, len(dficts.keys()))))
cerror = iter(cm.Spectral(np.linspace(0.95, 0.2, len(dficts.keys()))))
fig, ax = plt.subplots(figsize=(7.25, 4.25))
means = []
for name, df in dficts.items():
# filter dataframe
df = df[df['cm'] > min_cm]
y = df[yparameter] - z0
# take absolute value
if take_abs:
y = np.abs(y)
yerr = np.std(y)
y = np.mean(y)
means.append(y)
# plot
cs = next(cscatter)
ax.errorbar(name, y, yerr=yerr * 2, fmt='o', color=cs, ecolor=next(cerror), elinewidth=3, capsize=4, alpha=0.75)
ax.scatter(name, y, color=cs)
ax.set_xlabel(xparameter, fontsize=18)
ax.set_ylabel(yparameter, fontsize=18)
ax.grid(alpha=0.125)
ax.legend(dficts.keys(), prop=fontP, title=r'$dz$ (mm)', loc='upper left', fancybox=True, shadow=False)
# fit the function
if fit_function is not None:
names = list(dficts.keys())
popt, pcov, fit_func = fit.fit(names, means, fit_function=fit_function)
# plot fitted function
xfit = np.linspace(0, np.max(names), 100)
ax.plot(xfit, fit_function(xfit, *popt), color='black', linewidth=2, linestyle='--', alpha=0.5)
return fig, ax
def plot_errorbars(dfbicts, xparameter='index', yparameter='z', min_cm=0.5, z0=0):
"""
Plot all data (xparameter, yparameter) as scatter points with different colors.
:param dficts:
:param xparameter:
:param yparameter:
:param min_cm:
:param z0:
:return:
"""
fig, ax = plt.subplots(figsize=(7.25, 4.25))
cscatter = iter(cm.Spectral(np.linspace(0.95, 0.2, len(dfbicts.keys()))))
cerror = iter(cm.Spectral(np.linspace(0.95, 0.2, len(dfbicts.keys()))))
for name, df in dfbicts.items():
# filter dataframe
df = df[df['cm'] > min_cm]
# sort by x-parameter and get x- and y-arrays for plotting
if xparameter is None or xparameter == 'index':
x = df.index
else:
df = df.sort_values(by=xparameter)
x = df[xparameter]
y = df[yparameter] - z0
# plot
cs = next(cscatter)
ax.errorbar(x, y, yerr=df.z_std * 2, fmt='o', color=cs, ecolor=next(cerror), elinewidth=1, capsize=2, alpha=0.75)
ax.scatter(x, y, color=cs)
ax.set_xlabel(xparameter, fontsize=18)
ax.set_ylabel(yparameter, fontsize=18)
ax.grid(alpha=0.125)
ax.legend(dfbicts.keys(), prop=fontP, title=r'$dz$ (mm)', loc='upper left', fancybox=True, shadow=False)
return fig, ax
def plot_fit_and_scatter(fit_function, dficts, xparameter='index', yparameter='z', min_cm=0.5, z0=0, auto_format=False):
"""
Plot fitted curve and data (xparameter, yparameter) as scatter points with different colors.
:param dficts:
:param xparameter:
:param yparameter:
:param min_cm:
:param z0:
:return:
"""
fig, ax = plt.subplots(figsize=(7.25, 4.25))
cscatter = iter(cm.Spectral(np.linspace(0.95, 0.2, len(dficts.keys()))))
for name, df in dficts.items():
# drop NaN's
df = df.dropna(axis=0, subset=[yparameter])
# filter dataframe
df = df[df['cm'] > min_cm]
# sort by x-parameter and get x- and y-arrays for plotting
if xparameter is None or xparameter == 'index':
x = df.index
else:
df = df.sort_values(by=xparameter)
x = df[xparameter]
y = df[yparameter] - z0
# plot scatter points
cs = next(cscatter)
ax.scatter(x, y, color=cs)
# fit the function
popt, pcov, fit_func = fit.fit(x, y, fit_function=fit_function)
# plot fitted function
xfit = np.linspace(0, x.max(), 100)
ax.plot(xfit, fit_function(xfit, popt[0], popt[1], popt[2]), color=cs, linewidth=3, alpha=0.9)
ax.set_xlabel(xparameter, fontsize=18)
ax.set_ylabel(yparameter, fontsize=18)
ax.grid(alpha=0.125)
if auto_format:
ax.legend(dficts.keys(), prop=fontP, title=r'$dz$ (mm)', loc='upper left', fancybox=True, shadow=False)
return fig, ax
def plot_dfbicts_local(dfbicts, parameters='rmse_z', h=1, colors=None, linestyles=None, show_legend=False, scale=None,
scatter_on=True, scatter_size=10,
label_dict=None,
ylabel=None, xlabel=None, semilogx=False, nrows=None, ncols=None):
"""
Notes:
1. Plots the dataframe index on x-axis.
2. If only one parameter is passed (len(parameters) == 1), then no ax2 is returned.
:param dfbicts:
:param parameters:
:param h:
:param colors:
:param linestyles:
:param show_legend:
:param scale:
:param scatter_on:
:param scatter_size:
:param ylabel:
:param xlabel:
:return:
"""
# format figure
if isinstance(colors, list):
colors = colors
cscatter = None
cscatterr = None
elif colors == 'Blues':
cscatter = iter(cm.Blues(np.linspace(0.1, 0.9, len(dfbicts.keys()))))
cscatterr = iter(cm.Blues(np.linspace(0.1, 0.9, len(dfbicts.keys()))))
elif colors == 'inferno':
cscatter = iter(cm.inferno(np.linspace(0.1, 0.9, len(dfbicts.keys()))))
cscatterr = iter(cm.inferno(np.linspace(0.1, 0.9, len(dfbicts.keys()))))
else:
# get colors from cycler
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
if len(dfbicts) > len(colors):
colors_repeats = colors + colors
colors = colors_repeats[:len(dfbicts)]
cscatter = None
cscatterr = None
if isinstance(linestyles, list):
lstyle = iter(linestyles)
else:
lstyle = iter('-' for i in list(dfbicts.keys()))
if not scale:
if nrows:
fig, [ax, ax2] = plt.subplots(nrows=2, sharex=True)
elif ncols:
fig, [ax, ax2] = plt.subplots(ncols=2)
else:
fig, ax = plt.subplots()
else:
if isinstance(scale, (int, float)):
scalex, scaley = scale, scale
else:
scalex, scaley = scale[0], scale[1]
fig, ax = plt.subplots()
size_x_inches, size_y_inches = fig.get_size_inches()
size_x_pixels, size_y_pixels = fig.get_size_inches() * fig.dpi
plt.close(fig)
if nrows:
fig, [ax, ax2] = plt.subplots(nrows=2, sharex=True, figsize=(size_x_inches * scalex, size_y_inches * scaley))
elif ncols:
fig, [ax, ax2] = plt.subplots(ncols=2, figsize=(size_x_inches * scalex, size_y_inches * scaley))
else:
fig, ax = plt.subplots(figsize=(size_x_inches*scalex, size_y_inches*scaley))
# organize data
if (isinstance(parameters, str)) or (isinstance(parameters, list) and len(parameters) == 1):
parameter = parameters
parameterr = None
parameter3 = None
parameter4 = None
elif isinstance(parameters, list) and len(parameters) == 2:
parameter = parameters[0]
parameterr = parameters[1]
parameter3 = None
parameter4 = None
elif isinstance(parameters, list) and len(parameters) == 3:
parameter = parameters[0]
parameterr = parameters[1]
parameter3 = parameters[2]
parameter4 = None
elif isinstance(parameters, list) and len(parameters) == 4:
parameter = parameters[0]
parameterr = parameters[1]
parameter3 = parameters[2]
parameter4 = parameters[3]
if parameter == 'rmse_z':
for item, clr in zip(dfbicts.items(), colors):
if cscatter is not None:
cs = next(cscatter)
ls = next(lstyle)
ax.plot(item[1].index, item[1][parameter] / h)
if scatter_on:
ax.scatter(item[1].index, item[1][parameter] / h)
else:
if label_dict:
lbl = label_dict[item[0]]['label']
else:
lbl = None
ls = next(lstyle)
if scatter_on:
ax.scatter(item[1].index, item[1][parameter] / h, color=clr, s=scatter_size)
ax.plot(item[1].index, item[1][parameter] / h, color=clr, linestyle=ls, label=lbl)
else:
ax.plot(item[1].index, item[1][parameter] / h, color=clr, label=lbl, linestyle=ls)
else:
for item, clr in zip(dfbicts.items(), colors):
if cscatter is not None:
ax.plot(item[1].index, item[1][parameter])
if scatter_on:
ax.scatter(item[1].index, item[1][parameter])
else:
if label_dict:
lbl = label_dict[item[0]]['label']
else:
lbl = None
ls = next(lstyle)
if semilogx:
ax.semilogx(item[1].index, item[1][parameter] / h)
else:
if scatter_on:
ax.scatter(item[1].index, item[1][parameter] / h, s=scatter_size, color=clr)
ax.plot(item[1].index, item[1][parameter] / h, color=clr, linestyle=ls, label=lbl)
else:
ax.plot(item[1].index, item[1][parameter] / h, color=clr, linestyle=ls, label=lbl)
if parameterr is not None:
if not nrows:
ax2 = ax.twinx()
for item, clr in zip(dfbicts.items(), colors):
if nrows:
ax2.plot(item[1].index, item[1][parameterr], color=clr)
else:
ax2.plot(item[1].index, item[1][parameterr], color=clr, linestyle='--')
if parameter3 is not None:
ax2.plot(item[1].index, item[1][parameter3], color=clr, linestyle=':')
if parameter4 is not None:
ax2.plot(item[1].index, item[1][parameter4], color=clr, linestyle='-.')
if ylabel:
ax.set_ylabel(ylabel)
elif h != 1 and parameter == 'rmse_z':
ax.set_ylabel(r'$\sigma_{z}\left(z\right) / h$')
elif parameter == 'rmse_z':
ax.set_ylabel(r'$\sigma_{z}\left(z\right)$')
else:
ax.set_ylabel(parameter)
if xlabel:
if nrows:
ax2.set_xlabel(xlabel)
else:
ax.set_xlabel(xlabel)
else:
        if nrows:
            ax2.set_xlabel(r'z ($\mu m$)')
        else:
            ax.set_xlabel(r'z ($\mu m$)')
ax.grid(alpha=0.25)
if nrows:
ax2.grid(alpha=0.25)
if show_legend:
ax.legend(dfbicts.keys(), title=r'$\sigma$')
if parameterr is not None:
return fig, ax, ax2
else:
return fig, ax
def plot_dfbicts_global(dfbicts, parameters='rmse_z', xlabel='parameter', h=1, print_values=False,
scale=None, fig=None, ax=None, ax2=None, ax2_ylim=None, color=None, scatter_size=10,
smooth=False, ylabel=None):
if fig is None and ax is None:
if not scale:
fig, ax = plt.subplots()
else:
if isinstance(scale, (int, float)):
scalex, scaley = scale, scale
else:
scalex, scaley = scale[0], scale[1]
fig, ax = plt.subplots()
size_x_inches, size_y_inches = fig.get_size_inches()
plt.close(fig)
fig, ax = plt.subplots(figsize=(size_x_inches * scalex, size_y_inches * scaley))
if ax2 is None and isinstance(parameters, list) and len(parameters) > 1:
ax2 = ax.twinx()
# organize data
if isinstance(parameters, str) or len(parameters) == 1:
parameter = parameters
parameterr = None
parameterrr = None
names = dfbicts.keys()
means = np.array([m[parameter].mean() for m in dfbicts.values()])
sort_by_name = sorted(list(zip(names, means)), key=lambda x: x[0])
names = [x[0] for x in sort_by_name]
means = np.array([x[1] for x in sort_by_name])
elif isinstance(parameters, list) and len(parameters) == 2:
parameter = parameters[0]
parameterr = parameters[1]
parameterrr = None
names = dfbicts.keys()
means = np.array([m[parameter].mean() for m in dfbicts.values()])
means_prr = np.array([m[parameterr].mean() for m in dfbicts.values()])
sort_by_name = sorted(list(zip(names, means, means_prr)), key=lambda x: x[0])
names = [x[0] for x in sort_by_name]
means = np.array([x[1] for x in sort_by_name])
means_prr = np.array([x[2] for x in sort_by_name])
elif isinstance(parameters, list) and len(parameters) == 3:
parameter = parameters[0]
parameterr = parameters[1]
parameterrr = parameters[2]
names = dfbicts.keys()
means = np.array([m[parameter].mean() for m in dfbicts.values()])
means_prr = np.array([m[parameterr].mean() for m in dfbicts.values()])
means_prrr = np.array([m[parameterrr].mean() for m in dfbicts.values()])
sort_by_name = sorted(list(zip(names, means, means_prr, means_prrr)), key=lambda x: x[0])
names = [x[0] for x in sort_by_name]
means = np.array([x[1] for x in sort_by_name])
means_prr = np.array([x[2] for x in sort_by_name])
means_prrr = np.array([x[3] for x in sort_by_name])
else:
raise ValueError("parameters must be a string or a list of strings")
# smooth data
if smooth:
names = np.array(names)
names_interp = np.linspace(np.min(names), np.max(names), 500)
means_interp = Akima1DInterpolator(names, means)(names_interp)
means = means_interp
if parameterr:
means_prr_interp = Akima1DInterpolator(names, means_prr)(names_interp)
means_prr = means_prr_interp
if parameterrr:
means_prrr_interp = Akima1DInterpolator(names, means_prrr)(names_interp)
means_prrr = means_prrr_interp
names = names_interp
# plot figure
if parameter == 'rmse_z' and h != 1:
ax.plot(names, means / h, color=color)
if scatter_size:
ax.scatter(names, means / h, s=scatter_size, color=color)
else:
ax.plot(names, means, color=color)
if scatter_size:
ax.scatter(names, means, s=scatter_size, color=color)
if parameter == 'rmse_z':
ax.set_ylabel(r'$\overline{\sigma_{z}} / h$')
elif ylabel:
ax.set_ylabel(ylabel)
else:
ax.set_ylabel(parameter)
if parameterr is not None and parameterrr is None:
ax2.plot(names, means_prr, linestyle='--', color=color)
ax2.set_ylim(ax2_ylim)
elif parameterrr is not None:
ax2.plot(names, means_prr, color=color, linestyle='--')
ax2.plot(names, means_prrr, color=color, linestyle=':')
ax2.set_ylim(ax2_ylim)
ax.set_xlabel(xlabel)
ax.grid(alpha=0.25)
# print results
if print_values:
print(names)
print('{}: {}'.format(parameter, means / h))
if parameterr:
print('{}: {}'.format(parameterr, means_prr))
return fig, ax, ax2
def plot_dfbicts_list_global(dfbicts_list, parameters='rmse_z', xlabel='parameter', h=1, print_values=False,
scale=None, colors=None, ax2_ylim=None, scatter_size=10, smooth=False, ylabel=None):
# format figure
if not colors:
# get colors from cycler
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
if not scale:
fig, ax = plt.subplots()
else:
if isinstance(scale, (int, float)):
scalex, scaley = scale, scale
else:
scalex, scaley = scale[0], scale[1]
fig, ax = plt.subplots()
size_x_inches, size_y_inches = fig.get_size_inches()
plt.close(fig)
fig, ax = plt.subplots(figsize=(size_x_inches * scalex, size_y_inches * scaley))
if isinstance(parameters, list) and len(parameters) > 1:
ax2 = ax.twinx()
else:
ax2 = None
for dfbicts, color in zip(dfbicts_list, colors):
fig, ax, ax2 = plot_dfbicts_global(dfbicts, parameters, xlabel, h, print_values,
scale=scale, fig=fig, ax=ax, ax2=ax2, ax2_ylim=ax2_ylim,
color=color, scatter_size=scatter_size, smooth=smooth, ylabel=ylabel)
return fig, ax, ax2
def plot_scatter_z_color(dficts, xparameter='x', yparameter='y', zparameter='z', min_cm=0.5, z0=0, take_abs=False):
"""
Plot all data (xparameter, yparameter, zparameter) as scatter points with z-parameter as colors.
"""
for name, df in dficts.items():
ax = plt.subplot()
# filter dataframe
df = df[df['cm'] > min_cm]
# get x and y values
x = df[xparameter]
y = df[yparameter]
# adjust for z-offset
z = df[zparameter] - z0
# take absolute value
if take_abs:
z = np.abs(z)
# plot
data = ax.scatter(x, y, c=z)
ax.set_xlabel(xparameter, fontsize=18)
ax.set_ylabel(yparameter, fontsize=18)
ax.set_title(name, fontsize=18)
ax.grid(alpha=0.125)
# color bar
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.5)
plt.colorbar(data, cax=cax)
plt.show()
plt.close('all')
# --------------------------------- DATAFRAMES ---------------------------------------------------------------------
def plot_scatter_3d(df, fig=None, ax=None, elev=5, azim=-40, color=None, alpha=0.75):
"""
:param df: dataframe with 'x', 'y', and 'z' columns
:param fig: figure
:param ax: axes to plot on
:param elev: the elevation angle in the z-plane.
:param azim: the azimuth angle in the x-y plane.
:return:
"""
if not fig:
fig = plt.figure(figsize=(6, 6))
if not ax:
ax = fig.add_subplot(projection='3d')
if isinstance(df, list):
x, y, z = df
else:
x, y, z = df.x, df.y, df.z
if color is None:
color = z
ax.scatter(x, y, z, marker='o', c=color, alpha=alpha)
ax.view_init(elev, azim)
return fig, ax
def plot_scatter_3d_multi_angle(df, z_param='z'):
fig = plt.figure(figsize=(6.5, 5))
for i, v in zip(np.arange(1, 5), [45, 0, 315, 270]):
ax = fig.add_subplot(2, 2, i, projection='3d')
sc = ax.scatter(df.x, df.y, df[z_param], c=df[z_param])
ax.view_init(5, v)
ax.patch.set_alpha(0.0)
if i == 2:
plt.colorbar(sc, shrink=0.5)
ax.get_xaxis().set_ticks([])
ax.set_ylabel(r'$y \: (pixels)$')
ax.set_zlabel(r'$z \: (\mu m)$')
elif i == 4:
ax.get_yaxis().set_ticks([])
ax.set_xlabel(r'$x \: (pixels)$')
ax.set_zlabel(r'$z \: (\mu m)$')
else:
ax.set_xlabel(r'$x \: (pixels)$')
ax.set_ylabel(r'$y \: (pixels)$')
ax.get_zaxis().set_ticklabels([])
plt.suptitle('title', y=0.875)
plt.subplots_adjust(hspace=-0.1, wspace=0.15)
return fig, ax
def plot_heatmap(df, fig=None, ax=None):
# drop NaNs
dfc = df.dropna(axis=0, subset=['z'])
# move x, y, z series to numpy arrays
x = dfc.x.to_numpy()
y = dfc.y.to_numpy()
z = dfc.z.to_numpy()
# get spatial coordinate extents
xspace = np.max(x) - np.min(x)
yspace = np.max(y) - np.min(y)
zspace = np.max(z) - np.min(z)
# contour surface levels: 1 level = 1 micron
lvls_surface = int(np.round(zspace + 1))
lvls_lines = int(lvls_surface / 5)
# -----------------------
# Interpolation on a grid
# -----------------------
# A contour plot of irregularly spaced data coordinates
# via interpolation on a grid.
ngridx = int(xspace)
ngridy = int(yspace)
# Create grid values first.
xi = np.linspace(np.min(x), np.max(x), ngridx)
yi = np.linspace(np.min(y), np.max(y), ngridy)
# Linearly interpolate the data (x, y) on a grid defined by (xi, yi).
zi = griddata((x, y), z, (xi[None, :], yi[:, None]), method='linear')
if fig is None and ax is None:
fig, ax = plt.subplots(figsize=(8, 8))
# plot level surfaces
cntr = ax.contourf(xi, yi, zi, levels=lvls_surface, cmap="RdBu_r")
# plot level lines
ax.contour(xi, yi, zi, levels=lvls_lines, linewidths=0.5, colors='gray')
# plot data points
ax.scatter(x, y, c=z, cmap="RdBu_r")
cbar = fig.colorbar(cntr, ax=ax)
cbar.ax.set_title(r'$\delta z$')
ax.set_xlabel('$x$', fontsize=18)
ax.set_ylabel(r'$y$', fontsize=18)
return fig, ax
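# Minimal usage sketch (illustrative; the DataFrame only needs 'x', 'y' and 'z' columns):
#   df = pd.DataFrame({'x': np.random.rand(200) * 100,
#                      'y': np.random.rand(200) * 100,
#                      'z': np.random.rand(200) * 10})
#   fig, ax = plot_heatmap(df)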
# ------------------------------------- ARRAYS ---------------------------------------------------------------------
def scatter_xy_color_z(df, param_z):
fig, ax = plt.subplots()
sc = ax.scatter(df.x, df.y, c=df[param_z], s=3)
plt.colorbar(sc, shrink=0.75)
ax.set_xlabel('x')
ax.set_ylabel('y')
return fig
def scatter_z_by_xy(df, z_params):
if not isinstance(z_params, list):
z_params = [z_params]
fig, ax = plt.subplots(ncols=2, sharey=True, figsize=(size_x_inches*2, size_y_inches))
for z_param in z_params:
ax[0].scatter(df.x, df[z_param], s=3)
ax[1].scatter(df.y, df[z_param], s=3, label=z_param)
ax[0].set_xlabel('x')
ax[0].set_ylabel('z')
ax[1].set_xlabel('y')
ax[1].legend(loc='upper left', bbox_to_anchor=(1, 1))
plt.tight_layout()
return fig, ax
def plot_fitted_plane_and_points(df, dict_fit_plane):
param_z = dict_fit_plane['z_f']
rmse, r_squared = dict_fit_plane['rmse'], dict_fit_plane['r_squared']
tilt_x, tilt_y = dict_fit_plane['tilt_x_degrees'], dict_fit_plane['tilt_y_degrees']
px, py, pz = dict_fit_plane['px'], dict_fit_plane['py'], dict_fit_plane['pz']
normal = dict_fit_plane['normal']
d = dict_fit_plane['d']
fig = plt.figure(figsize=(6.5, 5))
for i, v in zip(np.arange(1, 5), [45, 0, 315, 270]):
ax = fig.add_subplot(2, 2, i, projection='3d')
sc = ax.scatter(df.x, df.y, df[param_z], c=df[param_z], s=1)
ax.plot_surface(px, py, pz, alpha=0.4, color='red')
ax.view_init(5, v)
ax.patch.set_alpha(0.0)
if i == 2:
plt.colorbar(sc, shrink=0.5)
ax.get_xaxis().set_ticks([])
ax.set_ylabel(r'$y \: (pixels)$')
ax.set_zlabel(r'$z \: (\mu m)$')
elif i == 4:
ax.get_yaxis().set_ticks([])
ax.set_xlabel(r'$x \: (pixels)$')
ax.set_zlabel(r'$z \: (\mu m)$')
else:
ax.set_xlabel(r'$x \: (pixels)$')
ax.set_ylabel(r'$y \: (pixels)$')
ax.get_zaxis().set_ticklabels([])
# title
plt.suptitle('RMSE: {}, '.format(np.round(rmse, 3)) +
r'$R^2$' + ': {}'.format(np.round(r_squared, 3)) + '\n' +
r'$(\theta_{x}, \theta_{y})=$' + ' ({}, {} deg.)'.format(np.round(tilt_x, 3), np.round(tilt_y, 3)))
# deprecated title
"""plt.suptitle(r"$0 = n_x x + n_y y + n_z z - d$" + "= {}x + {}y + {}z - {} \n"
"(x, y: pixels; z: microns)".format(np.round(normal[0], 5),
np.round(normal[1], 5),
np.round(normal[2], 5),
np.round(d, 5)),
y=0.875)"""
plt.subplots_adjust(hspace=-0.1, wspace=0.15)
return fig
def scatter_3d_and_surface(x, y, z, func, func_params, fit_params, cmap='RdBu', grid_resolution=30, view='multi'):
# setup data points for calculating surface model
model_x_data = np.linspace(min(x), max(x), grid_resolution)
model_y_data = np.linspace(min(y), max(y), grid_resolution)
# create coordinate arrays for vectorized evaluations
X, Y = np.meshgrid(model_x_data, model_y_data)
# calculate z-coordinate of array
if func_params == ['x', 'y']:
Z = func(np.array([X, Y]), *fit_params)
elif func_params == 'y':
Z = func(Y, *fit_params)
elif func_params == 'x':
Z = func(X, *fit_params)
else:
raise ValueError('function parameters not understood.')
# plot
if view == 'multi':
fig = plt.figure(figsize=(12, 10))
for i, v in zip(np.arange(1, 5), [315, 0, 225, 90]):
ax = fig.add_subplot(2, 2, i, projection='3d')
sc = ax.scatter(x, y, z, c=z, s=0.5, alpha=0.75)
ps = ax.plot_surface(X, Y, Z, cmap=cmap, alpha=0.25)
ax.view_init(5, v)
ax.patch.set_alpha(0.0)
if i == 2:
plt.colorbar(sc, shrink=0.5)
ax.get_xaxis().set_ticks([])
ax.set_ylabel(r'$y \: (pixels)$')
ax.set_zlabel(r'$z \: (\mu m)$')
elif i == 4:
ax.get_yaxis().set_ticks([])
ax.set_xlabel(r'$x \: (pixels)$')
ax.set_zlabel(r'$z \: (\mu m)$')
else:
ax.set_xlabel(r'$x \: (pixels)$')
ax.set_ylabel(r'$y \: (pixels)$')
ax.get_zaxis().set_ticklabels([])
else:
fig = plt.figure()
ax = Axes3D(fig)
ps = ax.plot_surface(X, Y, Z, cmap=cmap, alpha=0.5)
ax.scatter(x, y, z, s=1, color='black', alpha=0.5)
if view == 'y':
ax.view_init(5, 0)
elif view == 'x':
ax.view_init(5, 90)
plt.colorbar(ps, fraction=0.015, pad=0.08)
return fig, ax
def scatter_3d_and_spline(x, y, z, bispl, cmap='RdBu', grid_resolution=25, view='multi'):
# setup data points for calculating surface model
model_x_data = np.linspace(min(x), max(x), grid_resolution)
model_y_data = np.linspace(min(y), max(y), grid_resolution)
# create coordinate arrays for vectorized evaluations
X, Y = np.meshgrid(model_x_data, model_y_data)
Z = bispl.ev(X, Y)
# plot
if view == 'multi':
fig = plt.figure(figsize=(12, 10))
for i, v in zip(np.arange(1, 5), [315, 0, 225, 90]):
ax = fig.add_subplot(2, 2, i, projection='3d')
sc = ax.scatter(x, y, z, c=z, s=0.5, alpha=0.75)
ps = ax.plot_surface(X, Y, Z, cmap=cmap, alpha=0.25)
ax.view_init(5, v)
ax.patch.set_alpha(0.0)
if i == 2:
plt.colorbar(sc, shrink=0.5)
ax.get_xaxis().set_ticks([])
ax.set_ylabel(r'$y \: (pixels)$')
ax.set_zlabel(r'$z \: (\mu m)$')
elif i == 4:
ax.get_yaxis().set_ticks([])
ax.set_xlabel(r'$x \: (pixels)$')
ax.set_zlabel(r'$z \: (\mu m)$')
else:
ax.set_xlabel(r'$x \: (pixels)$')
ax.set_ylabel(r'$y \: (pixels)$')
ax.get_zaxis().set_ticklabels([])
else:
fig = plt.figure()
ax = Axes3D(fig)
ps = ax.plot_surface(X, Y, Z, cmap=cmap, alpha=0.5)
ax.scatter(x, y, z, s=1, color='black', alpha=0.5)
if view == 'y':
ax.view_init(5, 0)
elif view == 'x':
ax.view_init(5, 90)
plt.colorbar(ps, fraction=0.015, pad=0.08)
return fig, ax
def scatter_hist(x, y, fig, color=None, colormap='coolwarm', scatter_size=1, kde=True, distance_from_mean=10):
# Add a gridspec with two rows and two columns and a ratio of 2 to 7 between
# the size of the marginal axes and the main axes in both directions.
# Also adjust the subplot parameters for a square plot.
gs = fig.add_gridspec(2, 2, width_ratios=(7, 2), height_ratios=(2, 7),
left=0.1, right=0.9, bottom=0.1, top=0.9,
wspace=0.075, hspace=0.075)
ax = fig.add_subplot(gs[1, 0])
ax_histx = fig.add_subplot(gs[0, 0], sharex=ax)
ax_histy = fig.add_subplot(gs[1, 1], sharey=ax)
# no labels
ax_histx.tick_params(axis="x", labelbottom=False)
ax_histy.tick_params(axis="y", labelleft=False)
# the scatter plot:
if color is not None:
ax.scatter(x, y, c=color, cmap=colormap, s=scatter_size)
else:
ax.scatter(x, y, s=scatter_size)
# vertical and horizontal lines denote the mean value
ax.axvline(np.mean(x), ymin=0, ymax=0.5, color='black', linestyle='--', linewidth=0.25, alpha=0.25)
ax.axhline(np.mean(y), xmin=0, xmax=0.5, color='black', linestyle='--', linewidth=0.25, alpha=0.25)
# now determine nice limits by hand:
binwidth = 0.25
xymax = max(np.max(np.abs(x)), np.max(np.abs(y)))
lim = (int(xymax/binwidth) + 1) * binwidth
bins = np.arange(-lim, lim + binwidth, binwidth)
nx, binsx, patchesx = ax_histx.hist(x, bins=bins, zorder=2.5)
ny, binsy, patchesy = ax_histy.hist(y, bins=bins, orientation='horizontal', zorder=2.5)
# kernel density estimation
if kde:
x_plot = np.linspace(np.mean(x) - distance_from_mean, np.mean(x) + distance_from_mean, 1000)
y_plot = np.linspace(np.mean(y) - distance_from_mean, np.mean(y) + distance_from_mean, 1000)
x = x[:, np.newaxis]
x_plot = x_plot[:, np.newaxis]
kde_x = KernelDensity(kernel="gaussian", bandwidth=0.75).fit(x)
log_dens_x = kde_x.score_samples(x_plot)
scale_to_max = np.max(nx) / np.max(np.exp(log_dens_x))
#ax_histx.fill(x_plot[:, 0], np.exp(log_dens_x) * scale_to_max, fc='lightsteelblue', zorder=2)
ax_histx.fill_between(x_plot[:, 0], 0, np.exp(log_dens_x) * scale_to_max, fc='lightsteelblue', zorder=2)
y = y[:, np.newaxis]
y_plot = y_plot[:, np.newaxis]
kde_y = KernelDensity(kernel="gaussian", bandwidth=0.75).fit(y)
log_dens_y = kde_y.score_samples(y_plot)
scale_to_max = np.max(ny) / np.max(np.exp(log_dens_y))
ax_histy.fill_betweenx(y_plot[:, 0], 0, np.exp(log_dens_y) * scale_to_max, fc='lightsteelblue', zorder=2)
return fig, ax, ax_histx, ax_histy
def plot_violin(data, positions, density_directions, facecolors, edgecolor, clrs, qlrs,
axis_quartiles=0, widths=0.5, bw_method=None,
plot_median=True, plot_quartile=False, plot_whiskers=False,
median_marker='_', median_marker_size=25,
fig=None, ax2=None):
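"""
Half-violin plots of ``data`` at the given ``positions``, optionally marking
medians, quartiles and whiskers. Note: the ``half_violin`` and
``density_direction`` keyword arguments are not part of the standard
matplotlib ``violinplot`` API, so this assumes a locally patched matplotlib.
"""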
if not fig:
fig, ax2 = plt.subplots()
#ax2.set_title('Customized violin plot')
parts = ax2.violinplot(data, showmeans=False, showmedians=False, showextrema=False, half_violin=True, widths=widths,
positions=positions, density_direction=density_directions, points=100, bw_method=bw_method)
for ind, pc in enumerate(parts['bodies']):
pc.set_facecolor(facecolors[ind])
if edgecolor is None:
pc.set_edgecolor(facecolors[ind])
else:
pc.set_edgecolor(edgecolor)
pc.set_alpha(1)
quartile1, medians, quartile3 = np.percentile(data, [25, 50, 75], axis=axis_quartiles)
whiskers = np.array([
adjacent_values(sorted_array, q1, q3)
for sorted_array, q1, q3 in zip(data, quartile1, quartile3)])
whiskers_min, whiskers_max = whiskers[:, 0], whiskers[:, 1]
inds = positions
if plot_median:
ax2.scatter(inds, medians, c=clrs, marker=median_marker, s=median_marker_size, zorder=3)
if plot_quartile:
ax2.vlines(inds, quartile1, quartile3, colors=qlrs, linestyle='-', lw=2, alpha=1)
if plot_whiskers:
ax2.vlines(inds, whiskers_min, whiskers_max, color='k', linestyle='-', lw=1)
# set style for the axes
"""labels = [30, 45]
for ax in [ax2]:
set_axis_style(ax, labels)"""
"""if density_directions[0] > 0:
plt.show()
j = 1"""
return fig, ax2
def plot_arrays_on_one_axis(x, ys):
fig, ax = plt.subplots()
for y in ys:
ax.plot(x, y)
return fig, ax
def plot_arrays_on_two_subplots(x1, y1s, x2, y2s, y12s=None, y22s=None, rows_or_columns='rows', sharex=False,
sharey=False, smooth=False):
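"""
Plot the curve families ``y1s`` and ``y2s`` on two stacked (rows) or
side-by-side (columns) subplots, with optional dashed curves on twin y-axes
(``y12s``, ``y22s``). The ``smooth`` flag is currently accepted but unused.
"""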
if rows_or_columns == 'rows':
if sharex:
fig, (ax1, ax2) = plt.subplots(nrows=2, sharex=True)
else:
fig, (ax1, ax2) = plt.subplots(nrows=2)
else:
if sharey:
fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True)
else:
fig, (ax1, ax2) = plt.subplots(ncols=2)
for y1 in y1s:
ax1.plot(x1, y1)
if y12s is not None:
ax12 = ax1.twinx()
for y12 in y12s:
ax12.plot(x1, y12, linestyle='--')
else:
ax12 = None
for y2 in y2s:
ax2.plot(x2, y2)
if y22s is not None:
ax22 = ax2.twinx()
for y22 in y22s:
ax22.plot(x2, y22, linestyle='--')
else:
ax22 = None
plt.tight_layout()
return fig, ax1, ax2, ax12, ax22
# ------------------------------------- PARTICLE DISTRIBUTIONS -------------------------------------------------------
def plot_dist_errorbars(dficts, xparameter='index', yparameter='z'):
"""
Error bar plot of the particle distribution of 'yparameter' versus 'xparameter'.
"""
fig, ax = plt.subplots(figsize=(7.25, 4.25))
for name, df in dficts.items():
# plot
"""ax1.scatter(dfg_bid.id, dfg_bid.z_corr, s=5, label=_id)
_id_z_corr = dfg_bid.z_corr.mean()
ax2.errorbar(_id, dfg_bid.z_corr.mean(), yerr=dfg_bid.z_corr.std(), fmt='o', elinewidth=3, capsize=4,
alpha=0.75)
ax2.scatter(_id, dfg_bid.z_corr.mean(), s=5)"""
pass
return fig, ax
# --------------------------------- ONE-OFF PLOTTING FUNCTIONS -----------------------------------------------------
def plot_multi_optimal_cm_via_percent_change_diff(df, split_column='index', true_percent=False, smooth_plots=False):
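"""
Plot rmse_z against the cm threshold alongside the percent-change difference
used to pick the "optimal" cm value. Note: ``split_column`` is currently
accepted but unused.
"""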
x = df.cm_threshold.to_numpy()
y_sigma = df.rmse_z.to_numpy()
if true_percent:
y_percent = df.true_percent_meas.to_numpy()
else:
y_percent = df.percent_meas.to_numpy()
pc_sigma = (y_sigma[0] - y_sigma) / y_sigma[0]
pc_percent = (y_percent[0] - y_percent) / y_percent[0]
pc_diff = pc_sigma - pc_percent
fig, ax1, ax2, ax12, ax22 = plot_arrays_on_two_subplots(x1=x, y1s=[y_sigma], x2=x, y2s=[pc_diff],
y12s=[y_percent], y22s=None,
rows_or_columns='rows', sharex=True, smooth=smooth_plots)
best_cm = x[np.argmax(pc_diff)]
ax1.axvline(x=best_cm, ymin=0, ymax=1, color='red', linestyle=':', alpha=0.125)
ax2.axvline(x=best_cm, ymin=0, ymax=1, color='red', linestyle=':', alpha=0.125)
ax2.scatter(best_cm, np.max(pc_diff), s=5, color='red', alpha=0.5)
ax1.set_ylabel(r'$\sigma_{z} / h$')
ax12.set_ylabel(r'$\phi$')
ax1.grid(alpha=0.125)
ax2.set_xlabel(r'$c_m$')
ax2.set_ylabel(r'$\overline{\sigma_{z}}/\overline{\phi}$')
ax2.grid(alpha=0.125)
plt.tight_layout()
return fig, ax1, ax2, ax12, ax22
def plot_single_optimal_cm_via_percent_change_diff(df, true_percent=False, smooth_plots=False):
x = df.cm_threshold.to_numpy()
y_sigma = df.rmse_z.to_numpy()
if true_percent:
y_percent = df.true_percent_meas.to_numpy()
else:
y_percent = df.percent_meas.to_numpy()
pc_sigma = (y_sigma[0] - y_sigma) / y_sigma[0]
pc_percent = (y_percent[0] - y_percent) / y_percent[0]
pc_diff = pc_sigma - pc_percent
fig, ax1, ax2, ax12, ax22 = plot_arrays_on_two_subplots(x1=x, y1s=[y_sigma], x2=x, y2s=[pc_diff],
y12s=[y_percent], y22s=None,
rows_or_columns='rows', sharex=True, smooth=smooth_plots)
best_cm = x[np.argmax(pc_diff)]
ax1.axvline(x=best_cm, ymin=0, ymax=1, color='red', linestyle=':', alpha=0.125)
ax2.axvline(x=best_cm, ymin=0, ymax=1, color='red', linestyle=':', alpha=0.125)
ax2.scatter(best_cm, np.max(pc_diff), s=5, color='red', alpha=0.5)
ax1.set_ylabel(r'$\sigma_{z} / h$')
ax12.set_ylabel(r'$\phi$')
ax1.grid(alpha=0.125)
ax2.set_xlabel(r'$c_m$')
ax2.set_ylabel(r'$\overline{\sigma_{z}}/\overline{\phi}$')
ax2.grid(alpha=0.125)
plt.tight_layout()
return fig, ax1, ax2, ax12, ax22
def plot_normalized_sigma_by_percent(df, smooth_plots=False):
x = df.cm_threshold.to_numpy()
y_sigma = df.rmse_z.to_numpy()
y_true = df.true_percent_meas.to_numpy()
y_percent = df.percent_meas.to_numpy()
norm_true = (y_sigma / y_sigma[0]) / (y_true / y_true[0])
norm_percent = (y_sigma / y_sigma[0]) / (y_percent / y_percent[0])
ys = [norm_true, norm_percent]
fig, ax = plot_arrays_on_one_axis(x, ys)
ax.set_xlabel(r'$c_m$')
ax.set_ylabel(r'$\tilde{\sigma}_{z}/\tilde{\phi}$')
ax.grid(alpha=0.125)
ax.legend([r'$\phi$', r'$\phi_{ID}$'], loc='lower left')
plt.tight_layout()
return fig, ax
def plot_3d_scatter_and_plane(df, z_param, p_xyz, fit_plane_params, x_param='x', y_param='y'):
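"""
Scatter the dataframe columns (x_param, y_param, z_param) in 3D from four
viewing angles and overlay the fitted plane defined by ``p_xyz`` (surface
points) and ``fit_plane_params`` (which supply d and the plane normal).
"""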
# get dataframe points
x = df[x_param].to_numpy()
y = df[y_param].to_numpy()
z = df[z_param].to_numpy()
# get plane points
px, py, pz = p_xyz[0], p_xyz[1], p_xyz[2]
d, normal = fit_plane_params[3], fit_plane_params[4]
fig = plt.figure(figsize=(6.5, 5))
for i, v in zip(np.arange(1, 5), [45, 0, 315, 270]):
ax = fig.add_subplot(2, 2, i, projection='3d')
sc = ax.scatter(x, y, z, c=z, s=1)
ax.plot_surface(px, py, pz, alpha=0.4, color='black')
ax.view_init(5, v)
ax.patch.set_alpha(0.0)
if i == 2:
plt.colorbar(sc, shrink=0.5)
ax.get_xaxis().set_ticks([])
ax.set_ylabel(r'$y \: (pixels)$')
ax.set_zlabel(r'$z \: (\mu m)$')
elif i == 4:
ax.get_yaxis().set_ticks([])
ax.set_xlabel(r'$x \: (pixels)$')
ax.set_zlabel(r'$z \: (\mu m)$')
else:
ax.set_xlabel(r'$x \: (pixels)$')
ax.set_ylabel(r'$y \: (pixels)$')
ax.get_zaxis().set_ticklabels([])
plt.suptitle(r"$0 = n_x x + n_y y + n_z z - d$" + "= {}x + {}y + {}z - {} \n"
"(x, y: pixels; z: microns)".format(np.round(normal[0], 3),
np.round(normal[1], 3),
np.round(normal[2], 3),
np.round(d, 3)))
return fig
"""
Classes for hierarchical parameter inference.
"""
from itertools import compress
import bilby
import numpy as np
from lintegrate import logtrapz
from scipy.interpolate import splev, splrep
from scipy.stats import expon, gaussian_kde, truncnorm
from .utils import ellipticity_to_q22, q22_to_ellipticity
#: Allowed distributions and their required hyperparameters
DISTRIBUTION_REQUIREMENTS = {
"exponential": ["mu"],
"gaussian": ["mu", "sigma", "weight"],
"deltafunction": ["peak"],
"powerlaw": ["alpha", "minimum", "maximum"],
"histogram": ["weight"],
}
class BaseDistribution(object):
"""
The base class for the distribution, as defined by a set of
hyperparameters, that you want to fit.
Parameters
----------
name: str
The parameter for which this distribution is the prior.
disttype: str
The type of distribution, e.g., 'exponential', 'gaussian'.
hyperparameters: dict
A dictionary of hyperparameters for the distribution with the keys
giving the parameter names, and values giving their fixed value, or
a :class:`bilby.core.prior.Prior` for values that are to be inferred.
low: float
The lower bound of the distribution
high: float
The upper bound of the distribution
"""
def __init__(self, name, disttype, hyperparameters={}, low=-np.inf, high=np.inf):
self.name = name # the parameter name
self.disttype = disttype
self.hyperparameters = hyperparameters
self.low = low
self.high = high
if self.low >= self.high:
raise ValueError("Lower bound is higher than upper bound!")
@property
def disttype(self):
return self._disttype
@disttype.setter
def disttype(self, disttype):
if disttype.lower() not in DISTRIBUTION_REQUIREMENTS.keys():
raise ValueError('Distribution name "{}" is not known'.format(disttype))
else:
self._disttype = disttype.lower()
@property
def hyperparameters(self):
return self._hyperparameters
@hyperparameters.setter
def hyperparameters(self, hyperparameters):
if isinstance(hyperparameters, dict):
# check it contains the required parameter names
for key in hyperparameters.keys():
if key.lower() not in DISTRIBUTION_REQUIREMENTS[self.disttype]:
raise KeyError(
'Unknown parameter "{}" for distribution '
'"{}"'.format(key, self.disttype)
)
self._hyperparameters = {
key.lower(): value for key, value in hyperparameters.items()
}
else:
raise TypeError("hyperparameters must be a dictionary")
# set fixed values
self.fixed = self._hyperparameters
@property
def parameters(self):
return list(self.hyperparameters.keys())
@property
def values(self):
return list(self.hyperparameters.values())
@property
def unpacked_parameters(self):
params = []
for key, value in self.hyperparameters.items():
if isinstance(value, (list, np.ndarray)):
for i in range(len(value)):
params.append("{0}{1:d}".format(key, i))
else:
params.append(key)
return params
@property
def unpacked_values(self):
values = []
for key, value in self.hyperparameters.items():
if isinstance(value, (list, np.ndarray)):
for i in range(len(value)):
values.append(value[i])
else:
values.append(value)
return values
def __getitem__(self, item):
if item.lower() in self.parameters:
return self.hyperparameters[item.lower()]
elif item.lower() in self.unpacked_parameters:
return self.unpacked_values[self.unpacked_parameters.index(item.lower())]
elif item.lower() in DISTRIBUTION_REQUIREMENTS[self.disttype]:
return None
else:
raise KeyError('"{}" is not a parameter in this distribution'.format(item))
def __setitem__(self, item, value):
if item.lower() not in self.hyperparameters.keys():
if item.lower() in DISTRIBUTION_REQUIREMENTS[self.disttype]:
self._hyperparameters[item.lower()] = value
else:
raise KeyError(
'"{}" is not a parameter in this distribution'.format(item)
)
else:
self._hyperparameters[item.lower()] = value
@property
def fixed(self):
"""
Return a dictionary keyed to parameter names and with boolean values
indicating whether the parameter is fixed (True), or to be inferred
(False).
"""
return self._fixed
@fixed.setter
def fixed(self, hyperparameters):
self._fixed = dict()
for param, value in hyperparameters.items():
if isinstance(value, (bilby.core.prior.Prior, bilby.core.prior.PriorDict)):
self._fixed[param] = False
elif isinstance(value, (list, np.ndarray)):
self._fixed[param] = []
for i in range(len(value)):
if isinstance(
value[i], (bilby.core.prior.Prior, bilby.core.prior.PriorDict)
):
self._fixed[param].append(False)
elif isinstance(value[i], (int, float)):
self._fixed[param].append(True)
else:
raise TypeError("Hyperparameter type is not valid")
elif isinstance(value, (int, float)):
self._fixed[param] = True
else:
raise TypeError("Hyperparameter type is not valid")
@property
def unpacked_fixed(self):
"""
Return a flattened version of ``fixed``, with multivalued parameters
indexed.
"""
fixed = dict()
for param, value in zip(self.unpacked_parameters, self.unpacked_values):
if isinstance(value, (bilby.core.prior.Prior, bilby.core.prior.PriorDict)):
fixed[param] = False
elif isinstance(value, (int, float)):
fixed[param] = True
else:
raise TypeError("Hyperparameter type is not valid")
return fixed
@property
def unknown_parameters(self):
"""
A list of the parameters that are to be inferred.
"""
return list(
compress(
self.unpacked_parameters, ~np.array(list(self.unpacked_fixed.values()))
)
)
@property
def unknown_priors(self):
"""
A list of the :class:`~bilby.core.prior.Prior` for the parameters
that are to be inferred.
"""
return list(
compress(
self.unpacked_values, ~np.array(list(self.unpacked_fixed.values()))
)
)
def log_pdf(self, value, hyperparameters):
"""
The natural logarithm of the distribution's probability density
function at the given value.
Parameters
----------
value: float
The value at which to evaluate the probability.
hyperparameters: dict
A dictionary of the hyperparameter values that define the current
state of the distribution.
Returns
-------
lnpdf:
The natural logarithm of the probability.
"""
return np.nan
def pdf(self, value, hyperparameters):
"""
The distribution's probability density function at the given value.
Parameters
----------
value: float
The value at which to evaluate the probability.
hyperparameters: dict
A dictionary of the hyperparameter values that define the current
state of the distribution.
Returns
-------
pdf:
The probability density.
"""
return np.exp(self.log_pdf(value, hyperparameters))
def sample(self, hyperparameters, size=1):
"""
Draw a sample from the distribution as defined by the given
hyperparameters.
Parameters
----------
hyperparameters: dict
A dictionary of the hyperparameter values that define the current
state of the distribution.
size: int
The number of samples to draw from the distribution.
Returns
-------
sample:
A sample, or set of samples, from the distribution.
"""
return None
class BoundedGaussianDistribution(BaseDistribution):
"""
A distribution for estimating the parameters of a (potentially
multi-modal) bounded Gaussian distribution.
An example of using this distribution for a two component Gaussian
distribution bounded at zero and with unknown mean, standard deviations and
weights would be:
>>> from bilby.core.prior import HalfNormal, LogUniform, DirichletPriorDict
>>> # set priors for means (half-Normal distributions with mode at 0)
>>> mus = [HalfNormal(10.0, name="mu0"), HalfNormal(10.0, name="mu1")]
>>> # set priors for standard deviations (log uniform distributions)
>>> sigmas = [LogUniform(name="sigma0", minimum=0.0001, maximum=100.0),
LogUniform(name="sigma1", minimum=0.0001, maximum=100.0)]
>>> # set a Dirichlet prior on the weights (i.e., they must add up to 1)
>>> weights = DirichletPriorDict(n_dim=2, label="weight")
>>> dist = BoundedGaussianDistribution("x", mus=mus, sigmas=sigmas, weights=weights)
Note that if using a Dirichlet prior on the weights all weights must be
included and none can be set as fixed.
Parameters
----------
name: str
See :class:`~cwinpy.hierarchical.BaseDistribution`
mus: array_like
A list of values of the means of each mode of the Gaussian.
sigmas: array_like
A list of values of the standard deviations of each mode of the
Gaussian.
weights: array_like
A list of values of the weights (relative probabilities) of
each mode. This will default to equal weights if not given. If wanting
to estimate multiple weights a DirichletPriorDict should be used as in
the example above.
low: float
The lower bound of the distribution (defaults to 0, i.e., only positive
values are allowed)
high: float
The upper bound of the distribution (defaults to infinity)
"""
def __init__(self, name, mus=[], sigmas=[], weights=None, low=0.0, high=np.inf):
gaussianparameters = {"mu": [], "sigma": [], "weight": []}
if isinstance(mus, (int, float, bilby.core.prior.Prior)):
mus = [mus]
elif not isinstance(mus, (list, np.ndarray)):
raise TypeError("Unknown type for 'mus'")
if isinstance(sigmas, (int, float, bilby.core.prior.Prior)):
sigmas = [sigmas]
elif not isinstance(sigmas, (list, np.ndarray)):
raise TypeError("Unknown type for 'sigmas'")
if weights is None:
weights = [1] * len(mus)
elif not isinstance(
weights, (list, np.ndarray, bilby.core.prior.DirichletPriorDict)
):
raise TypeError("Unknown type for 'weights'")
if isinstance(weights, bilby.core.prior.DirichletPriorDict):
# DirichletPriorDict has length one less than the number of weights
nweights = len(weights) + 1
for wv in weights.values():
gaussianparameters["weight"].append(wv)
else:
nweights = len(weights)
# set the number of modes
self.nmodes = len(mus)
if len(mus) != len(sigmas) or nweights != len(mus):
raise ValueError("'mus', 'sigmas' and 'weights' must be the same length")
if self.nmodes < 1:
raise ValueError("Gaussian must have at least one mode")
for i in range(self.nmodes):
gaussianparameters["mu"].append(mus[i])
gaussianparameters["sigma"].append(sigmas[i])
if isinstance(weights, (list, np.ndarray)):
gaussianparameters["weight"].append(weights[i])
# initialise
super().__init__(
name, "gaussian", hyperparameters=gaussianparameters, low=low, high=high
)
def log_pdf(self, value, hyperparameters={}):
"""
The natural logarithm of the pdf of a 1d (potentially multi-modal)
Gaussian probability distribution.
Parameters
----------
value: float
The value at which the probability is to be evaluated.
hyperparameters: dict
A dictionary containing the current values of the hyperparameters
that need to be inferred. If there are multiple modes and weights
are not fixed then the hyperparameters should include ``n-1``
weights values, where ``n`` is the number of modes.
Returns
-------
logpdf:
The natural logarithm of the probability density at the given
value.
"""
if np.any((value < self.low) | (value > self.high)):
return -np.inf
mus = self["mu"]
sigmas = self["sigma"]
if isinstance(self.fixed["weight"], (list, np.ndarray)):
if np.any(np.asarray(self.fixed["weight"]) == True): # noqa: E712
weights = self["weight"]
else:
# all should be False for Dirichlet priors
weights = np.zeros(self.nmodes)
else:
weights = np.zeros(self.nmodes)
# get current mus and sigmas from values
for i in range(self.nmodes):
if not self.fixed["mu"][i]:
param = "mu{}".format(i)
try:
mus[i] = hyperparameters[param]
except KeyError:
raise KeyError(
"Cannot calculate log probability when "
"value '{}' is not given".format(param)
)
if not self.fixed["sigma"][i]:
param = "sigma{}".format(i)
try:
sigmas[i] = hyperparameters[param]
except KeyError:
raise KeyError(
"Cannot calculate log probability when "
"value '{}' is not given".format(param)
)
if i < (self.nmodes - 1):
if not self.fixed["weight"][i]:
param = "weight{}".format(i)
try:
weights[i] = hyperparameters[param]
except KeyError:
raise KeyError(
"Cannot calculate log probability when "
"value '{}' is not given".format(param)
)
if weights[self.nmodes - 1] == 0.0:
# set final weight
weights[self.nmodes - 1] = 1.0 - np.sum(weights[:-1])
if np.any(np.asarray(sigmas) <= 0.0):
return -np.inf
if np.any(np.asarray(weights) <= 0.0):
return -np.inf
# normalise weights
lweights = np.log(np.asarray(weights) / np.sum(weights))
# get log pdf
if isinstance(value, (float, int)):
logpdf = -np.inf
elif isinstance(value, (list, np.ndarray)):
logpdf = np.full_like(value, -np.inf)
else:
raise TypeError("value must be a float or array-like")
for mu, sigma, lweight in zip(mus, sigmas, lweights):
lpdf = lweight + truncnorm.logpdf(
value,
(self.low - mu) / sigma,
(self.high - mu) / sigma,
loc=mu,
scale=sigma,
)
logpdf = np.logaddexp(logpdf, lpdf)
return logpdf
def sample(self, hyperparameters={}, size=1):
"""
Draw a sample from the bounded Gaussian distribution as defined by the
given hyperparameters.
Parameters
----------
hyperparameters: dict
A dictionary of the hyperparameter values that define the current
state of the distribution. If there are multiple modes and weights
are not fixed then the hyperparameters should include ``n-1``
weights values, where ``n`` is the number of modes.
size: int
The number of samples to draw. Default is 1.
Returns
-------
sample:
A sample, or set of samples, from the distribution.
"""
mus = self["mu"]
sigmas = self["sigma"]
if isinstance(self.fixed["weight"], (list, np.ndarray)):
if np.any(np.asarray(self.fixed["weight"]) == True): # noqa: E712
weights = self["weight"]
else:
# all should be False for Dirichlet priors
weights = np.zeros(self.nmodes)
else:
weights = np.zeros(self.nmodes)
# get current mus and sigmas from values
for i in range(self.nmodes):
if not self.fixed["mu"][i]:
param = "mu{}".format(i)
try:
mus[i] = hyperparameters[param]
except KeyError:
raise KeyError(
"Cannot calculate log probability when "
"value '{}' is not given".format(param)
)
if not self.fixed["sigma"][i]:
param = "sigma{}".format(i)
try:
sigmas[i] = hyperparameters[param]
except KeyError:
raise KeyError(
"Cannot calculate log probability when "
"value '{}' is not given".format(param)
)
if i < (self.nmodes - 1):
if not self.fixed["weight"][i]:
param = "weight{}".format(i)
try:
weights[i] = hyperparameters[param]
except KeyError:
raise KeyError(
"Cannot calculate log probability when "
"value '{}' is not given".format(param)
)
if weights[self.nmodes - 1] == 0.0:
# set final weight
weights[self.nmodes - 1] = 1.0 - np.sum(weights[:-1])
# cumulative normalised weights
cweights = np.cumsum(np.asarray(weights) / np.sum(weights))
# pick mode and draw sample
if self.nmodes == 1:
sample = truncnorm.rvs(
(self.low - mus[0]) / sigmas[0],
(self.high - mus[0]) / sigmas[0],
loc=mus[0],
scale=sigmas[0],
size=size,
)
else:
sample = np.zeros(size)
for i in range(size):
mode = np.argwhere(cweights - np.random.rand() > 0)[0][0]
sample[i] = truncnorm.rvs(
(self.low - mus[mode]) / sigmas[mode],
(self.high - mus[mode]) / sigmas[mode],
loc=mus[mode],
scale=sigmas[mode],
size=1,
)
if size == 1:
sample = sample[0]
return sample
class ExponentialDistribution(BaseDistribution):
"""
A distribution for estimating the parameters of an exponential distribution.
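An example of creating an exponential distribution with a half-Gaussian prior
on the (unknown) mean, where the prior width used here is purely illustrative,
would be:
>>> from bilby.core.prior import HalfGaussian
>>> mu = HalfGaussian(name="mu", sigma=1e34)
>>> dist = ExponentialDistribution("q22", mu=mu)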
Parameters
----------
name: str
See :class:`~cwinpy.hierarchical.BaseDistribution`
mu: float, Prior
The mean of the exponential distribution.
"""
def __init__(self, name, mu):
# initialise
super().__init__(
name, "exponential", hyperparameters=dict(mu=mu), low=0.0, high=np.inf
)
def log_pdf(self, value, hyperparameters={}):
"""
The natural logarithm of the pdf of an exponential distribution.
Parameters
----------
value: float
The value at which the probability is to be evaluated.
hyperparameters: dict
A dictionary containing the current values of the hyperparameters
that need to be inferred.
Returns
-------
logpdf:
The natural logarithm of the probability at the given value.
"""
if np.any((value < self.low) | (value > self.high)):
return -np.inf
mu = self["mu"]
if not self.fixed["mu"]:
try:
mu = hyperparameters["mu"]
except KeyError:
raise KeyError("Cannot evaluate the probability when mu is not given")
if mu <= 0.0:
return -np.inf
# get log pdf
logpdf = expon.logpdf(value, scale=mu)
return logpdf
def sample(self, hyperparameters={}, size=1):
"""
Draw a sample from the exponential distribution as defined by the
given hyperparameters.
Parameters
----------
hyperparameters: dict
A dictionary of the hyperparameter values (``mu``) that define the
current state of the distribution.
size: int
The number of samples to draw from the distribution.
Returns
-------
sample:
A sample, or set of samples, from the distribution.
"""
mu = self["mu"]
if not self.fixed["mu"]:
try:
mu = hyperparameters["mu"]
except KeyError:
raise KeyError("Cannot evaluate the probability when mu is not given")
samples = expon.rvs(scale=mu, size=size)
while 1:
idx = (samples > self.low) & (samples < self.high)
nvalid = np.sum(idx)
if nvalid != size:
sample = expon.rvs(scale=mu, size=(size - nvalid))
samples[~idx] = sample
else:
break
if size == 1:
sample = samples[0]
else:
sample = samples
return sample
class PowerLawDistribution(BaseDistribution):
"""
A distribution for estimating the parameters of a power law distribution.
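An example of creating a power law distribution with a uniform prior on the
power law index (the prior range and the cutoff values used here are purely
illustrative) would be:
>>> from bilby.core.prior import Uniform
>>> alpha = Uniform(name="alpha", minimum=-5.0, maximum=5.0)
>>> dist = PowerLawDistribution("q22", alpha=alpha, minimum=1e28, maximum=1e38)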
Parameters
----------
name: str
See :class:`~cwinpy.hierarchical.BaseDistribution`
alpha: float, Prior
The power law index of the distribution.
minimum: float
A positive finite value giving the lower cutoff of the distribution.
maximum: float
A positive finite value giving the upper cutoff of the distribution.
"""
def __init__(self, name, alpha, minimum, maximum):
if isinstance(minimum, float):
if minimum <= 0 or not np.isfinite(minimum):
raise ValueError(
"Minimum of distribution must be positive finite value"
)
if isinstance(maximum, float):
if maximum < minimum:
raise ValueError(
"Maximum of distribution must be smaller than minimum!"
)
if isinstance(maximum, float):
if maximum <= 0 or not np.isfinite(maximum):
raise ValueError(
"Maximum of distribution must be positive finite value"
)
# initialise
super().__init__(
name,
"powerlaw",
hyperparameters=dict(alpha=alpha, minimum=minimum, maximum=maximum),
)
def log_pdf(self, value, hyperparameters={}):
"""
The natural logarithm of the pdf of a power law distribution.
Parameters
----------
value: float
The value at which the probability is to be evaluated.
hyperparameters: dict
A dictionary containing the current values of the hyperparameters
that need to be inferred.
Returns
-------
logpdf:
The natural logarithm of the probability at the given value.
"""
alpha = self["alpha"]
if not self.fixed["alpha"]:
try:
alpha = hyperparameters["alpha"]
except KeyError:
raise KeyError(
"Cannot evaluate the probability when alpha is not given"
)
minimum = self["minimum"]
if not self.fixed["minimum"]:
try:
minimum = hyperparameters["minimum"]
except KeyError:
raise KeyError(
"Cannot evaluate the probability when minimum is not given"
)
elif np.any(value < minimum):
return -np.inf
maximum = self["maximum"]
if not self.fixed["maximum"]:
try:
maximum = hyperparameters["maximum"]
except KeyError:
raise KeyError(
"Cannot evaluate the probability when maximum is not given"
)
elif np.any(value > maximum):
return -np.inf
# get log pdf
logpdf = bilby.core.prior.PowerLaw(alpha, minimum, maximum).ln_prob(value)
return logpdf
def sample(self, hyperparameters={}, size=1):
"""
Draw a sample from the power law distribution as defined by the
given hyperparameters.
Parameters
----------
hyperparameters: dict
A dictionary of the hyperparameter values (``alpha``, ``minimum``
and ``maximum``) that define the current state of the distribution.
size: int
The number of samples to draw from the distribution.
Returns
-------
sample:
A sample, or set of samples, from the distribution.
"""
alpha = self["alpha"]
if not self.fixed["alpha"]:
try:
alpha = hyperparameters["alpha"]
except KeyError:
raise KeyError(
"Cannot evaluate the probability when alpha is not given"
)
minimum = self["minimum"]
if not self.fixed["minimum"]:
try:
minimum = hyperparameters["minimum"]
except KeyError:
raise KeyError(
"Cannot evaluate the probability when minimum is not given"
)
maximum = self["maximum"]
if not self.fixed["maximum"]:
try:
maximum = hyperparameters["maximum"]
except KeyError:
raise KeyError(
"Cannot evaluate the probability when maximum is not given"
)
samples = bilby.core.prior.PowerLaw(alpha, minimum, maximum).sample(size=size)
if size == 1:
sample = samples[0]
else:
sample = samples
return sample
class DeltaFunctionDistribution(BaseDistribution):
"""
A distribution defining a delta function (useful if wanting to fix a
parameter at a specific value when creating signals, or to use as a null model).
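For example, fixing the value at a purely illustrative peak of 1e30 would be:
>>> dist = DeltaFunctionDistribution("q22", peak=1e30)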
Parameters
----------
name: str
See :class:`~cwinpy.hierarchical.BaseDistribution`
peak: float
The value at which the delta function is non-zero.
"""
def __init__(self, name, peak):
# initialise
super().__init__(name, "deltafunction", hyperparameters=dict(peak=peak))
def log_pdf(self, value, hyperparameters={}):
"""
The natural logarithm of the pdf of a delta function distribution.
Parameters
----------
value: float
The value at which the probability is to be evaluated.
hyperparameters: dict
A dictionary containing the current values of the hyperparameters
that need to be inferred.
Returns
-------
logpdf:
The natural logarithm of the probability at the given value.
"""
peak = self["peak"]
if not self.fixed["peak"]:
try:
peak = hyperparameters["peak"]
except KeyError:
raise KeyError("Cannot evaluate the probability when peak is not given")
if value != peak:
return -np.inf
return 0.0
def sample(self, hyperparameters={}, size=1):
"""
Return the position of the delta function.
Parameters
----------
hyperparameters: dict
A dictionary of the hyperparameter values (``peak``) that define
the current state of the distribution.
size: int
The number of samples to draw from the distribution.
Returns
-------
sample:
A sample, or set of samples, from the distribution.
"""
peak = self["peak"]
if not self.fixed["peak"]:
try:
peak = hyperparameters["peak"]
except KeyError:
raise KeyError("Cannot evaluate the probability when peak is not given")
if size == 1:
return peak
else:
return peak * np.ones(size)
class HistogramDistribution(BaseDistribution):
"""
A distribution for estimating the bin weights of a non-parametric
histogram-type distribution. The priors for the bin weights will be a
Dirichlet prior.
An example of using this distribution for a 20 bin histogram would be:
>>> # set the number of bins and bounds
>>> nbins = 20
>>> lowerbound = 0.0
>>> upperbound = 10.0
>>> dist = HistogramDistribution("x", low=lowerbound, high=upperbound, nbins=nbins)
Parameters
----------
name: str
See :class:`~cwinpy.hierarchical.BaseDistribution`
low: float
The lower bound of the distribution (required).
high: float
The upper bound of the distribution (required).
nbins: int
An integer number of histogram bins to use (defaults to 10).
"""
def __init__(self, name, low, high, nbins=10):
binparameters = {"weight": []}
if isinstance(nbins, int):
if nbins < 1:
raise ValueError("Histogram must have at least one bin.")
self.nbins = nbins
else:
raise TypeError("Number of bins must be an integer")
# set the histogram bin edges (add small buffer on upper bin to allow
# points on the edge)
self.binedges = np.linspace(low, high + 1e-8 * high, nbins + 1)
# set Dirichlet priors on weights
binparameters["weight"] = list(
bilby.core.prior.DirichletPriorDict(
n_dim=self.nbins,
label="weight",
).values()
)
# initialise
super().__init__(
name, "histogram", hyperparameters=binparameters, low=low, high=high
)
def log_pdf(self, value, hyperparameters={}):
"""
The natural logarithm of the pdf of a histogrammed distribution.
Parameters
----------
value: float
The value at which the probability is to be evaluated.
hyperparameters: dict
A dictionary containing the current values of the hyperparameters
that need to be inferred. For a histogram with ``n`` bins, the
hyperparameters should include ``n-1`` weights values.
Returns
-------
logpdf:
The natural logarithm of the probability density at the given
value.
"""
if np.any((np.asarray(value) < self.low) | (np.asarray(value) > self.high)):
return -np.inf
weights = np.zeros(self.nbins)
for i in range(self.nbins):
param = "weight{}".format(i)
if i < (self.nbins - 1):
weights[i] = hyperparameters[param]
else:
# set final weight
weights[i] = 1.0 - np.sum(weights[:-1])
if np.any(weights <= 0.0):
return -np.inf
# get log of weights
lweights = np.log(weights)
# get log pdf
if isinstance(value, (float, int)):
logpdf = -np.inf
elif isinstance(value, (list, np.ndarray)):
logpdf = np.full_like(value, -np.inf)
else:
raise TypeError("value must be a float or array-like")
binidxs = np.digitize(value, self.binedges)
if isinstance(value, (float, int)):
logpdf = lweights[binidxs - 1]
else:
for i in range(len(value)):
logpdf[i] = lweights[binidxs[i] - 1]
return logpdf
def sample(self, hyperparameters={}, size=1):
"""
Draw a sample from the histogram distribution as defined by the
given hyperparameters.
Parameters
----------
hyperparameters: dict
A dictionary of the hyperparameter values that define the current
state of the distribution. If there are ``n`` bins in the
histogram, then the hyperparameters should include ``n-1`` weights
values.
size: int
The number of samples to draw. Default is 1.
Returns
-------
sample:
A sample, or set of samples, from the distribution.
"""
rng = np.random.default_rng()
weights = np.zeros(self.nbins)
# get current weights
for i in range(self.nbins):
param = "weight{}".format(i)
if i < (self.nbins - 1):
weights[i] = hyperparameters[param]
else:
# set final weight
weights[i] = 1.0 - np.sum(weights[:-1])
# cumulative normalised weights
cweights = np.cumsum(np.asarray(weights) / np.sum(weights))
# pick bin and draw sample
if self.nbins == 1:
sample = rng.uniform(self.low, self.high, size=size)
else:
sample = np.zeros(size)
for i in range(size):
bin = np.argwhere(cweights - rng.uniform() > 0)[0][0]
sample[i] = rng.uniform(
self.binedges[bin], self.binedges[bin + 1], size=1
)
if size == 1:
sample = sample[0]
return sample
def create_distribution(name, distribution, distkwargs={}):
"""
Function to create a distribution.
An example of creating an exponential distribution, with a half-Gaussian
prior on the mean would be:
>>> from bilby.core.prior import HalfGaussian
>>> sigma = 1e34 # width of half-Gaussian prior on mu
>>> distkwargs = {"mu": HalfGaussian(name="mu", sigma=sigma)}
>>> expdist = create_distribution("q22", "exponential", distkwargs)
An example of creating a bimodal-Gaussian distribution, with modes fixed at
particular values, fixed weights, but log-uniform priors on the mode
widths, would be:
>>> from bilby.core.prior import LogUniform
>>> min = 1e28 # minimum of the width prior
>>> max = 1e38 # maximum of the width prior
>>> modes = [0.0, 1e32] # fixed modes
>>> weights = [0.7, 0.3] # fixed weights
>>> sigmas = [
>>> LogUniform(name="sigma0", minimum=min, maximum=max),
>>> LogUniform(name="sigma1", minimum=min, maximum=max),
>>> ]
>>> distkwargs = {
>>> "mu": modes, # set "mu" for the modes
>>> "sigma": sigmas, # set "sigma" for the widths
>>> "weight": weights, # set "weight" for the weights
>>> }
>>> gaussdist = create_distribution("q22", "gaussian", distkwargs)
Parameters
----------
name: str
The name of the parameter which the distribution represents.
distribution: str, :class:`cwinpy.hierarchical.BaseDistribution`
A string giving a valid distribution name. This is the distribution for
which the hyperparameters are going to be inferred. If using a string,
the distribution keyword arguments must be passed using ``distkwargs``.
distkwargs: dict
A dictionary of keyword arguments for the distribution that is being
inferred.
Returns
-------
distribution
The distribution class.
"""
if isinstance(distribution, BaseDistribution):
return distribution
elif isinstance(distribution, str):
if distribution.lower() not in DISTRIBUTION_REQUIREMENTS.keys():
raise ValueError('Unknown distribution type "{}"'.format(distribution))
if distribution.lower() == "gaussian":
return BoundedGaussianDistribution(name, **distkwargs)
elif distribution.lower() == "exponential":
return ExponentialDistribution(name, **distkwargs)
elif distribution.lower() == "deltafunction":
return DeltaFunctionDistribution(name, **distkwargs)
elif distribution.lower() == "powerlaw":
return PowerLawDistribution(name, **distkwargs)
elif distribution.lower() == "histogram":
return HistogramDistribution(name, **distkwargs)
else:
raise TypeError("Unknown distribution")
class MassQuadrupoleDistribution(object):
"""
A class to infer the hyperparameters of the :math:`l=m=2` mass quadrupole
distribution (or fiducial ellipticity :math:`\\varepsilon`) for a given
selection of known pulsars (see, for example, [2]_).
The class currently can attempt to fit the hyperparameters for the
following distributions:
* a :math:`n`-mode bounded Gaussian distribution defined by either fixed or
unknown means, standard deviations and weights;
* an exponential distribution defined by an unknown mean.
* a power law distribution defined by an unknown power law index and fixed
or unknown bounds.
None of the distributions allow the quadrupole value to become negative.
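A minimal usage sketch (the result file names and the prior on the mean below
are purely illustrative) would be:
>>> from bilby.core.prior import HalfGaussian
>>> from bilby.core.result import ResultList
>>> data = ResultList(["pulsar1_result.json", "pulsar2_result.json"])
>>> distkwargs = {"mu": HalfGaussian(name="mu", sigma=1e34)}
>>> mqd = MassQuadrupoleDistribution(data=data, distribution="exponential", distkwargs=distkwargs)
>>> result = mqd.sample()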
Parameters
----------
data: :class:`bilby.core.result.ResultList`
A :class:`bilby.core.result.ResultList` of outputs from running source
parameter estimation using bilby for a set of individual CW sources.
These can be from MCMC or nested sampler runs, but only the latter can
be used if requiring a properly normalised evidence value.
gridrange: array_like
A list of values at which the :math:`Q_{22}` parameter posteriors
should be interpolated, or a lower and upper bound in the range of
values, which will be split into ``bins`` points spaced linearly in
log-space (unless ``gridtype`` is set to a value other than ``"log"``).
If not supplied this will instead be set using the posterior samples,
with a minimum value at zero and a maximum given by the maximum of all
posterior samples.
bins: int
The number of bins at which the posterior will be interpolated.
gridtype: str
This sets the grid bin spacing used for assigning the interpolation
grid. It defaults to spacings that are uniform in log-space for
distributions other than
:class:`cwinpy.hierarchical.HistogramDistribution`, in which case the
spacing defaults to linear. Values can either be ``"log"`` or
``"linear"`` to force one or other spacing.
distribution: :class:`cwinpy.hierarchical.BaseDistribution`, str
A predefined distribution, or string giving a valid distribution name.
This is the distribution for which the hyperparameters are going to be
inferred. If using a string, the distribution keyword arguments must be
passed using ``distkwargs``.
distkwargs: dict
A dictionary of keyword arguments for the distribution that is being
inferred.
bw: str, scalar, callable
See the ``bw_method`` argument for :class:`scipy.stats.gaussian_kde`.
sampler: str
The name of the stochastic sampler method used by ``bilby`` for
sampling the posterior. This defaults to use 'dynesty'.
sampler_kwargs: dict
A dictionary of arguments required by the given sampler.
grid: dict
A dictionary of values that define a grid in the parameter and
hyperparameter space that can be used by a
:class:`bilby.core.grid.Grid`. If given sampling will be performed on
the grid, rather than using the stochastic sampler.
integration_method: str
The method to use for integration over the :math:`Q_{22}` parameter for
each source. Default is 'numerical' to perform trapezium rule
integration. The other allowed value is 'expectation', which uses the
:math:`Q_{22}` posterior samples to approximate the expectation value of
the hyperparameter distribution. At the moment, this additional method
may not be correct/reliable.
nsamples: int
This sets the number of posterior samples to store for either
estimating KDEs or calculating expectation values from those passed in
the data. This allows downsampling of large numbers of samples by
randomly drawing a subsection of samples. If the number given is larger
than the total number of samples for a given pulsar, then all samples
will be used in that case. The default will be to use all samples, but
this may lead to memory issues when using large numbers of pulsars.
use_ellipticity: bool
If True, work with fiducial ellipticity :math:`\\varepsilon` rather
than mass quadrupole.
To do
-----
Distributions that could be added include:
* a Student's t-distributions with unknown mean and number of degrees of
freedom.
"""
def __init__(
self,
data=None,
gridrange=None,
bins=100,
gridtype=None,
distribution=None,
distkwargs=None,
bw="scott",
sampler="dynesty",
sampler_kwargs={},
grid=None,
integration_method="numerical",
nsamples=None,
use_ellipticity=False,
):
self._posterior_samples = []
self._pulsar_priors = []
self._log_evidence = []
self._likelihood_kdes_interp = []
self._distribution = None
# set whether to use ellipticity rather than mass quadrupole
self.use_ellipticity = use_ellipticity
# set the values of q22/ellipticity at which to calculate the KDE
# interpolator
self.set_range(gridrange, bins, gridtype=gridtype)
# set integration method
self.set_integration_method(integration_method)
# set the data
self.add_data(data, bw=bw, nsamples=nsamples)
# set the sampler
if grid is None:
self.set_sampler(sampler, sampler_kwargs)
else:
self.set_grid(grid)
# set the distribution
self.set_distribution(distribution, distkwargs)
def set_range(self, gridrange, bins=100, gridtype=None):
"""
Set the values of :math:`Q_{22}`, or ellipticity :math:`\\varepsilon`,
either directly, or as a set of points linear in log-space defined by
lower and upper bounds and a number of bins, at which to evaluate the
posterior samples via their KDE to make an interpolator.
Parameters
----------
gridrange: array_like
If this array contains two values it is assumed that these are the
lower and upper bounds of a range, and the ``bins`` parameter sets
the number of bins in log-space that the range will be split into.
Otherwise, if more than two values are given it is assumed these
are the values for :math:`Q_{22}` or :math:`\\varepsilon`.
bins: int
The number of bins the range is split into.
gridtype: str
Set whether to have grid-spacing be ``"linear"`` or linear in
log-10 space (``"log"``). By default, for distribution's other than
:class:`cwinpy.hierarchical.HistogramDistribution` the default will
be linear in log-10 space.
"""
self._bins = bins
if gridrange is None:
self._grid_interp_values = None
return
if len(gridrange) == 2:
if gridrange[1] < gridrange[0]:
raise ValueError("Grid range is badly defined")
# set grid spacing (either linear or linear in log10-space)
lower, upper = gridrange
if (
gridtype is None
and not isinstance(self._distribution, HistogramDistribution)
) or gridtype == "log":
self._grid_interp_values = np.logspace(
np.log10(gridrange[0]), np.log10(gridrange[1]), self._bins
)
else:
self._grid_interp_values = np.linspace(
gridrange[0], gridrange[1], self._bins
)
elif len(gridrange) > 2:
self._grid_interp_values = gridrange
else:
raise ValueError("Grid range is badly defined")
@property
def interpolated_log_kdes(self):
"""
Return the list of interpolation functions for the natural logarithm of
the :math:`Q_{22}` likelihood functions after a Gaussian KDE has been
applied.
"""
return self._likelihood_kdes_interp
def add_data(self, data, bw="scott", nsamples=None):
"""
Set the data, i.e., the individual source posterior distributions, on
which the hierarchical analysis will be performed.
The posterior samples must include the ``Q22`` :math:`l=m=2` parameter,
or the fiducial ellipticity parameter ``ELL``, for this inference to be
performed.
If using the "numerical" integration method, upon running the
:meth:`~cwinpy.hierarchical.MassQuadrupoleDistribution.sample` method,
these samples will be converted to a KDE (reflected about zero
to avoid edge effects, and re-normalised, although the bandwidth will
be calculated using the unreflected samples), using
:class:`scipy.stats.gaussian_kde`, which will be used as the
data for hierarchical inference. If the posterior
samples come with a Bayesian evidence value, and the prior is present,
then these are used to convert the posterior distribution into a
likelihood, which is what is then stored in the interpolation function.
If using the "expectation" integration method, and if the posterior
samples were not estimated using a uniform prior on ``Q22``/``ELL``,
then the samples will be resampled from a uniform prior to attempt to
generate samples from the likelihood.
Parameters
----------
data: :class:`bilby.core.result.ResultList`
A list, or single, results from bilby containing posterior samples
for a set of sources, or individual source.
bw: str, scale, callable
The Gaussian KDE bandwidth calculation method as required by
:class:`scipy.stats.gaussian_kde`. The default is the 'scott'
method.
nsamples: int
This sets the number of posterior samples to store and use from
those passed in the data. This allows downsampling of large numbers
of samples by randomly drawing a subsection of samples. If the
number given is larger than the total number of samples for a given
pulsar, then all samples will be used in that case. The default
will be to use all samples, but this may lead to memory issues when
using large numbers of pulsars.
"""
# check the data is a ResultList
if not isinstance(data, bilby.core.result.ResultList):
if isinstance(data, (bilby.core.result.Result, str)):
# convert to a ResultList
data = bilby.core.result.ResultList([data])
elif isinstance(data, list):
data = bilby.core.result.ResultList(data)
elif data is None:
return
else:
raise TypeError("Data is not a known type")
self._bw = bw
for result in data:
# check all posteriors contain Q22 or ellipticity
if (
"Q22" not in result.posterior.columns
and "q22" not in result.posterior.columns
):
if (
"ELL" in result.posterior.columns
or "ell" in result.posterior.columns
):
priorkey = "ell" if "ell" in result.posterior.columns else "ELL"
if not self.use_ellipticity:
# convert ellipticity into q22
result.posterior["q22"] = ellipticity_to_q22(
result.posterior[priorkey]
)
else:
raise RuntimeError("Results do not contain Q22")
else:
priorkey = "q22" if "q22" in result.posterior.columns else "Q22"
if self.use_ellipticity:
result.posterior["ell"] = q22_to_ellipticity(
result.posterior[priorkey]
)
self._pulsar_priors.append(result.priors[priorkey])
if nsamples is not None:
if not isinstance(nsamples, int):
raise TypeError("nsamples must be a positive integer")
elif nsamples < 1:
raise ValueError("nsamples must be a positive integer")
# set number of samples to use
if not hasattr(self, "_nsamples") and nsamples is not None:
self._nsamples = nsamples
numsamps = nsamples
elif hasattr(self, "_nsamples") and nsamples is None:
numsamps = self._nsamples
else:
numsamps = nsamples
# create KDEs/add samples
iniidx = len(self._posterior_samples)
for i, result in enumerate(data):
if self.use_ellipticity:
keystr = "ell" if "ell" in result.posterior.columns else "ELL"
else:
keystr = "q22" if "q22" in result.posterior.columns else "Q22"
samples = result.posterior[keystr]
# reweight samples back to equivalent likelihood samples if prior
# on Q22/ELL for PE was not uniform
prior = self._pulsar_priors[iniidx + i]
if self._integration_method == "expectation" and not isinstance(
prior, bilby.core.prior.Uniform
):
# resample to uniform prior
possamps = result.posterior[keystr]
lnweights = prior.log_prob(possamps)
weights = np.exp(lnweights - np.max(lnweights))
samples = possamps[weights > np.random.rand(len(weights))]
# append samples
if numsamps is None:
self._posterior_samples.append(np.array(samples))
else:
if len(samples) < numsamps:
self._posterior_samples.append(np.array(samples))
else:
# generate random choice of samples to store
sidx = np.random.default_rng().choice(
len(samples), numsamps, replace=False
)
self._posterior_samples.append(np.array(samples)[sidx])
self._log_evidence.append(result.log_evidence)
def set_distribution(self, distribution, distkwargs={}):
"""
Set the distribution whose hyperparameters are going to be inferred.
Parameters
----------
distribution: :class:`cwinpy.hierarchical.BaseDistribution`, str
A predefined distribution, or string giving a valid distribution
name. If using a string, the distribution keyword arguments must be
passed using ``distkwargs``.
distkwargs: dict
A dictionary of keyword arguments for the distribution that is being
inferred.
"""
self._prior = None
self._likelihood = None
if distribution is None:
return
if isinstance(distribution, BaseDistribution):
if self.use_ellipticity:
if distribution.name.upper() != "ELL":
raise ValueError("Distribution name must be 'ELL'")
else:
self._distribution = distribution
else:
if distribution.name.upper() != "Q22":
raise ValueError("Distribution name must be 'Q22'")
else:
self._distribution = distribution
elif isinstance(distribution, str):
if self.use_ellipticity:
self._distribution = create_distribution(
"ELL", distribution.lower(), distkwargs
)
else:
self._distribution = create_distribution(
"Q22", distribution.lower(), distkwargs
)
# set the priors from the distribution
self._set_priors()
def _set_priors(self):
"""
Set the priors based on those supplied via the distribution class.
"""
# get the priors from the distribution
if len(self._distribution.unknown_parameters) < 1:
raise ValueError("Distribution has no parameters to infer")
# add priors as PriorDict
self._prior = None
# check for Dirichlet priors
for param, prior in zip(
self._distribution.unknown_parameters, self._distribution.unknown_priors
):
if isinstance(prior, bilby.core.prior.DirichletElement):
self._prior = bilby.core.prior.DirichletPriorDict(
n_dim=prior.n_dimensions, label=prior.label
)
break
if self._prior is None:
self._prior = bilby.core.prior.ConditionalPriorDict()
for param, prior in zip(
self._distribution.unknown_parameters, self._distribution.unknown_priors
):
if param not in self._prior:
self._prior[param] = prior
def _set_likelihood(self):
"""
Set the likelihood.
"""
samples = None
grid = None
likelihoods = None
# set the grid
if self._integration_method == "expectation":
samples = self._posterior_samples
else:
if self._grid_interp_values is None:
# set parameter range from data
minmax = [np.inf, -np.inf]
for psamples in self._posterior_samples:
minval = psamples.min()
maxval = psamples.max()
if minval < minmax[0]:
minmax[0] = minval
if maxval > minmax[1]:
minmax[1] = maxval
self.set_range(minmax, self._bins)
grid = self._grid_interp_values
# generate KDEs from samples and create spline interpolants
nkdes = len(self._likelihood_kdes_interp)
if len(self._posterior_samples) > nkdes:
for i in range(nkdes, len(self._posterior_samples)):
psamples = self._posterior_samples[i]
try:
# get reflected samples
samps = np.concatenate((psamples, -psamples))
# calculate the KDE initially using the unreflected
# samples to get a better bandwidth and prevent
# artificially broadened distributions
kdeorig = gaussian_kde(psamples, bw_method=self._bw)
# calculate KDE (using new bandwidth equivalent to that
# for unreflected samples)
bw = np.sqrt(kdeorig.covariance[0][0] / np.var(samps))
kde = gaussian_kde(samps, bw_method=bw)
# use log pdf for the kde
interpvals = kde.logpdf(self._grid_interp_values) + np.log(
2.0
) # multiply by 2 so pdf normalises to 1
# replace any non-finite values: set them to -inf and then
# convert them to large (finite) negative numbers with
# nan_to_num
infvals = ~np.isfinite(interpvals)
if np.any(infvals):
interpvals[infvals] = -np.inf
interpvals = np.nan_to_num(interpvals)
except Exception as e:
raise RuntimeError("Problem creating KDE: {}".format(e))
# convert posterior to likelihood (if possible)
if np.isfinite(self._log_evidence[i]):
# multiply by evidence
interpvals += self._log_evidence[i]
# divide by prior
interpvals -= self._pulsar_priors[i].ln_prob(
self._grid_interp_values
)
# create and add interpolator (the tck tuple for a B-spline)
self._likelihood_kdes_interp.append(
splrep(self._grid_interp_values, interpvals)
)
likelihoods = self._likelihood_kdes_interp
self._likelihood = MassQuadrupoleDistributionLikelihood(
self._distribution, likelihoods=likelihoods, samples=samples, grid=grid
)
def set_sampler(self, sampler="dynesty", sampler_kwargs={}):
"""
Set the stochastic sampling method for ``bilby`` to use when sampling
the parameter and hyperparameter posteriors.
Parameters
----------
sampler: str
The name of the stochastic sampler method used by ``bilby`` for
sampling the posterior. This defaults to use 'dynesty'.
sampler_kwargs: dict
A dictionary of arguments required by the given sampler.
"""
self._sampler = sampler
if self._sampler not in bilby.core.sampler.IMPLEMENTED_SAMPLERS:
raise ValueError(
'Sampler "{}" is not implemented in "bilby"'.format(self._sampler)
)
self._sampler_kwargs = sampler_kwargs
self._use_grid = False # set to not use the Grid sampling
def set_grid(self, grid):
"""
Set a grid on which to evaluate the hyperparameter posterior, as used
by :class:`bilby.core.grid.Grid`.
Parameters
----------
grid: dict
A dictionary of values that define a grid in the hyperparameter
space that can be used by a :class:`bilby.core.grid.Grid` class.
"""
if not isinstance(grid, dict):
raise TypeError("Grid must be a dictionary")
self._grid = grid
self._use_grid = True
def set_integration_method(self, integration_method="numerical"):
"""
Set the method to use for integration over the :math:`Q_{22}` parameter
for each source.
Parameters
----------
integration_method: str
Default is 'numerical' to perform trapezium rule integration.
The other allowed value is 'expectation', which uses the
:math:`Q_{22}` posterior samples to approximate the expectation
value of the hyperparameter distribution.
"""
if not isinstance(integration_method, str):
raise TypeError("integration method must be a string")
if integration_method.lower() not in ["numerical", "expectation"]:
raise ValueError(
"Unrecognised integration method type "
"'{}'".format(integration_method)
)
self._integration_method = integration_method.lower()
@property
def result(self):
"""
Return the ``bilby`` object containing the results. If evaluating the
posterior over a grid this is a :class:`bilby.core.grid.Grid` object.
If sampling using a stochastic sampler, this is a
:class:`bilby.core.result.Result` object. If sampling has not yet been
performed this returns ``None``.
"""
if self._use_grid:
if hasattr(self, "_grid_result"):
return self._grid_result
else:
return None
else:
if hasattr(self, "_result"):
return self._result
else:
return None
def sample(self, **run_kwargs):
"""
Sample the posterior distribution using ``bilby``. This can take
keyword arguments required by the bilby
:func:`~bilby.core.sampler.run_sampler` function.
"""
# set up the likelihood function
self._set_likelihood()
# set use_ratio to False by default, i.e., don't use the likelihood
# ratio
run_kwargs.setdefault("use_ratio", False)
if self._use_grid:
self._grid_result = bilby.core.grid.Grid(
self._likelihood, self._prior, grid_size=self._grid
)
else:
self._result = bilby.run_sampler(
likelihood=self._likelihood,
priors=self._prior,
sampler=self._sampler,
**self._sampler_kwargs,
**run_kwargs
)
return self.result
def posterior_predictive(self, points, nsamples=100):
"""
Return an iterator that will draw samples from the distribution
hyperparameter posterior (once
:meth:`~cwinpy.hierarchical.MassQuadrupoleDistribution.sample` has been
run) and returns the associated distribution evaluated at a set of
points.
Currently this is only implemented to work using samples from a
stochastic sampling method rather than posteriors evaluated on a grid.
Parameters
----------
points: array_like
An array of Q22/ellipticity values at which to evaluate the
distribution.
nsamples: int
The number of samples to draw from the distribution. This defaults
to 100, but must be less than the number of posterior samples.
"""
if self.result is None:
raise RuntimeError("Sampling has not yet been performed")
if self._use_grid:
raise RuntimeError("Posterior predictive check can only use samples")
# check grid
if not isinstance(points, (tuple, list, np.ndarray)):
raise TypeError("points must be array_like")
if nsamples > len(self.result.posterior):
raise ValueError(
"Requested number of samples is greater than the number of posterior samples"
)
# choose random indices of the posterior samples
idx = np.random.choice(len(self.result.posterior), nsamples, replace=False)
for i in idx:
# get parameters of distribution for each sample
hyper = {
key: self.result.posterior[key][i]
for key in self._distribution.unpacked_parameters
if key in self.result.posterior.columns
}
# evaluate the distribution
yield self._distribution.pdf(np.asarray(points), hyper)
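# Hedged usage sketch (assumes sampling has already been run on a hypothetical
# instance `mqd`, and that matplotlib.pyplot is imported as plt; the grid
# values are purely illustrative):
#
#   q22_points = np.logspace(28, 38, 200)
#   for pdf_vals in mqd.posterior_predictive(q22_points, nsamples=50):
#       plt.plot(q22_points, pdf_vals, color="grey", alpha=0.1)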
class MassQuadrupoleDistributionLikelihood(bilby.core.likelihood.Likelihood):
"""
The likelihood function for inferring the hyperparameters of the
mass quadrupole, :math:`Q_{22}`, distribution (or equivalently the
fiducial ellipticity distribution).
Parameters
----------
distribution: :class:`cwinpy.hierarchical.BaseDistribution`
The probability distribution for which the hyperparameters are going
to be inferred.
likelihoods: list
A list of interpolation functions each of which gives the likelihood
function for a single source.
grid: array_like
If given, the integration over the mass quadrupole distribution for
each source is performed numerically at these grid points. If not
given, individual samples from :math:`Q_{22}` will be drawn from each
source (i.e., equivalent to having a new :math:`Q_{22}` parameter for
each source in the sampler).
samples: list
A list of arrays of :math:`Q_{22}` samples for each source. If this is
given then these samples will be used to approximate the integral over
independent :math:`Q_{22}` variables for each source.
"""
def __init__(self, distribution, likelihoods=None, grid=None, samples=None):
if not isinstance(distribution, BaseDistribution):
raise TypeError("Distribution is not the correct type")
# check that the distribution contains parameters to be inferred
if len(distribution.unknown_parameters) < 1:
raise ValueError("Distribution has no parameters to infer")
# set parameters to be inferred
inferred_parameters = {param: None for param in distribution.unknown_parameters}
self.distribution = distribution
self.grid = grid
self.likelihoods = likelihoods
self.samples = samples
super().__init__(parameters=inferred_parameters)
@property
def likelihoods(self):
return self._likelihoods
@likelihoods.setter
def likelihoods(self, like):
if like is None:
self._likelihoods = None
self._nsources = 0
elif not isinstance(like, list):
raise TypeError("Likelihoods must be a list")
else:
if self.grid is not None:
# evaluate the interpolated (log) likelihoods on the grid
self._likelihoods = []
for ll in like:
self._likelihoods.append(splev(self.grid, ll))
self._nsources = len(like)
else:
raise ValueError("Grid must be set to evaluate likelihoods")
@property
def grid(self):
return self._grid
@grid.setter
def grid(self, grid):
if isinstance(grid, (list, np.ndarray)):
self._grid = np.asarray(grid)
elif grid is None:
self._grid = None
else:
raise TypeError("Grid must be array-like")
@property
def samples(self):
return self._samples
@samples.setter
def samples(self, samples):
if samples is not None:
if not isinstance(samples, (list, np.ndarray)):
raise TypeError("samples value must be a list")
if isinstance(samples, np.ndarray):
if len(samples.shape) != 2:
raise ValueError("Samples must be a 2D array")
for samplelist in samples:
if not isinstance(samplelist, (list, np.ndarray)):
raise TypeError("Samples must be a list")
if len(np.asarray(samplelist).shape) != 1:
raise ValueError("Source samples must be a 1d list")
self._nsources = len(samples)
self._samples = samples
def log_likelihood(self):
"""
Evaluate the log likelihood.
"""
log_like = 0.0 # initialise the log likelihood
if self.samples is not None:
# log-likelihood using expectation value from samples
for samps in self.samples:
with np.errstate(divide="ignore"):
log_like += np.log(
np.mean(self.distribution.pdf(samps, self.parameters))
)
log_like = np.nan_to_num(log_like)
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import codecs
import os
import math
import random
import time
import h5py
import numpy as np
from numpy.linalg import norm
import tensorflow as tf
from tqdm import tqdm
from models.decoders import get_scores_and_spans, get_span_indices, \
greedy_search
from models.span_models import MaskSpanModel
from utils.common import load_json, write_json, word_convert, UNK
from utils.data_utils import f_score, count_gold_spans, \
count_gold_and_system_outputs, span2bio
NULL_LABEL_ID = 0
class KnnModel(MaskSpanModel):
def __init__(self, config, batcher, is_train=True):
self.knn_ids = None
self.gold_label_proba = None
self.max_n_spans = config["max_n_spans"]
super(KnnModel, self).__init__(config, batcher, is_train)
def _add_placeholders(self):
self.words = tf.placeholder(tf.int32, shape=[None, None], name="words")
self.tags = tf.placeholder(tf.int32, shape=[None, None], name="tags")
self.seq_len = tf.placeholder(tf.int32, shape=[None], name="seq_len")
self.neighbor_reps = tf.placeholder(tf.float32, shape=[None, None],
name="neighbor_reps")
self.neighbor_tags = tf.placeholder(tf.float32, shape=[None],
name="neighbor_tags")
self.neighbor_tag_one_hots = tf.placeholder(tf.float32, shape=[None, None],
name="neighbor_tag_one_hots")
if self.cfg["use_chars"]:
self.chars = tf.placeholder(tf.int32, shape=[None, None, None],
name="chars")
# hyperparameters
self.is_train = tf.placeholder(tf.bool, name="is_train")
self.keep_prob = tf.placeholder(tf.float32, name="rnn_keep_probability")
self.drop_rate = tf.placeholder(tf.float32, name="dropout_rate")
self.lr = tf.placeholder(tf.float32, name="learning_rate")
def _get_feed_dict(self, batch, keep_prob=1.0, is_train=False, lr=None):
feed_dict = {self.words: batch["words"], self.seq_len: batch["seq_len"]}
if "tags" in batch:
feed_dict[self.tags] = batch["tags"]
if self.cfg["use_chars"]:
feed_dict[self.chars] = batch["chars"]
if "neighbor_reps" in batch:
feed_dict[self.neighbor_reps] = batch["neighbor_reps"]
if "neighbor_tags" in batch:
feed_dict[self.neighbor_tags] = batch["neighbor_tags"]
if "neighbor_tag_one_hots" in batch:
feed_dict[self.neighbor_tag_one_hots] = batch["neighbor_tag_one_hots"]
feed_dict[self.keep_prob] = keep_prob
feed_dict[self.drop_rate] = 1.0 - keep_prob
feed_dict[self.is_train] = is_train
if lr is not None:
feed_dict[self.lr] = lr
return feed_dict
def _build_neighbor_similarity_op(self):
with tf.name_scope("similarity"):
# 1D: batch_size, 2D: max_num_spans, 3D: num_instances
self.similarity = tf.tensordot(self.span_rep, self.neighbor_reps,
axes=[-1, -1])
def _build_neighbor_proba_op(self):
with tf.name_scope("neighbor_prob"):
# 1D: batch_size, 2D: max_num_spans, 3D: num_instances
self.neighbor_proba = tf.nn.softmax(self.similarity, axis=-1)
def _build_marginal_proba_op(self):
with tf.name_scope("gold_label_prob"):
# 1D: batch_size, 2D: max_num_spans, 3D: 1
tags = tf.expand_dims(tf.cast(self.tags, dtype=tf.float32), axis=2)
# 1D: batch_size, 2D: max_num_spans, 3D: num_instances
gold_label_mask = tf.cast(
tf.equal(self.neighbor_tags, tags), dtype=tf.float32)
# 1D: batch_size, 2D: max_num_spans, 3D: num_instances
proba = self.neighbor_proba * gold_label_mask
# 1D: batch_size, 2D: max_num_spans
self.gold_label_proba = tf.reduce_sum(
tf.clip_by_value(proba, 1e-10, 1.0), axis=2)
def _build_knn_loss_op(self):
with tf.name_scope("loss"):
# 1D: batch_size, 2D: max_num_spans
self.losses = tf.math.log(self.gold_label_proba)
self.loss = - tf.reduce_mean(tf.reduce_sum(self.losses, axis=-1))
tf.summary.scalar("loss", self.loss)
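# In words: each candidate span's similarities to the retrieved training spans
# are turned into a softmax distribution, the probability mass on neighbours
# carrying the gold tag is summed (gold_label_proba above), and the objective is
#   loss = -mean_over_batch( sum_over_spans log gold_label_proba )
# i.e. a marginalised nearest-neighbour cross-entropy over the retrieved spans.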
def _build_one_nn_predict_op(self):
with tf.name_scope("prediction"):
neighbor_indices = tf.argmax(self.similarity, axis=2)
knn_predicts = tf.gather(self.neighbor_tags, neighbor_indices)
self.predicts = tf.reshape(knn_predicts,
shape=(tf.shape(self.words)[0], -1))
def _build_max_marginal_predict_op(self):
with tf.name_scope("prediction"):
# 1D: 1, 2D: 1, 3D: num_instances, 4D: num_tags
one_hot_tags = tf.reshape(self.neighbor_tag_one_hots,
shape=[1, 1, -1, self.tag_vocab_size])
# 1D: batch_size, 2D: max_num_spans, 3D: num_instances, 4D: 1
proba = tf.expand_dims(self.neighbor_proba, axis=3)
# 1D: batch_size, 2D: max_num_spans, 3D: num_instances, 4D: num_tags
proba = proba * one_hot_tags
# 1D: batch_size, 2D: max_num_spans, 3D: num_tags
self.marginal_proba = tf.reduce_sum(proba, axis=2)
self.predicts = tf.argmax(self.marginal_proba, axis=2)
def _build_model_op(self):
self._build_rnn_op()
self._make_span_indices()
if self.cfg["bilstm_type"] == "minus":
self._build_span_minus_op()
else:
self._build_span_add_and_minus_op()
self._build_span_projection_op()
self._build_neighbor_similarity_op()
self._build_neighbor_proba_op()
def _build_loss_op(self):
self._build_marginal_proba_op()
self._build_knn_loss_op()
def _build_predict_op(self):
if self.cfg["predict"] == "one_nn":
self._build_one_nn_predict_op()
else:
self._build_max_marginal_predict_op()
def get_neighbor_batch(self, train_sents, train_sent_ids):
return self.batcher.batchnize_neighbor_train_sents(
train_sents, train_sent_ids, self.max_span_len, self.max_n_spans)
def get_neighbor_reps_and_tags(self, span_reps, batch):
return self.batcher.batchnize_span_reps_and_tags(
span_reps, batch["tags"], batch["masks"])
def get_neighbor_reps_and_tag_one_hots(self, span_reps, batch):
return self.batcher.batchnize_span_reps_and_tag_one_hots(
span_reps, batch["tags"], batch["masks"], self.tag_vocab_size)
def make_one_batch_for_target(self, data, sent_id, add_tags=True):
return self.batcher.make_each_batch_for_targets(
batch_words=[data["words"]],
batch_chars=[data["chars"]],
batch_ids=[sent_id],
max_span_len=self.max_span_len,
max_n_spans=0,
batch_tags=[data["tags"]] if add_tags else None)
def _add_neighbor_instances_to_batch(self, batch, train_sents,
train_sent_ids, is_train):
if train_sent_ids:
if is_train:
train_sent_ids = list(set(train_sent_ids) - set(batch["instance_ids"]))
random.shuffle(train_sent_ids)
else:
train_sent_ids = batch["train_sent_ids"]
neighbor_batch = self.get_neighbor_batch(train_sents,
train_sent_ids[:self.cfg["k"]])
feed_dict = self._get_feed_dict(neighbor_batch)
span_reps = self.sess.run([self.span_rep], feed_dict)[0]
if is_train or self.cfg["predict"] == "one_nn":
rep_list, tag_list = self.get_neighbor_reps_and_tags(
span_reps, neighbor_batch)
batch["neighbor_reps"] = rep_list
batch["neighbor_tags"] = tag_list
else:
rep_list, tag_list = self.get_neighbor_reps_and_tag_one_hots(
span_reps, neighbor_batch)
batch["neighbor_reps"] = rep_list
batch["neighbor_tag_one_hots"] = tag_list
return batch
def _make_batch_and_sample_sent_ids(self, batch, valid_record, train_sents,
train_sent_ids):
if train_sent_ids:
random.shuffle(train_sent_ids)
sampled_train_sent_ids = train_sent_ids[:self.cfg["k"]]
else:
sampled_train_sent_ids = valid_record["train_sent_ids"][:self.cfg["k"]]
train_batch = self.batcher.make_batch_from_sent_ids(train_sents,
sampled_train_sent_ids)
feed_dict = self._get_feed_dict(train_batch)
span_reps = self.sess.run([self.span_rep], feed_dict)[0]
rep_list, tag_list = self.get_neighbor_reps_and_tag_one_hots(span_reps,
train_batch)
batch["neighbor_reps"] = rep_list
batch["neighbor_tag_one_hots"] = tag_list
return batch, sampled_train_sent_ids
def train_knn_epoch(self, batches, name):
loss_total = 0.
num_batches = 0
start_time = time.time()
train_sents = load_json(self.cfg["train_set"])
if self.cfg["knn_sampling"] == "random":
train_sent_ids = [sent_id for sent_id in range(len(train_sents))]
else:
train_sent_ids = None
for batch in batches:
num_batches += 1
if num_batches % 100 == 0:
print("%d" % num_batches, flush=True, end=" ")
# Setup a batch
batch = self._add_neighbor_instances_to_batch(batch,
train_sents,
train_sent_ids,
is_train=True)
# Convert a batch to the input format
feed_dict = self._get_feed_dict(batch,
is_train=True,
keep_prob=self.cfg["keep_prob"],
lr=self.cfg["lr"])
# Train a model
_, train_loss = self.sess.run([self.train_op, self.loss],
feed_dict)
if math.isnan(train_loss):
self.logger.info("\n\n\nNAN: Index: %d\n" % num_batches)
exit()
loss_total += train_loss
avg_loss = loss_total / num_batches
self.logger.info("-- Time: %f seconds" % (time.time() - start_time))
self.logger.info(
"-- Averaged loss: %f(%f/%d)" % (avg_loss, loss_total, num_batches))
return avg_loss, loss_total
def evaluate_knn_epoch(self, batches, name):
correct = 0
p_total = 0
num_batches = 0
start_time = time.time()
train_sents = load_json(self.cfg["train_set"])
if self.cfg["knn_sampling"] == "random":
train_sent_ids = [sent_id for sent_id in range(len(train_sents))]
else:
train_sent_ids = None
for batch in batches:
num_batches += 1
if num_batches % 100 == 0:
print("%d" % num_batches, flush=True, end=" ")
# Setup a batch
batch = self._add_neighbor_instances_to_batch(batch,
train_sents,
train_sent_ids,
is_train=False)
# Convert a batch to the input format
feed_dict = self._get_feed_dict(batch)
# Classify spans
predicted_tags = self.sess.run([self.predicts], feed_dict)[0]
crr_i, p_total_i = count_gold_and_system_outputs(batch["tags"],
predicted_tags,
NULL_LABEL_ID)
correct += crr_i
p_total += p_total_i
p, r, f = f_score(correct, p_total, self.n_gold_spans)
self.logger.info("-- Time: %f seconds" % (time.time() - start_time))
self.logger.info(
"-- {} set\tF:{:>7.2%} P:{:>7.2%} ({:>5}/{:>5}) R:{:>7.2%} ({:>5}/{:>5})"
.format(name, f, p, correct, p_total, r, correct, self.n_gold_spans))
return f, p, r, correct, p_total, self.n_gold_spans
def train(self):
self.logger.info(str(self.cfg))
config_path = os.path.join(self.cfg["checkpoint_path"], "config.json")
write_json(config_path, self.cfg)
batch_size = self.cfg["batch_size"]
epochs = self.cfg["epochs"]
train_path = self.cfg["train_set"]
valid_path = self.cfg["valid_set"]
self.n_gold_spans = count_gold_spans(valid_path)
if self.cfg["knn_sampling"] == "knn":
self.knn_ids = h5py.File(
os.path.join(self.cfg["raw_path"], "knn_ids.hdf5"), "r")
valid_batch_size = 1
shuffle = False
else:
valid_batch_size = batch_size
shuffle = True
valid_set = list(
self.batcher.batchnize_dataset(data=valid_path,
data_name="valid",
batch_size=valid_batch_size,
shuffle=shuffle))
best_f1 = -np.inf
init_lr = self.cfg["lr"]
self.log_trainable_variables()
self.logger.info("Start training...")
self._add_summary()
for epoch in range(1, epochs + 1):
self.logger.info('Epoch {}/{}:'.format(epoch, epochs))
train_set = self.batcher.batchnize_dataset(data=train_path,
data_name="train",
batch_size=batch_size,
shuffle=True)
_ = self.train_knn_epoch(train_set, "train")
if self.cfg["use_lr_decay"]: # learning rate decay
self.cfg["lr"] = max(init_lr / (1.0 + self.cfg["lr_decay"] * epoch),
self.cfg["minimal_lr"])
eval_metrics = self.evaluate_knn_epoch(valid_set, "valid")
cur_valid_f1 = eval_metrics[0]
if cur_valid_f1 > best_f1:
best_f1 = cur_valid_f1
self.save_session(epoch)
self.logger.info(
'-- new BEST F1 on valid set: {:>7.2%}'.format(best_f1))
self.train_writer.close()
self.test_writer.close()
def eval(self, preprocessor):
self.logger.info(str(self.cfg))
########################
# Load validation data #
########################
valid_data = preprocessor.load_dataset(
self.cfg["data_path"], keep_number=True,
lowercase=self.cfg["char_lowercase"])
valid_data = valid_data[:self.cfg["data_size"]]
dataset = preprocessor.build_dataset(valid_data,
self.word_dict,
self.char_dict,
self.tag_dict)
dataset_path = os.path.join(self.cfg["save_path"], "tmp.json")
write_json(dataset_path, dataset)
self.logger.info("Valid sentences: {:>7}".format(len(dataset)))
self.n_gold_spans = count_gold_spans(dataset_path)
######################
# Load training data #
######################
train_sents = load_json(self.cfg["train_set"])
if self.cfg["knn_sampling"] == "random":
train_sent_ids = [sent_id for sent_id in range(len(train_sents))]
else:
train_sent_ids = None
self.logger.info("Train sentences: {:>7}".format(len(train_sents)))
#############
# Main loop #
#############
correct = 0
p_total = 0
start_time = time.time()
print("PREDICTION START")
for record, data in zip(valid_data, dataset):
valid_sent_id = record["sent_id"]
if (valid_sent_id + 1) % 100 == 0:
print("%d" % (valid_sent_id + 1), flush=True, end=" ")
batch = self.make_one_batch_for_target(data, valid_sent_id)
#####################
# Sentence sampling #
#####################
batch, sampled_sent_ids = self._make_batch_and_sample_sent_ids(
batch, record, train_sents, train_sent_ids)
##############
# Prediction #
##############
feed_dict = self._get_feed_dict(batch)
batch_sims, batch_preds = self.sess.run(
[self.similarity, self.predicts], feed_dict)
crr_i, p_total_i = count_gold_and_system_outputs(
batch["tags"], batch_preds, NULL_LABEL_ID)
correct += crr_i
p_total += p_total_i
##############
# Evaluation #
##############
p, r, f = f_score(correct, p_total, self.n_gold_spans)
self.logger.info("-- Time: %f seconds" % (time.time() - start_time))
self.logger.info(
"-- F:{:>7.2%} P:{:>7.2%} ({:>5}/{:>5}) R:{:>7.2%} ({:>5}/{:>5})"
.format(f, p, correct, p_total, r, correct, self.n_gold_spans))
def save_predicted_spans(self, data_name, preprocessor):
self.logger.info(str(self.cfg))
########################
# Load validation data #
########################
valid_data = preprocessor.load_dataset(
self.cfg["data_path"], keep_number=True,
lowercase=self.cfg["char_lowercase"])
valid_data = valid_data[:self.cfg["data_size"]]
dataset = preprocessor.build_dataset(valid_data,
self.word_dict,
self.char_dict,
self.tag_dict)
dataset_path = os.path.join(self.cfg["save_path"], "tmp.json")
write_json(dataset_path, dataset)
self.logger.info("Valid sentences: {:>7}".format(len(dataset)))
######################
# Load training data #
######################
train_sents = load_json(self.cfg["train_set"])
if self.cfg["knn_sampling"] == "random":
train_sent_ids = [sent_id for sent_id in range(len(train_sents))]
else:
train_sent_ids = None
self.logger.info("Train sentences: {:>7}".format(len(train_sents)))
#############
# Main loop #
#############
start_time = time.time()
results = []
print("PREDICTION START")
for record, data in zip(valid_data, dataset):
valid_sent_id = record["sent_id"]
batch = self.make_one_batch_for_target(data, valid_sent_id,
add_tags=False)
if (valid_sent_id + 1) % 100 == 0:
print("%d" % (valid_sent_id + 1), flush=True, end=" ")
#####################
# Sentence sampling #
#####################
batch, sampled_sent_ids = self._make_batch_and_sample_sent_ids(
batch, record, train_sents, train_sent_ids)
###############
# KNN predict #
###############
feed_dict = self._get_feed_dict(batch)
batch_preds = self.sess.run([self.predicts], feed_dict)[0]
preds = batch_preds[0]
########################
# Make predicted spans #
########################
indx_i, indx_j = get_span_indices(n_words=len(record["words"]),
max_span_len=self.max_span_len)
assert len(preds) == len(indx_i) == len(indx_j)
pred_spans = [[self.rev_tag_dict[pred_label_id], int(i), int(j)]
for pred_label_id, i, j in zip(preds, indx_i, indx_j)
if pred_label_id != NULL_LABEL_ID]
##################
# Add the result #
##################
results.append({"sent_id": valid_sent_id,
"words": record["words"],
"spans": pred_spans,
"train_sent_ids": sampled_sent_ids})
path = os.path.join(self.cfg["checkpoint_path"],
"%s.predicted_spans.json" % data_name)
write_json(path, results)
self.logger.info(
"-- Time: %f seconds\nFINISHED." % (time.time() - start_time))
def save_predicted_bio_tags(self, data_name, preprocessor):
self.logger.info(str(self.cfg))
########################
# Load validation data #
########################
valid_data = preprocessor.load_dataset(
self.cfg["data_path"], keep_number=True,
lowercase=self.cfg["char_lowercase"])
valid_data = valid_data[:self.cfg["data_size"]]
dataset = preprocessor.build_dataset(valid_data,
self.word_dict,
self.char_dict,
self.tag_dict)
dataset_path = os.path.join(self.cfg["save_path"], "tmp.json")
write_json(dataset_path, dataset)
self.logger.info("Valid sentences: {:>7}".format(len(dataset)))
######################
# Load training data #
######################
train_sents = load_json(self.cfg["train_set"])
if self.cfg["knn_sampling"] == "random":
train_sent_ids = [sent_id for sent_id in range(len(train_sents))]
else:
train_sent_ids = None
self.logger.info("Train sentences: {:>7}".format(len(train_sents)))
#############
# Main loop #
#############
start_time = time.time()
path = os.path.join(self.cfg["checkpoint_path"], "%s.bio.txt" % data_name)
fout_txt = open(path, "w")
print("PREDICTION START")
for record, data in zip(valid_data, dataset):
valid_sent_id = record["sent_id"]
batch = self.make_one_batch_for_target(data, valid_sent_id,
add_tags=False)
if (valid_sent_id + 1) % 100 == 0:
print("%d" % (valid_sent_id + 1), flush=True, end=" ")
#####################
# Sentence sampling #
#####################
batch, sampled_sent_ids = self._make_batch_and_sample_sent_ids(
batch, record, train_sents, train_sent_ids)
###############
# KNN predict #
###############
feed_dict = self._get_feed_dict(batch)
proba = self.sess.run([self.marginal_proba], feed_dict)[0][0]
########################
# Make predicted spans #
########################
words = record["words"]
triples = greedy_search(proba,
n_words=len(words),
max_span_len=self.max_span_len,
null_label_id=NULL_LABEL_ID)
pred_bio_tags = span2bio(spans=triples,
n_words=len(words),
tag_dict=self.rev_tag_dict)
gold_bio_tags = span2bio(spans=record["tags"],
n_words=len(words))
assert len(words) == len(pred_bio_tags) == len(gold_bio_tags)
####################
# Write the result #
####################
for word, gold_tag, pred_tag in zip(words, gold_bio_tags, pred_bio_tags):
fout_txt.write("%s _ %s %s\n" % (word, gold_tag, pred_tag))
fout_txt.write("\n")
self.logger.info(
"-- Time: %f seconds\nFINISHED." % (time.time() - start_time))
def save_nearest_spans(self, data_name, preprocessor, print_knn):
self.logger.info(str(self.cfg))
########################
# Load validation data #
########################
valid_data = preprocessor.load_dataset(
self.cfg["data_path"], keep_number=True,
lowercase=self.cfg["char_lowercase"])
valid_data = valid_data[:self.cfg["data_size"]]
dataset = preprocessor.build_dataset(valid_data,
self.word_dict,
self.char_dict,
self.tag_dict)
dataset_path = os.path.join(self.cfg["save_path"], "tmp.json")
write_json(dataset_path, dataset)
self.logger.info("Valid sentences: {:>7}".format(len(dataset)))
self.n_gold_spans = count_gold_spans(dataset_path)
######################
# Load training data #
######################
train_sents = load_json(self.cfg["train_set"])
if self.cfg["knn_sampling"] == "random":
train_sent_ids = [sent_id for sent_id in range(len(train_sents))]
else:
train_sent_ids = None
train_data = preprocessor.load_dataset(
os.path.join(self.cfg["raw_path"], "train.json"),
keep_number=True, lowercase=False)
self.logger.info("Train sentences: {:>7}".format(len(train_sents)))
#############
# Main loop #
#############
correct = 0
p_total = 0
start_time = time.time()
file_path = os.path.join(self.cfg["checkpoint_path"],
"%s.nearest_spans.txt" % data_name)
fout_txt = open(file_path, "w")
print("PREDICTION START")
for record, data in zip(valid_data, dataset):
valid_sent_id = record["sent_id"]
batch = self.make_one_batch_for_target(data, valid_sent_id)
if (valid_sent_id + 1) % 100 == 0:
print("%d" % (valid_sent_id + 1), flush=True, end=" ")
#####################
# Sentence sampling #
#####################
batch, sampled_sent_ids = self._make_batch_and_sample_sent_ids(
batch, record, train_sents, train_sent_ids)
##############
# Prediction #
##############
feed_dict = self._get_feed_dict(batch)
batch_sims, batch_preds = self.sess.run(
[self.similarity, self.predicts], feed_dict)
crr_i, p_total_i = count_gold_and_system_outputs(
batch["tags"], batch_preds, NULL_LABEL_ID)
correct += crr_i
p_total += p_total_i
####################
# Write the result #
####################
self._write_predictions(fout_txt, record)
self._write_nearest_spans(
fout_txt, record, train_data, sampled_sent_ids, batch_sims,
batch_preds, print_knn)
fout_txt.close()
p, r, f = f_score(correct, p_total, self.n_gold_spans)
self.logger.info("-- Time: %f seconds" % (time.time() - start_time))
self.logger.info(
"-- {} set\tF:{:>7.2%} P:{:>7.2%} ({:>5}/{:>5}) R:{:>7.2%} ({:>5}/{:>5})"
.format(data_name, f, p, correct, p_total, r, correct,
self.n_gold_spans))
@staticmethod
def _write_predictions(fout_txt, record):
fout_txt.write("-SENT:%d || %s || %s\n" % (
record["sent_id"],
" ".join(record["words"]),
" ".join(["(%s,%d,%d)" % (r, i, j) for (r, i, j) in record["tags"]])))
def _write_nearest_spans(self, fout_txt, record, train_data,
sampled_sent_ids, batch_sims, batch_preds,
print_knn):
def _write_train_sents(_sampled_train_sents):
for _train_record in _sampled_train_sents:
fout_txt.write("--kNN:%d || %s || %s\n" % (
_train_record["sent_id"],
" ".join(_train_record["words"]),
" ".join(["(%s,%d,%d)" % (r, i, j)
for (r, i, j) in _train_record["tags"]])))
def _write_gold_and_pred_spans(_record, _pred_label_id, _span_boundaries):
if (i, j) in _span_boundaries:
_index = _span_boundaries.index((i, j))
gold_label = _record["tags"][_index][0]
else:
gold_label = "O"
pred_label = self.rev_tag_dict[_pred_label_id]
fout_txt.write("##(%d,%d) || %s || %s || %s\n" % (
i, j, " ".join(record["words"][i: j + 1]), pred_label, gold_label))
def _get_nearest_spans(_sampled_train_sents):
_nearest_spans = []
_prev_indx = 0
_temp_indx = 0
for _record in _sampled_train_sents:
_indx_i, _indx_j = get_span_indices(n_words=len(_record["words"]),
max_span_len=self.max_span_len)
_temp_indx += len(_indx_i)
_temp_scores = scores[_prev_indx: _temp_indx]
assert len(_temp_scores) == len(_indx_i) == len(_indx_j)
_nearest_spans.extend(
get_scores_and_spans(spans=_record["tags"],
scores=_temp_scores,
sent_id=_record["sent_id"],
indx_i=_indx_i,
indx_j=_indx_j))
_prev_indx = _temp_indx
return _nearest_spans
def _write_nearest_spans_for_each_span(_sampled_train_sents):
nearest_spans = _get_nearest_spans(_sampled_train_sents)
nearest_spans.sort(key=lambda span: span[-1], reverse=True)
for rank, (r, sent_id, i, j, score) in enumerate(nearest_spans[:10]):
mention = " ".join(train_data[sent_id]["words"][i: j + 1])
text = "{} || {} || sent:{} || ({},{}) || {:.3g}".format(
r, mention, sent_id, i, j, score)
fout_txt.write("####RANK:%d %s\n" % (rank, text))
sampled_train_sents = [train_data[sent_id]
for sent_id in sampled_sent_ids]
if print_knn:
_write_train_sents(sampled_train_sents)
sims = batch_sims[0] # 1D: n_spans, 2D: n_instances
preds = batch_preds[0] # 1D: n_spans
indx_i, indx_j = get_span_indices(n_words=len(record["words"]),
max_span_len=self.max_span_len)
span_boundaries = [(i, j) for _, i, j in record["tags"]]
assert len(sims) == len(preds) == len(indx_i) == len(indx_j)
for scores, pred_label_id, i, j in zip(sims, preds, indx_i, indx_j):
if pred_label_id == NULL_LABEL_ID and (i, j) not in span_boundaries:
continue
_write_gold_and_pred_spans(record, pred_label_id, span_boundaries)
_write_nearest_spans_for_each_span(sampled_train_sents)
fout_txt.write("\n")
def save_span_representation(self, data_name, preprocessor):
self.logger.info(str(self.cfg))
########################
# Load validation data #
########################
valid_data = preprocessor.load_dataset(
self.cfg["data_path"], keep_number=True,
lowercase=self.cfg["char_lowercase"])
valid_data = valid_data[:self.cfg["data_size"]]
dataset = preprocessor.build_dataset(valid_data,
self.word_dict,
self.char_dict,
self.tag_dict)
self.logger.info("Valid sentences: {:>7}".format(len(dataset)))
#############
# Main loop #
#############
start_time = time.time()
gold_labels = {}
fout_path = os.path.join(self.cfg["checkpoint_path"],
"%s.span_reps.hdf5" % data_name)
fout = h5py.File(fout_path, 'w')
print("PREDICTION START")
for record, data in zip(valid_data, dataset):
valid_sent_id = record["sent_id"]
batch = self.make_one_batch_for_target(data, valid_sent_id)
if (valid_sent_id + 1) % 100 == 0:
print("%d" % (valid_sent_id + 1), flush=True, end=" ")
##############
# Prediction #
##############
feed_dict = self._get_feed_dict(batch)
span_reps = self.sess.run([self.span_rep], feed_dict)[0][0]
span_tags = batch["tags"][0]
assert len(span_reps) == len(span_tags)
##################
# Add the result #
##################
fout.create_dataset(
name='{}'.format(valid_sent_id),
dtype='float32',
data=span_reps)
gold_labels[valid_sent_id] = [self.rev_tag_dict[int(tag)]
for tag in span_tags]
fout.close()
path = os.path.join(self.cfg["checkpoint_path"],
"%s.gold_labels.json" % data_name)
write_json(path, gold_labels)
self.logger.info(
"-- Time: %f seconds\nFINISHED." % (time.time() - start_time))
def predict_on_command_line(self, preprocessor):
def _load_glove(glove_path):
vocab = {}
vectors = []
total = int(4e5)
with codecs.open(glove_path, mode='r', encoding='utf-8') as f:
for line in tqdm(f, total=total, desc="Load glove"):
line = line.lstrip().rstrip().split(" ")
vocab[line[0]] = len(vocab)
vectors.append([float(x) for x in line[1:]])
assert len(vocab) == len(vectors)
return vocab, np.asarray(vectors)
def _mean_vectors(sents, emb, vocab):
unk_vec = np.zeros(emb.shape[1])
mean_vecs = []
for words in sents:
vecs = []
for word in words:
word = word.lower()
if word in vocab:
vec = emb[vocab[word]]
else:
vec = unk_vec
vecs.append(vec)
mean_vecs.append(np.mean(vecs, axis=0))
return mean_vecs
def _cosine_sim(p0, p1):
d = (norm(p0) * norm(p1))
if d > 0:
return np.dot(p0, p1) / d
return 0.0
def _setup_repository(_train_sents, _train_data=None):
if self.cfg["knn_sampling"] == "random":
_train_sent_ids = [_sent_id for _sent_id in range(len(_train_sents))]
_vocab = _glove = _train_embs = None
else:
_train_sent_ids = None
_vocab, _glove = _load_glove("data/emb/glove.6B.100d.txt")
_train_words = [[w.lower() for w in _train_record["words"]]
for _train_record in _train_data]
_train_embs = _mean_vectors(_train_words, _glove, _vocab)
return _train_sent_ids, _train_embs, _vocab, _glove
def _make_ids(_words):
_char_ids = []
_word_ids = []
for word in _words:
_char_ids.append([self.char_dict[char]
if char in self.char_dict else self.char_dict[UNK]
for char in word])
word = word_convert(word, keep_number=False, lowercase=True)
_word_ids.append(self.word_dict[word]
if word in self.word_dict else self.word_dict[UNK])
return _char_ids, _word_ids
def _retrieve_knn_train_sents(_record, _train_embs, _vocab, _glove):
test_words = [w.lower() for w in _record["words"]]
test_emb = _mean_vectors([test_words], _glove, _vocab)[0]
sim = [_cosine_sim(train_emb, test_emb) for train_emb in _train_embs]
arg_sort = np.argsort(sim)
# (c) Facebook, Inc. and its affiliates. Confidential and proprietary.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
# miscellaneous
import builtins
import datetime
import socket
import json
import sys
import time
# onnx
# The onnx import causes deprecation warnings every time workers
# are spawned during testing. So, we filter out those warnings.
import warnings
import dlrm_data_pytorch as dp
# For distributed run
import extend_distributed as ext_dist
import mlperf_logger
# numpy
import numpy as np
import sklearn.metrics
# grpc
from benchmark import grpc_client, grpc_server
# pytorch
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
from torch._ops import ops
from torch.autograd.profiler import record_function
from torch.nn.parallel.parallel_apply import parallel_apply
from torch.nn.parallel.replicate import replicate
from torch.nn.parallel.scatter_gather import gather, scatter
from torch.nn.parameter import Parameter
from torch.optim.lr_scheduler import _LRScheduler
from torch.utils.tensorboard import SummaryWriter
# # mixed-dimension trick
# from tricks.md_embedding_bag import PrEmbeddingBag, md_solver
# # quotient-remainder trick
# from tricks.qr_embedding_bag import QREmbeddingBag
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
try:
import onnx
except ImportError as error:
print("Unable to import onnx. ", error)
# from torchviz import make_dot
# import torch.nn.functional as Functional
# from torch.nn.parameter import Parameter
exc = getattr(builtins, "IOError", "FileNotFoundError")
def time_wrap(use_gpu):
use_gpu = True
if use_gpu:
torch.cuda.synchronize(torch.cuda.current_device())
return time.time()
def dlrm_wrap(X, lS_o, lS_i, use_gpu, device, ndevices=1):
with record_function("DLRM forward"):
if use_gpu: # .cuda()
# lS_i can be either a list of tensors or a stacked tensor.
# Handle each case below:
if ndevices == 1:
lS_i = (
[S_i.to(device) for S_i in lS_i]
if isinstance(lS_i, list)
else lS_i.to(device)
)
lS_o = (
[S_o.to(device) for S_o in lS_o]
if isinstance(lS_o, list)
else lS_o.to(device)
)
return dlrm(X.to(device), lS_o, lS_i)
def profile_hook(state_dict, bucket):
rank = dist.get_rank()
tensor = bucket.get_tensor()
metrics = {}
state_dict[bucket.get_index()] = metrics
# record event before comm
e_bfr = torch.cuda.Event(enable_timing=True)
metrics["e_bfr"] = e_bfr
with torch.cuda.device(rank):
e_bfr.record()
# launch async comm
fut = dist.all_reduce(tensor, async_op=True).get_future()
def cb(fut):
# record event after comm
e_aft = torch.cuda.Event(enable_timing=True)
metrics["e_aft"] = e_aft
with torch.cuda.device(rank):
e_aft.record()
fut.then(cb)
return fut
def loss_fn_wrap(Z, T, use_gpu, device):
with record_function("DLRM loss compute"):
if True or args.loss_function == "mse" or args.loss_function == "bce":
return dlrm.loss_fn(Z, T.to(device))
elif args.loss_function == "wbce":
loss_ws_ = dlrm.loss_ws[T.data.view(-1).long()].view_as(T).to(device)
loss_fn_ = dlrm.loss_fn(Z, T.to(device))
loss_sc_ = loss_ws_ * loss_fn_
return loss_sc_.mean()
# The following function is a wrapper to avoid checking this multiple times in the
# loop below.
def unpack_batch(b):
# Experiment with unweighted samples
return b[0], b[1], b[2], b[3], torch.ones(b[3].size()), None
class LRPolicyScheduler(_LRScheduler):
def __init__(self, optimizer, num_warmup_steps, decay_start_step, num_decay_steps):
self.num_warmup_steps = num_warmup_steps
self.decay_start_step = decay_start_step
self.decay_end_step = decay_start_step + num_decay_steps
self.num_decay_steps = num_decay_steps
if self.decay_start_step < self.num_warmup_steps:
sys.exit("Learning rate warmup must finish before the decay starts")
super(LRPolicyScheduler, self).__init__(optimizer)
def get_lr(self):
step_count = self._step_count
if step_count < self.num_warmup_steps:
# warmup
scale = 1.0 - (self.num_warmup_steps - step_count) / self.num_warmup_steps
lr = [base_lr * scale for base_lr in self.base_lrs]
self.last_lr = lr
elif self.decay_start_step <= step_count and step_count < self.decay_end_step:
# decay
decayed_steps = step_count - self.decay_start_step
scale = ((self.num_decay_steps - decayed_steps) / self.num_decay_steps) ** 2
min_lr = 0.0000001
lr = [max(min_lr, base_lr * scale) for base_lr in self.base_lrs]
self.last_lr = lr
else:
if self.num_decay_steps > 0:
# freeze at last, either because we're after decay
# or because we're between warmup and decay
lr = self.last_lr
else:
# do not adjust
lr = self.base_lrs
return lr
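# Worked example with made-up settings (not taken from any config): for
# base_lr = 0.1, num_warmup_steps = 1000, decay_start_step = 5000 and
# num_decay_steps = 4000:
#   step  250: warmup scale = 250/1000             -> lr = 0.025
#   step 3000: between warmup and decay            -> lr held at last warmup value
#   step 7000: decay scale = ((4000-2000)/4000)**2 -> lr = 0.025
#   after the decay window the lr stays frozen at its last (floored) value.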
### define dlrm in PyTorch ###
class DLRM_Net(nn.Module):
def create_mlp(self, ln, sigmoid_layer):
# build MLP layer by layer
layers = nn.ModuleList()
for i in range(0, ln.size - 1):
n = ln[i]
m = ln[i + 1]
# construct fully connected operator
print(f"Creating linear with {int(n)} {int(m)}")
LL = nn.Linear(int(n), int(m), bias=True)
# initialize the weights
# with torch.no_grad():
# custom Xavier input, output or two-sided fill
mean = 0.0 # std_dev = np.sqrt(variance)
std_dev = np.sqrt(2 / (m + n)) # np.sqrt(1 / m) # np.sqrt(1 / n)
W = np.random.normal(mean, std_dev, size=(m, n)).astype(np.float32)
std_dev = np.sqrt(1 / m) # np.sqrt(2 / (m + 1))
bt = np.random.normal(mean, std_dev, size=m).astype(np.float32)
# approach 1
LL.weight.data = torch.tensor(W, requires_grad=True)
LL.bias.data = torch.tensor(bt, requires_grad=True)
# approach 2
# LL.weight.data.copy_(torch.tensor(W))
# LL.bias.data.copy_(torch.tensor(bt))
# approach 3
# LL.weight = Parameter(torch.tensor(W),requires_grad=True)
# LL.bias = Parameter(torch.tensor(bt),requires_grad=True)
layers.append(LL)
# construct sigmoid or relu operator
if i == sigmoid_layer:
layers.append(nn.Sigmoid())
else:
layers.append(nn.ReLU())
# approach 1: use ModuleList
# return layers
# approach 2: use Sequential container to wrap all layers
print(f"Created {len(layers)} layers")
return torch.nn.Sequential(*layers)
def create_emb(self, m, ln, weighted_pooling=None):
emb_l = nn.ModuleList()
v_W_l = []
for i in range(0, ln.size):
# NOTE: sharded embedding removed, all ranks will use
# local embedding table.
# if ext_dist.my_size > 1:
# if i not in self.local_emb_indices:
# continue
n = ln[i]
# construct embedding operator
if self.qr_flag and n > self.qr_threshold:
EE = QREmbeddingBag(
n,
m,
self.qr_collisions,
operation=self.qr_operation,
mode="sum",
sparse=True,
)
elif self.md_flag and n > self.md_threshold:
_m = m[i]
base = max(m)
EE = PrEmbeddingBag(n, _m, base)
# use np initialization as below for consistency...
W = np.random.uniform(
low=-np.sqrt(1 / n), high=np.sqrt(1 / n), size=(n, _m)
).astype(np.float32)
EE.embs.weight.data = torch.tensor(W, requires_grad=True)
else:
EE = nn.EmbeddingBag(n, m, mode="sum", sparse=True).cuda(
dist.get_rank() % torch.cuda.device_count()
)
# initialize embeddings
# nn.init.uniform_(EE.weight, a=-np.sqrt(1 / n), b=np.sqrt(1 / n))
W = np.random.uniform(
low=-np.sqrt(1 / n), high=np.sqrt(1 / n), size=(n, m)
).astype(np.float32)
# approach 1
EE.weight.data = torch.tensor(
W,
requires_grad=True,
device=dist.get_rank() % torch.cuda.device_count(),
)
# approach 2
# EE.weight.data.copy_(torch.tensor(W))
# approach 3
# EE.weight = Parameter(torch.tensor(W),requires_grad=True)
if weighted_pooling is None:
v_W_l.append(None)
else:
v_W_l.append(torch.ones(n, dtype=torch.float32))
# DDP for embedding
ranks = [i for i in range(dist.get_world_size())]
gloo_pg = dist.new_group(ranks, backend="gloo")
dev_id = dist.get_rank() % torch.cuda.device_count()
print(f"Using dev_id {dev_id} for rank {dist.get_rank()}")
ee_params = {p.device for p in EE.parameters()}
print(f"ee_params {ee_params}")
if not self.use_grpc:
# Only do DDP for embeddings when not using grpc.
EE = ext_dist.DDP(EE, device_ids=[dev_id], process_group=gloo_pg)
EE._set_ddp_runtime_logging_sample_rate(1)
# self.state_dicts[i] = {}
# EE.register_comm_hook(self.state_dicts[i], profile_hook)
# if not self.sparse_state_dict:
# self.sparse_state_dict = {}
# self.sparse_state_dict
print(f"Type of EE is {type(EE)}")
emb_l.append(EE)
output = [None for _ in range(dist.get_world_size())]
if self.use_grpc:
if dist.get_rank() == 0:
print(" -- creating embedding on server --")
embs = []
for e in emb_l:
embs.append(e.module if isinstance(e, torch.nn.parallel.DistributedDataParallel) else e)
print(f"embs {embs}, {set(type(c) for c in embs)}")
self.client.create_dlrm_embedding(name="create_dlrm_embedding", size=ln.size, n=ln[0],m=m,cuda=True)
print("-- created embedding!! --")
dist.all_gather_object(output, {1:2}, group=None)
return emb_l, v_W_l
def __init__(
self,
args,
m_spa=None,
ln_emb=None,
ln_bot=None,
ln_top=None,
arch_interaction_op=None,
arch_interaction_itself=False,
sigmoid_bot=-1,
sigmoid_top=-1,
sync_dense_params=True,
loss_threshold=0.0,
ndevices=-1,
qr_flag=False,
qr_operation="mult",
qr_collisions=0,
qr_threshold=200,
md_flag=False,
md_threshold=200,
weighted_pooling=None,
):
super(DLRM_Net, self).__init__()
if (
(m_spa is not None)
and (ln_emb is not None)
and (ln_bot is not None)
and (ln_top is not None)
and (arch_interaction_op is not None)
):
self.use_grpc = args.grpc
self.state_dicts = {}
# create grpc client if needed
if self.use_grpc:
print("Creating grpc client")
maddr = os.environ["GRPC_MASTER_ADDR"]
mport = os.environ["GRPC_MASTER_PORT"]
client = grpc_client.Client(f"{maddr}:{mport}")
self.client = client
ext_dist.print_all("Created client!")
# Create embedding
num_tries = 0
sleep_time = 2
max_num_tries = 8
conn_to_server = False
while not conn_to_server and num_tries < max_num_tries:
num_tries += 1
try:
client.create_embedding(name="create_embedding", tensor=torch.tensor([0]))
conn_to_server = True
break
except:
time.sleep(sleep_time)
sleep_time *= 2
if not conn_to_server:
raise ValueError(f"Client failed to connect to grpc master after {num_tries} tries!")
ext_dist.print_all("Created embedding!")
idx = torch.tensor([1])
cpu_tensors = client.embedding_lookup(name="embedding", tensor=idx, cuda=False)
ext_dist.print_all(f"Client on {socket.gethostname()} got {cpu_tensors}")
# self.client.terminate()
# save arguments
self.ndevices = ndevices
self.output_d = 0
self.parallel_model_batch_size = -1
self.parallel_model_is_not_prepared = True
self.arch_interaction_op = arch_interaction_op
self.arch_interaction_itself = arch_interaction_itself
self.sync_dense_params = sync_dense_params
self.loss_threshold = loss_threshold
if weighted_pooling is not None and weighted_pooling != "fixed":
self.weighted_pooling = "learned"
else:
self.weighted_pooling = weighted_pooling
# create variables for QR embedding if applicable
self.qr_flag = qr_flag
if self.qr_flag:
self.qr_collisions = qr_collisions
self.qr_operation = qr_operation
self.qr_threshold = qr_threshold
# create variables for MD embedding if applicable
self.md_flag = md_flag
if self.md_flag:
self.md_threshold = md_threshold
# If running distributed, get local slice of embedding tables
# Note: Sharded embedding removed, all ranks will use local embedding.
# if ext_dist.my_size > 1:
# n_emb = len(ln_emb)
# self.n_global_emb = n_emb
# self.n_local_emb, self.n_emb_per_rank = ext_dist.get_split_lengths(
# n_emb
# )
# self.local_emb_slice = ext_dist.get_my_slice(n_emb)
# self.local_emb_indices = list(range(n_emb))[self.local_emb_slice]
# create operators
if ndevices <= 1:
print("--- calling create emb in DLRM __init__")
self.emb_l, w_list = self.create_emb(m_spa, ln_emb, weighted_pooling)
if self.weighted_pooling == "learned":
self.v_W_l = nn.ParameterList()
for w in w_list:
self.v_W_l.append(Parameter(w))
else:
self.v_W_l = w_list
self.bot_l = self.create_mlp(ln_bot, sigmoid_bot)
self.top_l = self.create_mlp(ln_top, sigmoid_top)
# quantization
self.quantize_emb = False
self.emb_l_q = []
self.quantize_bits = 32
# specify the loss function
if args.loss_function == "mse":
self.loss_fn = torch.nn.MSELoss(reduction="mean")
elif args.loss_function == "bce":
self.loss_fn = torch.nn.BCELoss(reduction="mean")
elif args.loss_function == "wbce":
self.loss_ws = torch.tensor(
np.fromstring(args.loss_weights, dtype=float, sep="-")
)
self.loss_fn = torch.nn.BCELoss(reduction="none")
else:
sys.exit(
"ERROR: --loss-function=" + args.loss_function + " is not supported"
)
def apply_mlp(self, x, layers):
# approach 1: use ModuleList
# for layer in layers:
# x = layer(x)
# return x
# approach 2: use Sequential container to wrap all layers
return layers(x)
def apply_emb(self, lS_o, lS_i, emb_l, v_W_l):
# WARNING: notice that we are processing the batch at once. We implicitly
# assume that the data is laid out such that:
# 1. each embedding is indexed with a group of sparse indices,
# corresponding to a single lookup
# 2. for each embedding the lookups are further organized into a batch
# 3. for a list of embedding tables there is a list of batched lookups
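# Toy illustration of the layout described above (made-up values): for one
# table k and a batch of three samples,
#   lS_i[k] = tensor([4, 7, 7, 1, 9])  # concatenated sparse indices
#   lS_o[k] = tensor([0, 2, 3])        # offsets: lookups are [4,7], [7], [1,9]
# EmbeddingBag with mode="sum" pools each group of rows, giving one pooled
# row vector per sample.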
self.Vs = [None for _ in range(len(lS_i))]
ly = []
for k, sparse_index_group_batch in enumerate(lS_i):
sparse_offset_group_batch = lS_o[k]
# embedding lookup
# We are using EmbeddingBag, which implicitly uses sum operator.
# The embeddings are represented as tall matrices, with sum
# happening vertically across 0 axis, resulting in a row vector
# E = emb_l[k]
if v_W_l[k] is not None:
per_sample_weights = v_W_l[k].gather(0, sparse_index_group_batch)
else:
per_sample_weights = None
if self.quantize_emb:
s1 = self.emb_l_q[k].element_size() * self.emb_l_q[k].nelement()
s2 = self.emb_l_q[k].element_size() * self.emb_l_q[k].nelement()
print("quantized emb sizes:", s1, s2)
if self.quantize_bits == 4:
QV = ops.quantized.embedding_bag_4bit_rowwise_offsets(
self.emb_l_q[k],
sparse_index_group_batch,
sparse_offset_group_batch,
per_sample_weights=per_sample_weights,
)
elif self.quantize_bits == 8:
QV = ops.quantized.embedding_bag_byte_rowwise_offsets(
self.emb_l_q[k],
sparse_index_group_batch,
sparse_offset_group_batch,
per_sample_weights=per_sample_weights,
)
ly.append(QV)
else:
# E = emb_l[k]
# # print(" --- EMBEDDING LOOK UP HERE f{type(sparse_index_grou)} ---")
# V = E(
# sparse_index_group_batch,
# sparse_offset_group_batch,
# per_sample_weights=per_sample_weights,
# )
if self.use_grpc:
# ext_dist.print_all("Trying grpc embedding lookup")
futs = self.client.dlrm_embedding_lookup_async(
name="dlrm_embedding_lookup_async",
k=k,
sparse_index_group_batch=sparse_index_group_batch,
sparse_offset_group_batch=sparse_offset_group_batch,
per_sample_weights=per_sample_weights,
cuda=False,
)
tens = self.client.wait_all_futs(futs, cuda=False)
ret_tensors = []
for t in tens:
ret_tensors.append(t.to(torch.cuda.current_device()))
V = ret_tensors[0]
# V = futs[0]
# Save embedding shapes
try:
self.Vs[k] = emb_l[k].module.weight.shape
except:
self.Vs[k] = emb_l[k].weight.shape
# ext_dist.print_all(f"Client got back embedding {ret_tensors} compared to local {V}")
else:
E = emb_l[k]
V = E(
sparse_index_group_batch,
sparse_offset_group_batch,
per_sample_weights=per_sample_weights,
)
ly.append(V)
# print(ly)
return ly
# using quantizing functions from caffe2/aten/src/ATen/native/quantized/cpu
def quantize_embedding(self, bits):
n = len(self.emb_l)
self.emb_l_q = [None] * n
for k in range(n):
if bits == 4:
self.emb_l_q[k] = ops.quantized.embedding_bag_4bit_prepack(
self.emb_l[k].weight
)
elif bits == 8:
self.emb_l_q[k] = ops.quantized.embedding_bag_byte_prepack(
self.emb_l[k].weight
)
else:
return
self.emb_l = None
self.quantize_emb = True
self.quantize_bits = bits
def interact_features(self, x, ly):
if self.arch_interaction_op == "dot":
# concatenate dense and sparse features
(batch_size, d) = x.shape
T = torch.cat([x] + ly, dim=1).view((batch_size, -1, d))
# perform a dot product
Z = torch.bmm(T, torch.transpose(T, 1, 2))
# append dense feature with the interactions (into a row vector)
# approach 1: all
# Zflat = Z.view((batch_size, -1))
# approach 2: unique
_, ni, nj = Z.shape
# approach 1: tril_indices
# offset = 0 if self.arch_interaction_itself else -1
# li, lj = torch.tril_indices(ni, nj, offset=offset)
# approach 2: custom
offset = 1 if self.arch_interaction_itself else 0
li = torch.tensor([i for i in range(ni) for j in range(i + offset)])
lj = torch.tensor([j for i in range(nj) for j in range(i + offset)])
Zflat = Z[:, li, lj]
# concatenate dense features and interactions
R = torch.cat([x] + [Zflat], dim=1)
elif self.arch_interaction_op == "cat":
# concatenation features (into a row vector)
R = torch.cat([x] + ly, dim=1)
else:
sys.exit(
"ERROR: --arch-interaction-op="
+ self.arch_interaction_op
+ " is not supported"
)
return R
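# Shape sketch with illustrative sizes only: for batch_size=B, a bottom-MLP
# output of width d=16 and 26 embeddings of the same width, T is (B, 27, 16),
# Z = bmm(T, T^T) is (B, 27, 27), and the custom lower-triangle indexing keeps
# 27*26/2 = 351 unique pairwise dot products, so for op "dot"
# R = cat([x, Zflat]) has shape (B, 16 + 351).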
def forward(self, dense_x, lS_o, lS_i):
e_before = torch.cuda.Event(enable_timing=True)
with torch.cuda.device(torch.cuda.current_device()):
e_before.record()
# Launch forward
fwd_return = self.sequential_forward(dense_x, lS_o, lS_i)
e_after = torch.cuda.Event(enable_timing=True)
with torch.cuda.device(torch.cuda.current_device()):
e_after.record()
# torch.cuda.synchronize(torch.cuda.current_device())
e_before.synchronize()
e_after.synchronize()
elapsed_time_ms = e_before.elapsed_time(e_after)
print(f"Forward {elapsed_time_ms} ms")
return fwd_return
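# NOTE: because of the early return above, the dispatch below
# (distributed / sequential / parallel forward) is unreachable in this build;
# every call is timed and routed through sequential_forward.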
if ext_dist.my_size > 1:
# multi-node multi-device run
return self.distributed_forward(dense_x, lS_o, lS_i)
elif self.ndevices <= 1:
# single device run
return self.sequential_forward(dense_x, lS_o, lS_i)
else:
# single-node multi-device run
return self.parallel_forward(dense_x, lS_o, lS_i)
def distributed_forward(self, dense_x, lS_o, lS_i):
batch_size = dense_x.size()[0]
# WARNING: # of ranks must be <= batch size in distributed_forward call
if batch_size < ext_dist.my_size:
sys.exit(
"ERROR: batch_size (%d) must be larger than number of ranks (%d)"
% (batch_size, ext_dist.my_size)
)
if batch_size % ext_dist.my_size != 0:
sys.exit(
"ERROR: batch_size %d can not split across %d ranks evenly"
% (batch_size, ext_dist.my_size)
)
dense_x = dense_x[ext_dist.get_my_slice(batch_size)]
lS_o = lS_o[self.local_emb_slice]
lS_i = lS_i[self.local_emb_slice]
if (len(self.emb_l) != len(lS_o)) or (len(self.emb_l) != len(lS_i)):
sys.exit(
"ERROR: corrupted model input detected in distributed_forward call"
)
# embeddings
with record_function("DLRM embedding forward"):
ly = self.apply_emb(lS_o, lS_i, self.emb_l, self.v_W_l)
# WARNING: Note that at this point we have the result of the embedding lookup
# for the entire batch on each rank. We would like to obtain partial results
# corresponding to all embedding lookups, but part of the batch on each rank.
# Therefore, matching the distribution of output of bottom mlp, so that both
# could be used for subsequent interactions on each device.
if len(self.emb_l) != len(ly):
sys.exit("ERROR: corrupted intermediate result in distributed_forward call")
a2a_req = ext_dist.alltoall(ly, self.n_emb_per_rank)
with record_function("DLRM bottom nlp forward"):
x = self.apply_mlp(dense_x, self.bot_l)
ly = a2a_req.wait()
ly = list(ly)
# interactions
with record_function("DLRM interaction forward"):
z = self.interact_features(x, ly)
# top mlp
with record_function("DLRM top nlp forward"):
p = self.apply_mlp(z, self.top_l)
# clamp output if needed
if 0.0 < self.loss_threshold and self.loss_threshold < 1.0:
z = torch.clamp(p, min=self.loss_threshold, max=(1.0 - self.loss_threshold))
else:
z = p
return z
def sequential_forward(self, dense_x, lS_o, lS_i):
# process dense features (using bottom mlp), resulting in a row vector
# process sparse features(using embeddings), resulting in a list of row vectors
ly = self.apply_emb(lS_o, lS_i, self.emb_l, self.v_W_l)
x = self.apply_mlp(dense_x, self.bot_l)
# debug prints
# print("intermediate")
# print(x.detach().cpu().numpy())
# for y in ly:
# print(y.detach().cpu().numpy())
# interact features (dense and sparse)
# Wait on async grpc embedding lookup
# if self.use_grpc:
# futs = ly
# fwd_comm_start = time.time()
# cpu_tensors = self.client.wait_all_futs(futs, cuda=False)
# fwd_comm_end = time.time()
# print(f'Fwd comm: {(fwd_comm_end - fwd_comm_start) / 1000} ms')
# tmp = cpu_tensors
# for i in range(len(tmp)):
# ly[i] = tmp[i].to(torch.cuda.current_device())
z = self.interact_features(x, ly)
# print(z.detach().cpu().numpy())
# obtain probability of a click (using top mlp)
p = self.apply_mlp(z, self.top_l)
# clamp output if needed
if 0.0 < self.loss_threshold and self.loss_threshold < 1.0:
z = torch.clamp(p, min=self.loss_threshold, max=(1.0 - self.loss_threshold))
else:
z = p
return z
def parallel_forward(self, dense_x, lS_o, lS_i):
### prepare model (overwrite) ###
# WARNING: # of devices must be >= batch size in parallel_forward call
batch_size = dense_x.size()[0]
ndevices = min(self.ndevices, batch_size, len(self.emb_l))
device_ids = range(ndevices)
# WARNING: must redistribute the model if the mini-batch size changes (this is
# common for the last mini-batch, when the dataset size is not a multiple of
# the batch size)
if self.parallel_model_batch_size != batch_size:
self.parallel_model_is_not_prepared = True
if self.parallel_model_is_not_prepared or self.sync_dense_params:
# replicate mlp (data parallelism)
self.bot_l_replicas = replicate(self.bot_l, device_ids)
self.top_l_replicas = replicate(self.top_l, device_ids)
self.parallel_model_batch_size = batch_size
if self.parallel_model_is_not_prepared:
# distribute embeddings (model parallelism)
t_list = []
w_list = []
for k, emb in enumerate(self.emb_l):
d = torch.device("cuda:" + str(k % ndevices))
t_list.append(emb.to(d))
if self.weighted_pooling == "learned":
w_list.append(Parameter(self.v_W_l[k].to(d)))
elif self.weighted_pooling == "fixed":
w_list.append(self.v_W_l[k].to(d))
else:
w_list.append(None)
self.emb_l = nn.ModuleList(t_list)
if self.weighted_pooling == "learned":
self.v_W_l = nn.ParameterList(w_list)
else:
self.v_W_l = w_list
self.parallel_model_is_not_prepared = False
### prepare input (overwrite) ###
# scatter dense features (data parallelism)
# print(dense_x.device)
dense_x = scatter(dense_x, device_ids, dim=0)
# distribute sparse features (model parallelism)
if (len(self.emb_l) != len(lS_o)) or (len(self.emb_l) != len(lS_i)):
sys.exit("ERROR: corrupted model input detected in parallel_forward call")
t_list = []
i_list = []
for k, _ in enumerate(self.emb_l):
d = torch.device("cuda:" + str(k % ndevices))
t_list.append(lS_o[k].to(d))
i_list.append(lS_i[k].to(d))
lS_o = t_list
lS_i = i_list
### compute results in parallel ###
# bottom mlp
# WARNING: Note that the self.bot_l is a list of bottom mlp modules
# that have been replicated across devices, while dense_x is a tuple of dense
# inputs that has been scattered across devices on the first (batch) dimension.
# The output is a list of tensors scattered across devices according to the
# distribution of dense_x.
x = parallel_apply(self.bot_l_replicas, dense_x, None, device_ids)
# debug prints
# print(x)
# embeddings
ly = self.apply_emb(lS_o, lS_i, self.emb_l, self.v_W_l)
# debug prints
# print(ly)
# butterfly shuffle (implemented inefficiently for now)
# WARNING: Note that at this point we have the result of the embedding lookup
# for the entire batch on each device. We would like to obtain partial results
# corresponding to all embedding lookups, but part of the batch on each device.
# Therefore, matching the distribution of output of bottom mlp, so that both
# could be used for subsequent interactions on each device.
if len(self.emb_l) != len(ly):
sys.exit("ERROR: corrupted intermediate result in parallel_forward call")
t_list = []
for k, _ in enumerate(self.emb_l):
d = torch.device("cuda:" + str(k % ndevices))
y = scatter(ly[k], device_ids, dim=0)
t_list.append(y)
# adjust the list to be ordered per device
ly = list(map(lambda y: list(y), zip(*t_list)))
# debug prints
# print(ly)
# interactions
z = []
for k in range(ndevices):
zk = self.interact_features(x[k], ly[k])
z.append(zk)
# debug prints
# print(z)
# top mlp
# WARNING: Note that the self.top_l is a list of top mlp modules that
# have been replicated across devices, while z is a list of interaction results
# that by construction are scattered across devices on the first (batch) dim.
# The output is a list of tensors scattered across devices according to the
# distribution of z.
p = parallel_apply(self.top_l_replicas, z, None, device_ids)
### gather the distributed results ###
p0 = gather(p, self.output_d, dim=0)
# clamp output if needed
if 0.0 < self.loss_threshold and self.loss_threshold < 1.0:
z0 = torch.clamp(
p0, min=self.loss_threshold, max=(1.0 - self.loss_threshold)
)
else:
z0 = p0
return z0
def dash_separated_ints(value):
vals = value.split("-")
for val in vals:
try:
int(val)
except ValueError:
raise argparse.ArgumentTypeError(
"%s is not a valid dash separated list of ints" % value
)
return value
def dash_separated_floats(value):
vals = value.split("-")
for val in vals:
try:
float(val)
except ValueError:
raise argparse.ArgumentTypeError(
"%s is not a valid dash separated list of floats" % value
)
return value
def inference(
args,
dlrm,
best_acc_test,
best_auc_test,
test_ld,
device,
use_gpu,
log_iter=-1,
):
test_accu = 0
test_samp = 0
if args.mlperf_logging:
scores = []
targets = []
for i, testBatch in enumerate(test_ld):
# early exit if nbatches was set by the user and was exceeded
if nbatches > 0 and i >= nbatches:
break
X_test, lS_o_test, lS_i_test, T_test, W_test, CBPP_test = unpack_batch(
testBatch
)
# Skip the batch if batch size not multiple of total ranks
if ext_dist.my_size > 1 and X_test.size(0) % ext_dist.my_size != 0:
print("Warning: Skiping the batch %d with size %d" % (i, X_test.size(0)))
continue
# forward pass
Z_test = dlrm_wrap(
X_test,
lS_o_test,
lS_i_test,
use_gpu,
device,
ndevices=ndevices,
)
### gather the distributed results on each rank ###
# For some reason it requires explicit sync before all_gather call if
# tensor is on GPU memory
if Z_test.is_cuda:
torch.cuda.synchronize()
(_, batch_split_lengths) = ext_dist.get_split_lengths(X_test.size(0))
if ext_dist.my_size > 1:
Z_test = ext_dist.all_gather(Z_test, batch_split_lengths)
if args.mlperf_logging:
S_test = Z_test.detach().cpu().numpy() # numpy array
T_test = T_test.detach().cpu().numpy() # numpy array
scores.append(S_test)
targets.append(T_test)
else:
with record_function("DLRM accuracy compute"):
# compute loss and accuracy
S_test = Z_test.detach().cpu().numpy() # numpy array
T_test = T_test.detach().cpu().numpy() # numpy array
mbs_test = T_test.shape[0] # = mini_batch_size except last
A_test = np.sum((np.round(S_test, 0) == T_test).astype(np.uint8))
test_accu += A_test
test_samp += mbs_test
if args.mlperf_logging:
with record_function("DLRM mlperf sklearn metrics compute"):
            scores = np.concatenate(scores, axis=0)
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the available qubit operations for quantum chemistry purposes.
"""
import pytest
import numpy as np
from scipy.linalg import expm, fractional_matrix_power
import pennylane as qml
from pennylane import numpy as pnp
from gate_data import (
X,
StateZeroProjector,
StateOneProjector,
ControlledPhaseShift,
SingleExcitation,
SingleExcitationPlus,
SingleExcitationMinus,
DoubleExcitation,
DoubleExcitationPlus,
DoubleExcitationMinus,
OrbitalRotation,
)
PARAMETRIZED_QCHEM_OPERATIONS = [
qml.SingleExcitation(0.14, wires=[0, 1]),
qml.SingleExcitationMinus(0.14, wires=[0, 1]),
qml.SingleExcitationPlus(0.14, wires=[0, 1]),
qml.DoubleExcitation(0.14, wires=[0, 1, 2, 3]),
qml.DoubleExcitationMinus(0.14, wires=[0, 1, 2, 3]),
qml.DoubleExcitationPlus(0.14, wires=[0, 1, 2, 3]),
qml.OrbitalRotation(0.14, wires=[0, 1, 2, 3]),
]
class TestParameterFrequencies:
@pytest.mark.parametrize("op", PARAMETRIZED_QCHEM_OPERATIONS)
def test_parameter_frequencies_match_generator(self, op, tol):
if not qml.operation.has_gen(op):
pytest.skip(f"Operation {op.name} does not have a generator defined to test against.")
gen = op.generator()
try:
mat = gen.matrix()
except (AttributeError, qml.operation.MatrixUndefinedError):
if isinstance(gen, qml.Hamiltonian):
mat = qml.utils.sparse_hamiltonian(gen).toarray()
elif isinstance(gen, qml.SparseHamiltonian):
mat = gen.sparse_matrix().toarray()
else:
pytest.skip(f"Operation {op.name}'s generator does not define a matrix.")
gen_eigvals = np.round(np.linalg.eigvalsh(mat), 8)
freqs_from_gen = qml.gradients.eigvals_to_frequencies(tuple(gen_eigvals))
freqs = op.parameter_frequencies
assert np.allclose(freqs, np.sort(freqs_from_gen), atol=tol)
class TestDecomposition:
@pytest.mark.parametrize("phi", [-0.1, 0.2, 0.5])
def test_single_excitation_plus_decomp(self, phi):
"""Tests that the SingleExcitationPlus operation calculates the correct decomposition.
Need to consider the matrix of CRY separately, as the control is wire 1
and the target is wire 0 in the decomposition. (Not applicable for
ControlledPhase as it has the same matrix representation regardless of the
control and target wires.)"""
decomp1 = qml.SingleExcitationPlus(phi, wires=[0, 1]).decomposition()
decomp2 = qml.SingleExcitationPlus.compute_decomposition(phi, wires=[0, 1])
for decomp in [decomp1, decomp2]:
mats = []
for i in reversed(decomp):
if i.wires.tolist() == [0]:
mats.append(np.kron(i.matrix(), np.eye(2)))
elif i.wires.tolist() == [1]:
                mats.append(np.kron(np.eye(2), i.matrix()))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 7 18:20:28 2019
@author: ishan
"""
import numpy as np
from numpy import linalg as LA
import os, sys
from numpy import linalg as la
import math
from matplotlib import pyplot as plt
try:
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
except:
pass
import cv2
cap = cv2.VideoCapture('project_video.mp4')
def image_processing(img, sthreshold = (100, 255), sxthreshold =(15, 255)):
# Undistorting the image
img = undistort(img)
img = np.copy(img)
# Converting to HLS color space and separate the L channel
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float64)
(h, l, s) = cv2.split(hls)
    # calculating derivative in x direction using Sobel filter
sobel_x = cv2.Sobel(l, cv2.CV_64F, 1, 0)
absolute_sobel = np.absolute(sobel_x)
scale_sobel = np.uint8(255*absolute_sobel/np.max(absolute_sobel))
# Threshold x gradient
sx_binary = np.zeros_like(scale_sobel)
sx_binary[(scale_sobel >= sxthreshold[0]) & (scale_sobel <= sxthreshold[1])] = 1
# Threshold s_channel
sbinary = np.zeros_like(s)
sbinary[(s >= sthreshold[0]) & (s <= sthreshold[1])] = 1
# clr = np.dstack((np.zeros_like(sx_binary), sx_binary, sbinary)) * 255
img_comb = np.zeros_like(sx_binary)
img_comb[((sbinary==1) | (sx_binary == 1))] = 1
return img_comb
def undistort(img):
# this function returns the undistorted image
# Camera matrix k
K = np.array([[ 1.15422732e+03 ,0.00000000e+00 ,6.71627794e+02],
[ 0.00000000e+00 ,1.14818221e+03 ,3.86046312e+02],
[ 0.00000000e+00 ,0.00000000e+00 ,1.00000000e+00]])
# distortion coefficients d
d = np.array([ -2.42565104e-01, -4.77893070e-02, -1.31388084e-03, -8.79107779e-05,2.20573263e-02])
h, w = img.shape[:2]
newcameramatrix, roi = cv2.getOptimalNewCameraMatrix(K, d, (w,h), 0,(w,h))
dst = cv2.undistort(img, K, d, None, newcameramatrix)
return dst
def perspective_warp(img):
# For calculating the perspective transform
h,w = img.shape[:2]
pts_src = np.float32([[550,460],[740, 460],[1280,720],[128, 720]])
pts_dst = np.float32([[0,0],[w, 0],[w,h],[0, h]])
P = cv2.getPerspectiveTransform(pts_src, pts_dst)
warp = cv2.warpPerspective(img, P, (img.shape[1],img.shape[0]))
return warp
def inv_perspective_warp(img):
# For calculating the inverse perspective transform
h,w = img.shape[:2]
pts_src = np.float32([[550,460],[740, 460],[1280,720],[128, 720]])
pts_dst = np.float32([[0,0],[w, 0],[w,h],[0, h]])
P = cv2.getPerspectiveTransform(pts_src, pts_dst)
warp = cv2.warpPerspective(img, np.linalg.inv(P), (img.shape[1],img.shape[0]))
return warp
def get_hist(img):
# Calculating the histogram
hist = np.sum(img[img.shape[0]//2:,:], axis=0)
return hist
left_p, left_q, left_r = [],[],[]
right_p, right_q, right_r = [],[],[]
def windows(img, min_pix = 1, margin=100,num_wind=9, windows_flag = True):
global left_p, left_q, left_r,right_p, right_q, right_r
l_point= np.empty(3)
r_point = np.empty(3)
out_img = np.dstack((img, img, img))*255
histogram = get_hist(img)
# find peaks of left and right halves
mid_point = int(histogram.shape[0]/2)
# Creating the base of bins/windows for left and right lanes
left_bin_base = np.argmax(histogram[:mid_point])
right_bin_base = np.argmax(histogram[mid_point:]) + mid_point
# Creating empty lists to receive left and right lane pixel indices
leftlane_indices = []
rightlane_indices = []
# Setting the height of windows
    bin_h = int(img.shape[0]/num_wind)
# Finding the x and y positions of all nonzero pixels
pixel_indices = img.nonzero()
pixel_y = np.array(pixel_indices[0])
pixel_x = np.array(pixel_indices[1])
# Current position to be updated for each window
current_bin_left = left_bin_base
current_bin_right = right_bin_base
# Iterating over the bins/windows
for w in range(num_wind):
# Identify window boundaries in x and y (and right and left)
w_y_bottom = img.shape[0] - (w +1)*bin_h
w_y_top = img.shape[0] - w * bin_h
w_xleft_bottom = current_bin_left - margin
w_xleft_top = current_bin_left + margin
w_xright_bottom = current_bin_right - margin
w_xright_top = current_bin_right + margin
# Draw the windows on the image
if windows_flag == True:
cv2.rectangle(out_img,(w_xleft_bottom,w_y_bottom),(w_xleft_top,w_y_top),
(100,255,255), 3)
cv2.rectangle(out_img,(w_xright_bottom,w_y_bottom),(w_xright_top,w_y_top),
(100,255,255), 3)
        # Finding the nonzero pixels in x and y within the window
req_left_pixels = ((pixel_y >= w_y_bottom) & (pixel_y < w_y_top) &
(pixel_x >= w_xleft_bottom) & (pixel_x < w_xleft_top)).nonzero()[0]
req_right_pixels = ((pixel_y >= w_y_bottom) & (pixel_y < w_y_top) &
(pixel_x >= w_xright_bottom) & (pixel_x < w_xright_top)).nonzero()[0]
# Append these indices to the corresponding lists
leftlane_indices.append(req_left_pixels)
rightlane_indices.append(req_right_pixels)
# If we found > minpix pixels, recenter next window on their mean position
if len(req_left_pixels) > min_pix:
            current_bin_left = int(np.mean(pixel_x[req_left_pixels]))
        if len(req_right_pixels) > min_pix:
            current_bin_right = int(np.mean(pixel_x[req_right_pixels]))
# Concatenate the arrays of left and right lane pixel indices
leftlane_indices = np.concatenate(leftlane_indices)
rightlane_indices = np.concatenate(rightlane_indices)
# Calculating the left and right lane pixel positions
leftlane_x_pixels = pixel_x[leftlane_indices]
leftlane_y_pixels = pixel_y[leftlane_indices]
rightlane_x_pixels = pixel_x[rightlane_indices]
rightlane_y_pixels = pixel_y[rightlane_indices]
# Fitting a second order polynomial to each lane
leftlane_fit = np.polyfit(leftlane_y_pixels, leftlane_x_pixels, 2)
rightlane_fit = np.polyfit(rightlane_y_pixels, rightlane_x_pixels, 2)
left_p.append(leftlane_fit[0])
left_q.append(leftlane_fit[1])
left_r.append(leftlane_fit[2])
right_p.append(rightlane_fit[0])
right_q.append(rightlane_fit[1])
right_r.append(rightlane_fit[2])
l_point[0] = np.mean(left_p[-10:])
l_point[1] = np.mean(left_q[-10:])
l_point[2] = np.mean(left_r[-10:])
r_point[0] = np.mean(right_p[-10:])
r_point[1] = np.mean(right_q[-10:])
r_point[2] = np.mean(right_r[-10:])
# Generating x and y values for plotting
y_values = np.linspace(0, img.shape[0]-1, img.shape[0] )
leftlane_fit_x = l_point[0]*y_values**2 + l_point[1]*y_values + l_point[2]
rightlane_fit_x = r_point[0]*y_values**2 + r_point[1]*y_values + r_point[2]
out_img[pixel_y[leftlane_indices], pixel_x[leftlane_indices]] = [255, 0, 100]
out_img[pixel_y[rightlane_indices], pixel_x[rightlane_indices]] = [0, 100, 255]
return out_img, (leftlane_fit_x, rightlane_fit_x), (l_point, r_point), y_values
def get_polynomial(img, leftlane_x_pixels, rightlane_x_pixels):
y_values = np.linspace(0, img.shape[0]-1, img.shape[0])
y_eval = np.max(y_values)
y_unit_pix = 30.5/720 # meters per pixel in y direction
x_unit_pix = 3.7/720 # meters per pixel in x direction
# Fit new polynomials to x,y in world space
left_fit_cr = np.polyfit(y_values*y_unit_pix, leftlane_x_pixels*x_unit_pix, 2)
right_fit_cr = np.polyfit(y_values*y_unit_pix, rightlane_x_pixels*x_unit_pix, 2)
# Calculate the new radii of curvature for lanes
left_curve_rad = ((1 + (2*left_fit_cr[0]*y_eval*y_unit_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
right_curve_rad = ((1 + (2*right_fit_cr[0]*y_eval*y_unit_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
# Getting the car center position
car_center_pos = img.shape[1]/2
l_fit_x_int = left_fit_cr[0]*img.shape[0]**2 + left_fit_cr[1]*img.shape[0] + left_fit_cr[2]
r_fit_x_int = +right_fit_cr[0]*img.shape[0]**2 + right_fit_cr[1]*img.shape[0] + right_fit_cr[2]
# Getting the lane center position
lane_center_pos = (r_fit_x_int + l_fit_x_int) /2
center = (car_center_pos - lane_center_pos) * x_unit_pix / 10
# Now our radius of curvature is in meters
return (left_curve_rad, right_curve_rad, center)
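# Background for the radius-of-curvature expressions above (added note): for a
# lane fitted as x = A*y**2 + B*y + C, the standard curvature-radius formula is
#   R(y) = (1 + (2*A*y + B)**2) ** 1.5 / |2*A|
# which is what get_polynomial evaluates at y_eval after rescaling the fit into
# metres. A tiny standalone restatement for clarity (helper name is ours):
def _curvature_radius(A, B, y):
    # radius of curvature of x = A*y**2 + B*y + C evaluated at y
    return (1 + (2 * A * y + B) ** 2) ** 1.5 / np.absolute(2 * A)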
def draw_lanes(img, leftlane_fit_x, rightlane_fit_x):
# Plotting the x and y values
    y_values = np.linspace(0, img.shape[0]-1, img.shape[0])
import os
import numpy as np
from torch.utils.data import DataLoader
from src.nets import *
from src.CGAE.model import train, impute, extract, MultiOmicsDataset
from src.util import logger
from src.util.early_stopping import EarlyStopping
from src.util.umapplotter import UMAPPlotter
def run(args: dict) -> None:
# Check cuda availability
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
logger.info("Selected device: {}".format(device))
torch.manual_seed(args['random_seed'])
save_dir = os.path.join(args['save_dir'], '{}'.format('CGAE'))
os.makedirs(save_dir)
# Load in data, depending on task
# Task 1 : Imputation
if args['task'] == 1:
logger.info("Running Task {} on omic {} and omic {}".format(args['task'], args['data1'], args['data2']))
# Load in data
omic1 = np.load(args['data_path1'])
omic2 = np.load(args['data_path2'])
cancertypes = np.load(args['cancertypes'])
cancer_type_index = np.load(args['cancer_type_index'])
# Use predefined split
train_ind = np.load(args['train_ind'])
val_ind = np.load(args['val_ind'])
test_ind = np.load(args['test_ind'])
omic1_train_file = omic1[train_ind]
omic1_val_file = omic1[val_ind]
omic1_test_file = omic1[test_ind]
omic2_train_file = omic2[train_ind]
omic2_val_file = omic2[val_ind]
omic2_test_file = omic2[test_ind]
if args['task'] == 2:
logger.success("Running Task 2: {} classification.".format(args['ctype']))
# NOTE
        # For testing purposes, this code uses predefined splits; later this should be done every time the model is run
Xtrainctype = np.load(args['x_ctype_train_file'])
Xtrainrest = np.load(args['x_train_file'])
Xtrain = np.vstack((Xtrainctype, Xtrainrest))
Xvalctype = np.load(args['x_ctype_val_file'])
Xvalrest = np.load(args['x_val_file'])
Xval = np.vstack((Xvalctype, Xvalrest))
Ytrainctype = np.load(args['y_ctype_train_file'])
Ytrainrest = np.load(args['y_train_file'])
Ytrain = np.vstack((Ytrainctype, Ytrainrest))
Yvalctype = np.load(args['y_ctype_val_file'])
Yvalrest = np.load(args['y_val_file'])
Yval = np.vstack((Yvalctype, Yvalrest))
# Number of features
input_dim1 = args['num_features']
input_dim2 = args['num_features']
encoder_layers = [args['latent_dim']]
decoder_layers = [args['latent_dim']]
# Initialize network model
net = MultiOmicVAE(input_dim1, input_dim2, encoder_layers, decoder_layers, args['loss_function'],
args['loss_function'],
args['use_batch_norm'], args['dropout_probability'], args['optimizer'], args['enc1_lr'],
args['dec1_lr'], args['enc1_last_activation'], args['enc1_output_scale'], args['enc2_lr'],
args['dec2_lr'], args['enc2_last_activation'], args['enc1_output_scale'],
args['beta_start_value'],
args['zconstraintCoef'], args['crossPenaltyCoef']).to(device)
net = net.double()
logger.success("Initialized MultiOmicVAE model.")
logger.info(str(net))
logger.info("Number of model parameters: ")
num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
logger.info("{}".format(num_params))
# Create directories for checkpoint, sample and logs files
ckpt_dir = save_dir + '/checkpoint'
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
logs_dir = save_dir + '/logs'
# Data loading
logger.info("Loading training and validation data into MultiOmicVAE...")
dataTrain1 = torch.tensor(omic1_train_file, device=device)
dataTrain2 = torch.tensor(omic2_train_file, device=device)
dataValidation1 = torch.tensor(omic1_val_file, device=device)
dataValidation2 = torch.tensor(omic2_val_file, device=device)
datasetTrain = MultiOmicsDataset(dataTrain1, dataTrain2)
datasetValidation = MultiOmicsDataset(dataValidation1, dataValidation2)
train_loader = DataLoader(datasetTrain, batch_size=args['batch_size'], shuffle=True, num_workers=0,
drop_last=False)
train_loader_eval = DataLoader(datasetTrain, batch_size=dataTrain1.shape[0], shuffle=False, num_workers=0,
drop_last=False)
valid_loader = DataLoader(datasetValidation, batch_size=dataValidation1.shape[0], shuffle=False, num_workers=0,
drop_last=False)
# Setup early stopping, terminates training when validation loss does not improve for early_stopping_patience epochs
early_stopping = EarlyStopping(patience=args['early_stopping_patience'], verbose=True)
# Training and validation
train(device=device, net=net, num_epochs=args['epochs'], train_loader=train_loader,
train_loader_eval=train_loader_eval, valid_loader=valid_loader,
ckpt_dir=ckpt_dir, logs_dir=logs_dir, early_stopping=early_stopping, save_step=5, multimodal=True)
# Extract Phase #
# Imputation
if args['task'] == 1:
logger.info("Imputation: Extracting Z1 and Z2 using test set")
dataExtract1 = omic1_test_file
dataExtract2 = omic2_test_file
dataExtract1 = torch.tensor(dataExtract1, device=device)
dataExtract2 = torch.tensor(dataExtract2, device=device)
datasetExtract = MultiOmicsDataset(dataExtract1, dataExtract2)
extract_loader = DataLoader(datasetExtract, batch_size=dataExtract1.shape[0], shuffle=False, num_workers=0,
drop_last=False)
# Compute imputation loss
z1, z2 = impute(net=net,
model_file="/home/bram/jointomicscomp/results/cgae_task1_earlystop 12-10-2021 09:31:35/CGAE/checkpoint/model_epoch460.pth.tar",
loader=extract_loader, save_dir=save_dir, multimodal=True)
cancertypes = np.load(args['cancertypes'])
labels = np.load(args['cancer_type_index']).astype(int)
test_labels = cancertypes[[labels[test_ind]]]
z1_plot = UMAPPlotter(z1, test_labels, "CGAE Z1: Task {} | {} & {} \n"
"Epochs: {}, Latent Dimension: {}, LR: {}, Batch size: {}"
.format(args['task'], args['data1'], args['data2'],
args['epochs'], args['latent_dim'], args['enc1_lr'], args['batch_size']),
save_dir + "/{} UMAP_Z1.png".format('CGAE'))
z1_plot.plot()
z2_plot = UMAPPlotter(z2, test_labels, "CGAE Z2: Task {} | {} & {} \n"
"Epochs: {}, Latent Dimension: {}, LR: {}, Batch size: {}"
.format(args['task'], args['data1'], args['data2'],
args['epochs'], args['latent_dim'], args['enc1_lr'], args['batch_size']),
save_dir + "/{} UMAP_Z2.png".format('CGAE'))
z2_plot.plot()
# Cancer stage prediction
if args['task'] == 2:
logger.info("Cancer Type Classification: Extracting Z1 and Z2 using {} set".format(args['ctype']))
# Test sets are stratified data from cancer type into stages
dataExtract1 = np.vstack((Xtrainctype, Xvalctype, np.load(args['x_ctype_test_file'])))
dataExtract2 = np.vstack((Ytrainctype, Yvalctype, np.load(args['y_ctype_test_file'])))
dataExtract1 = torch.tensor(dataExtract1, device=device)
dataExtract2 = torch.tensor(dataExtract2, device=device)
datasetExtract = MultiOmicsDataset(dataExtract1, dataExtract2)
extract_loader = DataLoader(datasetExtract, batch_size=dataExtract1.shape[0], shuffle=False, num_workers=0,
drop_last=False)
# Extract Z from all data from the chosen cancer type
# Do predictions separately
z1, z2 = extract(net=net, model_file=os.path.join(ckpt_dir, "model_last.pth.tar".format(args['epochs'])),
loader=extract_loader, save_dir=save_dir, multimodal=True)
        prediction_test_labels = np.load(args['ctype_test_file_labels'])
from .mulfft import PolyMul,PolyMullvl2Long
from .utils import gaussian32, dtot32, gaussian64, dtot64
from secrets import randbits
import os
import numpy as np
def trlweSymEncrypt(p, alpha, key, twist):
# a = np.array([randbits(32) for i in range(len(key))], dtype=np.uint32)
a = np.frombuffer(os.urandom(len(key) * 4), dtype=np.uint32)
b = gaussian32(dtot32(p), alpha, len(key))
b += PolyMul(a, key, twist)
    return np.array([a, b])
import numpy as np
from scipy.stats import levy,norm,uniform
def sample_distro(distro_tuple):
"""
Samples a certain probability distrobution function (PDF) described by a tuple of parameters
Parameters
----------
distro_tuple : tuple (distrobution_name, arg1,arg2...)
The PDF to sample from
Returns
---------
float
the number generated from the PDF
"""
distro_type = distro_tuple[0]
if distro_type == "levy":
return levy.rvs(loc=distro_tuple[1], scale=distro_tuple[2],size=1)[0]
elif distro_type == "gaussian":
return norm.rvs(loc=distro_tuple[1], scale=distro_tuple[2],size=1)[0]
elif distro_type == "uniform":
return uniform.rvs(loc=distro_tuple[1], scale=(distro_tuple[2]-distro_tuple[1]),size=1)[0]
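# Illustrative usage of sample_distro (added example; the numeric values are
# arbitrary): each tuple names the PDF followed by its loc/scale parameters
# (for "uniform", the lower and upper bounds).
def _sample_distro_examples():
    step = sample_distro(("levy", 0.0, 0.1))         # heavy-tailed step length
    heading = sample_distro(("gaussian", 0.0, 0.5))  # normally distributed turn
    wait = sample_distro(("uniform", 0.0, 2.0))      # uniform on [0.0, 2.0)
    return step, heading, wait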
class SimulationRobot:
"""
Class describing a robot in SimulationWorld
"""
def __init__(self,robot_params):
"""
Initialises the robot class from a dictionary containing all key robot parameters
Parameters
----------
robot_params : dict
Dictionary containing all key robot parameters
"""
#Kinematic information
self.position = np.zeros(2)
self.rotation = 0.0
self.velocity = np.zeros(2)
#Timer can be used to limit the rate at which of control loop executes
self.timer = 0
self.robot_state = 0
self.robot_params = robot_params
#These are assigned by the world class
self.robot_index = None
self.bin_index = None
def on_sim_start(self):
"""
        Initialises the robot depending on the algorithm set in the robot_params dict
        Executed during the initial position log of the SimulationWorld class, so it should run just before the first time step
"""
if self.robot_params["algorithm"] == "boid_flocker":
#Initialises boids so they don't all update at the same time
self.timer = np.random.uniform(0.0,self.robot_params["update_period"])
rand_dir = np.random.uniform(-np.pi,np.pi)
self.target_vect = np.array((np.cos(rand_dir),np.sin(rand_dir)))
self.neighbour_indexs = []
if self.robot_params["algorithm"] == "firefly_sync":
#self.timer = np.random.uniform(0.0,self.robot_params["update_period"])
self.movement_timer = 0
self.activation_value = np.random.uniform(0.0,1.0)
self.led_timer = 0
self.prev_flashes_detected = 0
def movement_update(self,dt):
"""
Updates the robots position based on its current position and velocity
Parameters
----------
dt : float
The time difference since the last movement update
"""
self.prev_position = self.position
self.position = self.position + self.velocity*dt
def get_neighbours(self,distance_matrix):
"""
Returns the robot's neighbours via their robot_index
        Neighbourhood params are dictated by neighbourhood_mode, neighbourhood_size and neighbourhood_distance in robot_params
Parameters
----------
distance_matrix : np.array
NxN array for a world with N robots in it. Represents the distance between every robot in the array
Returns
----------
list
Neighbour indexes
"""
neighbour_indexs = []
        #There are two ways of defining your neighbourhood, X closest robots to you and all the robots that are within X distance. Both are implemented here and can be changed with the "neighbourhood_mode" key
if self.robot_params["neighbourhood_mode"] == "distance":
            #we select robot indexes if their corresponding distance is less than our neighbourhood distance
neighbour_indexs = np.arange(0,distance_matrix.shape[0])[distance_matrix[self.robot_index,:] < self.robot_params["neighbourhood_distance"]]
elif self.robot_params["neighbourhood_mode"] == "nearist" and self.robot_params["neighbourhood_size"] > 0:
            #argpartition sorts the distance matrix in such a way that we are guaranteed to have the X closest distances, but avoids sorting the whole thing
neighbour_indexs = np.argpartition(distance_matrix[self.robot_index,:],self.robot_params["neighbourhood_size"])
neighbour_indexs = neighbour_indexs[:self.robot_params["neighbourhood_size"]+1]
neighbour_indexs = neighbour_indexs[neighbour_indexs!= self.robot_index]
return neighbour_indexs
def control_update(self,dt,world = None):
"""
        Updates the robot's velocity and rotation according to the algorithm set in the robot_params dict
Parameters
----------
dt : float
The time difference since the last control update
world : SimulationWorld
The world this robot exists within. Used to detect other objects in the world such as other robots and barriers
"""
self.timer -=dt
if self.robot_params["algorithm"] == "random_walker":
            #Random walker consists of picking a new direction randomly at random time intervals. Step length is dictated by the PDF in "step_len_distro"
#Direction changes are decided by "dir_change_distro"
if (self.timer <= 0):
self.timer = sample_distro(self.robot_params["step_len_distro"])
self.rotation += sample_distro(self.robot_params["dir_change_distro"])
self.velocity = np.array((np.cos(self.rotation),np.sin(self.rotation)))*self.robot_params["max_speed"]
elif self.robot_params["algorithm"] == "boid_flocker":
#boid flockers consist of three rules
# Cohesion - Aim for the centroid of your neighbours
# Alignment - Aim to align with neighbours
            # Separation - Move away from neighbours if too close
            #This implementation contains an additional rule
#Centre homing - Move towards the centre of the world (0,0)
if (self.timer <= 0):
self.timer = self.robot_params["update_period"]
self.neighbour_indexs = self.get_neighbours(world.world_sense_data["robot_distances"])
#If we have neighbours
if len(self.neighbour_indexs) != 0:
#Get neighbour's distances, bearings and calculate their centroid
self.neighbour_dists = world.world_sense_data["robot_distances"][self.robot_index][self.neighbour_indexs]
self.neighbour_bearings = world.world_sense_data["current_robot_poses"][self.neighbour_indexs,2]
self.neighbour_centroid = np.mean(world.world_sense_data["current_robot_poses"][self.neighbour_indexs,:2],axis = 0)
#Use these to calculate the forces
self.cohesion_force = (self.neighbour_centroid - self.position)
self.allignment_force = np.array((np.cos(np.mean(self.neighbour_bearings)),np.sin(np.mean(self.neighbour_bearings))))
                    #We only apply the separation to those neighbours which are especially close
close_neighbours = self.neighbour_indexs[self.neighbour_dists < self.robot_params["seperation_dist"]]
self.seperation_force = np.sum(np.tile(self.position,(close_neighbours.shape[0],1))-world.world_sense_data["current_robot_poses"][close_neighbours,:2],axis = 0)
else:
#No neighbours these forces are zero
self.cohesion_force=np.zeros(2)
self.allignment_force=np.zeros(2)
self.seperation_force=np.zeros(2)
#Calculate our distance from the centre
dist_from_centre = np.sqrt(np.sum(np.power(self.position,2.0)))
if dist_from_centre > 0: #Avoid dividing by zero
self.centre_force = -self.position/dist_from_centre
else:
self.centre_force = np.zeros(2)
#The final direction we want the robot to head in is the sum of each of the four force multiplied by their coefficients
self.cohesion_force*=self.robot_params["cohesion_coefficient"]
self.allignment_force*=self.robot_params["alignment_coefficient"]
self.seperation_force*=self.robot_params["seperation_coefficient"]
self.centre_force*=self.robot_params["central_pull_coefficient"]
self.target_vect = self.cohesion_force + self.allignment_force + self.seperation_force + self.centre_force
#Proportional controller to align the robots velocity vector with the desired vector
self.error_vect = self.target_vect - self.velocity
            #Angle between the robot's current heading and the target vector's heading
#Uses arctan2 on the robot's velocity to ensure this angle will remain with -pi to pi
angle_error = (np.arctan2(self.target_vect[1],self.target_vect[0]) -np.arctan2( self.velocity[1], self.velocity[0]))
#Ensures the error angle is between -pi to pi
angle_error = np.arctan2(np.sin(angle_error), np.cos(angle_error))
#Proportional control to control our angular velocity
self.rotation += angle_error*self.robot_params["rotational_p_control"]*dt
self.velocity = np.array((np.cos(self.rotation),np.sin(self.rotation)))*self.robot_params["max_speed"]
elif self.robot_params["algorithm"] == "firefly_sync":
            #Firefly synchronisation is based on each agent moving its phase of flashing forward when its neighbours flash. Here we use robot_state to represent an LED where 0 == LED off and 1 == LED on
            #Each agent increases an internal activation value every time step; when this value reaches 1 the agent flashes its LED and resets its activation to zero
            #Every update cycle the agent surveys its neighbourhood for flashes. This algorithm takes the difference between the last number of activated neighbours and the current number to only count new flashes.
            #New flashes increase the agent's activation by an amount dictated by activation_increase, known as the coupling constant
            #When the LED turns on it does so for a certain period, which is more realistic for robotic agents due to latency
            #Further discussion of the algorithm implementation can be found in: https://ieeexplore.ieee.org/abstract/document/5166479
if not self.robot_params["static"]: #Moving can help synchronisation as it changes the neighbour graph, if true activates a random walk
self.movement_timer -=dt
if self.movement_timer <= 0:
self.movement_timer = sample_distro(self.robot_params["step_len_distro"])
self.rotation += sample_distro(self.robot_params["dir_change_distro"])
                    self.velocity = np.array((np.cos(self.rotation),np.sin(self.rotation)))*self.robot_params["max_speed"]
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 4 14:07:39 2017
@author: ben
"""
#import scipy.sparse as sp
import numpy as np
from osgeo import gdal, osr, ogr
import copy
import pointCollection as pc
class fd_grid(object):
# a fd_grid is an object that defines the nodal locations and their indices
# for a regular grid of points. In a k-dimensional grid, each node has k
# subscripts, and one global index. The global index gives the index into
# the raveled node values. To allow multiple grids to be combined, the
# first global index can be shifted by specifying a nonzero col_0 value,
# and room for additional grids can be allocated by specifying a col_N value
# that is greater than the number of nodes.
def __init__(self, bounds, deltas, col_0=0, col_N=None, srs_proj4=None, mask_file=None, mask_data=None, name=''):
self.shape=np.array([((b[1]-b[0])/delta)+1 for b, delta in zip(bounds, deltas)]).astype(int) # number of nodes in each dimension
self.ctrs=[b[0]+ delta*np.arange(N) for b, delta, N in zip(bounds, deltas, self.shape)] # node center locations
self.bds=[np.array([c[0], c[-1]]) for c in self.ctrs]
self.delta=np.array(deltas) # node spacing in each dimension
self.N_dims=len(self.shape) # number of dimensions
self.N_nodes=np.prod(self.shape) # total number of nodes
self.stride=np.flipud(np.cumprod(np.flipud(np.r_[self.shape[1:], 1]))) # difference in global_ind between adjacent nodes
self.col_0=col_0 # first global_ind for the grid
self.srs_proj4=srs_proj4 # Well Known Text for the spatial reference system of the grid
if mask_data is None:
# ignore the mask file if mask_data is provided
self.mask_file=mask_file
else:
self.mask_file=None
self.mask=None # binary mask
self.user_data=dict() # storage space
self.name=name # name of the degree of freedom specified by the grid
self.cell_area=None
if col_N is None:
self.col_N=self.col_0+self.N_nodes
else:
self.col_N=col_N
if self.mask_file is not None:
# read the mask based on its extension
# geotif:
if self.mask_file.endswith('.tif'):
self.mask=self.read_geotif(self.mask_file, interp_algorithm=gdal.GRA_Average)
                self.mask=np.round(self.mask).astype(int)
# vector (add more formats as needed)
elif self.mask_file.endswith('.shp') or self.mask_file.endswith('.db'):
self.mask=self.burn_mask(self.mask_file)
elif mask_data is not None:
mask_delta = [mask_data.y[1]-mask_data.y[0] , mask_data.x[1]-mask_data.x[0]]
interp_field='z' # default
if self.delta[1] > mask_data.x[1]-mask_data.x[0] or self.delta[0] > mask_data.y[1]-mask_data.y[0]:
from scipy.ndimage import binary_erosion
Nx = int(np.ceil(self.delta[1]/mask_delta[0]/2))
Ny = int(np.ceil(self.delta[0]/mask_delta[0]/2))
if Nx > 1 or Ny > 1:
mask_data.assign({'z_coarse':mask_data.z.copy()})
interp_field='z_coarse'
if Nx > 1:
mask_data.z_coarse = binary_erosion(mask_data.z_coarse, np.ones([Nx,1]))
if Ny > 1:
mask_data.z_coarse = binary_erosion(mask_data.z_coarse, np.ones([1, Ny]))
self.mask = mask_data.interp(self.ctrs[1], self.ctrs[0], gridded=True, field=interp_field) > 0.5
else:
self.mask=np.ones(self.shape[0:2], dtype=bool)
def copy(self):
return copy.deepcopy(self)
def validate_pts(self, pts):
# check whether points are inside the grid
good=np.isfinite(pts[0])
for dim in range(self.N_dims):
good[good]=np.logical_and(good[good], pts[dim][good] >= self.bds[dim][0])
good[good]=np.logical_and(good[good], pts[dim][good] <= self.bds[dim][1])
return good
def pos_for_nodes(self, nodes):
# find the location for a node in the grid from its subscripts
pos=list()
inds=np.unravel_index(nodes, self.shape)
for delta, bd, ind in zip(self.delta, self.bds, inds):
pos.append((ind*delta)+bd[0])
return pos
def get_extent(self, dims=[1, 0]):
# return the extent for a slice of the grid for the dimensions in 'dims'.
# Extents are the bounds of the grid padded by one half cell.
return np.concatenate([self.bds[dim]+self.delta[dim]*np.array([-0.5, 0.5]) for dim in dims])
def float_sub(self, pts, good=None):
# find the normalized point location within the grid (in subscript coordinates)
        idxf=[np.NaN+np.zeros_like(pts[0]) for dim in range(self.N_dims)]
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import pims
from skimage import img_as_uint
from skimage.io import imsave
from skimage.transform import SimilarityTransform, warp, rotate, downscale_local_mean
from skimage.filters import laplace, sobel, scharr, median
from scipy import signal
import argparse
def get_params(target, img, sigma_t, sigma_r, max_iteration):
[tx, ty, rot] = [0, 0, 0]
diff = transform_diff(target, img, tx, ty, rot)
params_record = np.zeros((max_iteration, 4))
for it in tqdm(range(max_iteration), desc='Optimizing Transformation'):
tx_p, ty_p, rot_p = propose_params(tx, ty, rot, sigma_t, sigma_r)
diff_p = transform_diff(target, img, tx_p, ty_p, rot_p)
if diff_p < diff:
tx, ty, rot, diff = tx_p, ty_p, rot_p, diff_p
elif diff_p/diff < np.random.rand():
tx, ty, rot, diff = tx_p, ty_p, rot_p, diff_p
params_record[it, :] = [tx_p, ty_p, rot_p, diff_p]
params_sort = params_record[params_record[:, 3].argsort()]
params_sort = np.mean(params_sort[:int(max_iteration/10), :], axis=0)
tx, ty, rot = params_sort[0], params_sort[1], params_sort[2]
img_trans = transform(img, tx, ty, rot)
return img_trans, tx, ty, rot, params_record
def mean_sq_diff(target, img):
return ((target-img)**2)[img > 0].mean()
def corr(target, img):
coeff = np.corrcoef(target[img > 0].ravel(), img[img > 0].ravel())
return -coeff[0, 1]
def transform(img, tx, ty, rot):
img_rot = rotate(img, rot, preserve_range=True)
transf = SimilarityTransform(translation=[tx, ty])
return warp(img_rot, transf, preserve_range=True)
def transform_diff(target, img, tx, ty, rot):
img_transf = transform(img, tx, ty, rot)
return corr(target, img_transf)
def propose_params(tx, ty, rot, sigma_t, sigma_r):
tx_prop = np.random.normal(tx, sigma_t)
ty_prop = np.random.normal(ty, sigma_t)
    rot_prop = np.random.normal(rot, sigma_r)
#!/usr/bin/env python3
"""
Sports betting assistant
"""
import copy
from itertools import product, combinations
import numpy as np
def gain(cotes, mise=1):
"""
    :param cotes: Odds in decimal format
    :type cotes: list[float]
    :param mise: Stake to spread across all the odds
    :type mise: float
    :return: Payout for a total stake equal to mise
    :rtype: float
"""
return mise / sum(map(lambda x: 1 / x, cotes))
def gain2(cotes, i, mise=1):
"""
    :param cotes: Odds in decimal format
    :type cotes: list[float]
    :param i: Index of the odd to bet on
    :type i: int
    :param mise: Stake to place on a single outcome
    :type mise: float
    :return: Net profit generated
    :rtype: float
"""
return cotes[i] * mise - sum(mises2(cotes, mise, i))
def mises(cotes, mise=1, output=False):
"""
    :param cotes: Odds in decimal format
    :type cotes: list[float]
    :param mise: Stake to spread across all the odds
    :param output: Print the details
    :type output: bool
    :return: Optimal distribution of the stakes
    :rtype: list[float] or None
"""
gains = gain(cotes, mise)
mises_reelles = list(map(lambda x: gains / x, cotes))
if output:
mis = list(map(lambda x: round(x, 2), mises_reelles))
print("somme des mises =", round(sum(mis), 2))
print("gain min =", min([round(mis[i] * cotes[i], 2)
for i in range(len(mis))]))
print("gain max =", max([round(mis[i] * cotes[i], 2)
for i in range(len(mis))]))
print("plus-value max =",
round(min([round(mis[i] * cotes[i], 2)
for i in range(len(mis))]) - sum(mis), 2))
print("mises arrondies =", mis)
return
return mises_reelles
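# Worked example (added for clarity; the odds are made up): with decimal odds
# [2.1, 2.2] the implied probabilities sum to 1/2.1 + 1/2.2, about 0.931 < 1,
# so a guaranteed profit exists. Splitting a total stake of 100 with mises()
# gives roughly [51.16, 48.84], and gain() returns the constant payout of about
# 107.44 whichever outcome wins (profit of about 7.44).
def _surebet_example():
    cotes = [2.1, 2.2]
    payout = gain(cotes, 100)        # about 107.44
    repartition = mises(cotes, 100)  # about [51.16, 48.84], sums to 100
    return payout, repartition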
def mises2(cotes, mise_requise, choix=-1, output=False):
"""
    Computes the distribution of the stakes when betting mise_requise on one of
    the outcomes. By default, mise_requise is placed on the lowest odd.
    :param cotes: Odds in decimal format
    :type cotes: list[float]
    :param mise_requise: Stake to place on the chosen outcome
    :type mise_requise: float
    :param choix: Index of the odd to bet on
    :type choix: int
    :param output: Print the details
    :type output: bool
    :return: Optimal distribution of the stakes
    :rtype: list[float] or None
"""
if choix == -1:
choix = np.argmin(cotes)
gains = mise_requise * cotes[choix]
mises_reelles = list(map(lambda x: gains / x, cotes))
if output:
mis = list(map(lambda x: round(x, 2), mises_reelles))
print("somme des mises =", round(sum(mis), 2))
print("gain min =", min([round(mis[i] * cotes[i], 2)
for i in range(len(mis))]))
print("gain max =", max([round(mis[i] * cotes[i], 2)
for i in range(len(mis))]))
print("plus-value min =",
round(min([round(mis[i] * cotes[i], 2)
for i in range(len(mis))]) - sum(mis), 2))
print("plus-value max =",
round(max([round(mis[i] * cotes[i], 2)
for i in range(len(mis))]) - sum(mis), 2))
print("mises arrondies =", mis)
return
return mises_reelles
def mises3(odds, best_odds, stake, minimum_odd, output=False):
assert len(odds) == len(best_odds)
n = len(odds)
indices_valid_odds = [i for i in range(n) if odds[i] >= minimum_odd]
n_valid_odds = len(indices_valid_odds)
profit = -stake
stakes = []
odds_best_profit = []
best_combination = []
reference_stake = []
for i in range(n_valid_odds):
for combination in combinations(indices_valid_odds, i+1):
odds_to_check = []
for j in range(n):
odd = odds[j] if j in combination else best_odds[j]
odds_to_check.append(odd)
odds_site = [odds[k] for k in combination]
first_stake_site = mises(odds_site, stake)[0]
profit_combination = gain2(odds_to_check, combination[0], first_stake_site)
if profit_combination > profit:
reference_stake = first_stake_site
stakes = mises2(odds_to_check, first_stake_site, combination[0])
odds_best_profit = copy.deepcopy(odds_to_check)
best_combination = copy.deepcopy(combination)
profit = profit_combination
if output:
mises2(odds_best_profit, reference_stake, best_combination[0], True)
print("cotes =", odds_best_profit)
return
if best_combination:
return mises2(odds_best_profit, reference_stake, best_combination[0]), best_combination
return
def gain3(odds, best_odds, stake, minimum_odd):
assert len(odds) == len(best_odds)
n = len(odds)
indices_valid_odds = [i for i in range(n) if odds[i] >= minimum_odd]
n_valid_odds = len(indices_valid_odds)
profit = -float("inf")
stakes = []
odds_best_profit = []
    best_combination = []
reference_stake = []
for i in range(n_valid_odds):
for combination in combinations(indices_valid_odds, i+1):
odds_to_check = []
for j in range(n):
odd = odds[j] if j in combination else best_odds[j]
odds_to_check.append(odd)
odds_site = [odds[k] for k in combination]
first_stake_site = mises(odds_site, stake)[0]
profit_combination = gain2(odds_to_check, combination[0], first_stake_site)
if profit_combination > profit:
reference_stake = first_stake_site
stakes = mises2(odds_to_check, first_stake_site, combination[0])
odds_best_profit = copy.deepcopy(odds_to_check)
best_combination = copy.deepcopy(combination)
profit = profit_combination
return profit
def cotes_freebet(cotes):
"""
    Computes the odds of a match played with free bets
    :param cotes: Odds in decimal format
    :type cotes: list[float]
    :return: Odds reduced by 1
    :rtype: list[float]
"""
return list(map(lambda x: (x - 1 if x > 1 else 0.01), cotes))
def mises_freebets(cotes, mise):
"""
    Computes the distribution of free-bet stakes that maximises the payout
    for a total stake equal to mise
:param cotes:
:type cotes:
:param mise:
:type mise:
:return:
:rtype:
"""
return mises(cotes_freebet(cotes), mise)
def mises_freebet(cotes, freebet, issue=-1, output=False):
"""
    Computes the distribution of the stakes when a free bet has to be placed on
    one of the outcomes. By default, the free bet is placed on the highest odd.
"""
if issue == -1:
issue = np.argmax(cotes)
mises_reelles = mises2(cotes[:issue] + [cotes[issue] - 1] + cotes[issue + 1:], freebet, issue)
gains = mises_reelles[issue] * (cotes[issue] - 1)
if output:
mis = list(map(lambda x: round(x, 2), mises_reelles))
print("gain sur freebet =", round(gains + freebet - sum(mis), 2))
print("gain sur freebet / mise freebet =", round(gains + freebet - sum(mis), 2) / freebet)
print("gain =", round(gains, 2))
print("mise totale (hors freebet) =", round(sum(mis) - freebet, 2))
print("mises arrondies =", mis)
return
return mises_reelles
def mises_freebet2(cotes, freebet, issue=-1, output=False):
"""
    Computes the distribution of the stakes when 2 free bets have to be placed
    on outcomes of the same match. The 2nd free bet is placed automatically.
"""
i_max = np.argmax(cotes)
if issue == -1:
issue = i_max
mises_reelles = mises2(cotes[:issue] + [cotes[issue] - 1] + cotes[issue + 1:], freebet, issue)
gains = mises_reelles[issue] * (cotes[issue] - 1)
issue2 = int(np.argmax(cotes[:i_max] + [0] + cotes[i_max + 1:]) if issue == i_max else i_max)
mis = list(map(lambda x: round(x, 2), mises_reelles))
rapport_gain = (gains + freebet - sum(mis)) / freebet
if rapport_gain < (cotes[issue2] - 1) / cotes[issue2]:
mises_reelles[issue2] = round(gains / (cotes[issue2] - 1), 2)
mis = list(map(lambda x: round(x, 2), mises_reelles))
freebet += mis[issue2]
if output:
print("gain sur freebet =", round(gains + freebet - sum(mis), 2))
print("gain sur freebet / mise freebet =", round(gains + freebet - sum(mis), 2) / freebet)
print("gain =", round(gains, 2))
print("mise totale (hors freebet) =", round(sum(mis) - freebet, 2))
print("mises arrondies =", mis)
return issue2
return mises_reelles
def gain_freebet2(cotes, freebet, issue=-1):
"""
    Computes the rate of return when two free bets are placed on the same match.
"""
i_max = np.argmax(cotes)
if issue == -1:
issue = i_max
mises_reelles = mises2(cotes[:issue] + [cotes[issue] - 1] + cotes[issue + 1:], freebet, issue)
gains = mises_reelles[issue] * (cotes[issue] - 1)
issue2 = int(np.argmax(cotes[:i_max] + cotes[i_max + 1:]) if issue == i_max else i_max)
mis = list(map(lambda x: round(x, 2), mises_reelles))
rapport_gain = (gains + freebet - sum(mis)) / freebet
if rapport_gain < (cotes[issue2] - 1) / cotes[issue2]:
mis[issue2] = round(gains / (cotes[issue2] - 1), 2)
freebet += mis[issue2]
return (gains + freebet - sum(mis)) / freebet
def cotes_combine(cotes):
"""
    Computes the combined odds of several matches (accumulator bet)
"""
    return [round(np.prod(i), 4) for i in product(*cotes)]
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
full_data = pd.read_csv(filename).drop_duplicates().dropna()
full_data = full_data.loc[full_data["price"] >= 0]
full_data = full_data.loc[full_data["yr_built"] >= 1800]
full_data = full_data.loc[full_data["sqft_living"] >= 0]
full_data = full_data.loc[full_data["sqft_lot"] >= 0]
full_data = full_data.loc[full_data["sqft_living15"] >= 0]
full_data = full_data.loc[full_data["sqft_lot15"] >= 0]
full_data = full_data.loc[full_data["sqft_above"] >= 0]
full_data = full_data.loc[full_data["sqft_basement"] >= 0]
response = full_data["price"]
full_data["is_renovated"] = full_data["yr_renovated"].apply(lambda x: 1 if x > 2000 else 0)
full_data = pd.get_dummies(full_data, prefix='',
columns=[
"zipcode"])
full_data.drop(["id", "date", "long", "lat", "price", "yr_renovated"], inplace=True,
axis=1)
return full_data, response
def feature_evaluation(X: pd.DataFrame, y: pd.Series, output_path: str = ".") -> NoReturn:
"""
Create scatter plot between each feature and the response.
- Plot title specifies feature name
- Plot title specifies Pearson Correlation between feature and response
- Plot saved under given folder with file name including feature name
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Design matrix of regression problem
y : array-like of shape (n_samples, )
Response vector to evaluate against
output_path: str (default ".")
Path to folder in which plots are saved
"""
std_of_y = np.std(y)
for feature in X:
pearson_corr = np.cov(X[feature], y)[0][1] / (np.std(X[feature]) * std_of_y)
fig = px.scatter(x=X[feature], y=y, width=1000, opacity=0.65,
trendline='ols', trendline_color_override='darkblue')
fig.update_layout(
title="Pearson Correlation of " + feature + " and the response: " + str(round(pearson_corr, 3)),
xaxis={"title": feature},
yaxis={"title": "Response"})
        fig.write_image(output_path + "/" + feature + "_correlation.png")
if __name__ == '__main__':
np.random.seed(0)
# Question 1 - Load and preprocessing of housing prices dataset
df, price = load_data("datasets/house_prices.csv")
# Question 2 - Feature evaluation with respect to response
feature_evaluation(df, price, "exercises/ex_2_plots")
# Question 3 - Split samples into training- and testing sets.
train_X, train_y, test_X, test_y = split_train_test(df, price, .75)
# Question 4 - Fit model over increasing percentages of the overall training data
# For every percentage p in 10%, 11%, ..., 100%, repeat the following 10 times:
# 1) Sample p% of the overall training data
# 2) Fit linear model (including intercept) over sampled set
# 3) Test fitted model over test set
# 4) Store average and variance of loss over test set
# Then plot average loss as function of training size with error ribbon of size (mean-2*std, mean+2*std)
linear_regression = LinearRegression(True)
average_loss = []
variance_of_loss = []
upper_bound = []
for i in range(10, 101):
loss = 0
for j in range(10):
p_train_X, p_train_y, p_test_X, p_test_y = split_train_test(train_X, train_y, i / 100)
linear_regression.fit(p_train_X, p_train_y)
loss += linear_regression.loss(test_X, test_y)
average_loss.append(loss/10)
# upper_bound = average_loss + 2*variance_of_loss
    fig = px.scatter(x=np.arange(10, 101, step=1), y=average_loss)
#!/usr/bin/env python
# coding: utf-8
# # Preferential Bayesian Optimization: Multinomial Predictive Entropy Search
# This notebook demonstrates the use of the Multinomial Predictive Entropy Search (MPES) acquisition function on ordinal (preference) data.
# In[ ]:
import numpy as np
import gpflow
import tensorflow as tf
import matplotlib.pyplot as plt
import sys
import os
import pickle
from gpflow.utilities import set_trainable, print_summary
gpflow.config.set_default_summary_fmt("notebook")
sys.path.append(os.path.split(os.path.split(os.path.split(os.getcwd())[0])[0])[0]) # Move 3 levels up directory to import project files as module
import importlib
PBO = importlib.import_module("Top-k-Ranking-Bayesian-Optimization")
# In[ ]:
gpu_to_use = 0
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
# Restrict TensorFlow to only use the first GPU
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
tf.config.experimental.set_visible_devices(gpus[gpu_to_use], 'GPU')
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPU")
except RuntimeError as e:
# Visible devices must be set before GPUs have been initialized
print(e)
# In[ ]:
objective = PBO.objectives.six_hump_camel
objective_low = -1.5
objective_high = 1.5
objective_name = "SHC"
acquisition_name = "MPES"
experiment_name = acquisition_name + "_" + objective_name
# In[ ]:
num_runs = 10
num_evals = 35
num_samples = 1000
num_choices = 2
input_dims = 2
objective_dim = input_dims # CHANGE 1: require the objective dim
num_maximizers = 20
num_maximizers_init = 50
num_fourier_features = 1000
num_init_prefs = 6 # CHANGE 2: randomly initialize with some preferences
# CHANGE 1: reduce the value of delta to avoid numerical error
# as k(x,x') = sigma^2 * exp( -[(x-x')/l]^2 )
# which could be very small if l is too small
# so we define l relatively by the range of input (objective_high - objective_low)
# It is ok for the total number of observations > the total number of possible inputs
# because there is a noise in the observation, it might require repeated observations
# at the same input pair to improve the confidence
num_discrete_per_dim = 40
delta = (objective_high - objective_low) / num_discrete_per_dim
# In[ ]:
results_dir = os.getcwd() + '/results/' + experiment_name + '/'
try:
# Create target Directory
os.makedirs(results_dir)
print("Directory " , results_dir , " created ")
except FileExistsError:
print("Directory " , results_dir , " already exists")
# Plot of the SHC function (global min at x = [0.0898, -0.7126] and x = [-0.0898, 0.7126]):
# In[ ]:
# CHANGE 4: use a discrete grid of with cells of size = delta
inputs = PBO.models.learning_fullgp.get_all_discrete_inputs(objective_low, objective_high, objective_dim, delta)
fvals = objective(inputs).reshape(num_discrete_per_dim, num_discrete_per_dim)
# In[ ]:
fig, ax = plt.subplots()
im = ax.imshow(fvals,
interpolation="nearest",
extent=(objective_low, objective_high, objective_low, objective_high),
origin="lower",
cmap="Spectral")
fig.colorbar(im, ax=ax)
plt.show()
# In[ ]:
def plot_gp(model, inducing_points, inputs, title, cmap="Spectral"):
side = np.linspace(objective_low, objective_high, num_discrete_per_dim)
combs = PBO.acquisitions.dts.combinations(np.expand_dims(side, axis=1))
predictions = model.predict_y(combs)
preds = tf.transpose(tf.reshape(predictions[0], [num_discrete_per_dim, num_discrete_per_dim]))
variances = tf.transpose(tf.reshape(predictions[1], [num_discrete_per_dim, num_discrete_per_dim]))
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.suptitle(title)
fig.set_size_inches(18.5, 6.88)
fig.set_dpi((200))
ax1.axis('equal')
im1 = ax1.imshow(preds,
interpolation='nearest',
extent=(objective_low, objective_high, objective_low, objective_high),
origin='lower',
cmap=cmap)
ax1.plot(inducing_points[:, 0], inducing_points[:, 1], 'kx', mew=2)
ax1.plot(inputs[:, 0], inputs[:, 1], 'ko', mew=2, color='w')
ax1.set_title("Mean")
ax1.set_xlabel("x0")
ax1.set_ylabel("x1")
fig.colorbar(im1, ax=ax1)
ax2.axis('equal')
im2 = ax2.imshow(variances,
interpolation='nearest',
extent=(objective_low, objective_high, objective_low, objective_high),
origin='lower',
cmap=cmap)
ax2.plot(inducing_points[:, 0], inducing_points[:, 1], 'kx', mew=2)
ax2.plot(inputs[:, 0], inputs[:, 1], 'ko', mew=2, color='w')
ax2.set_title("Variance")
ax2.set_xlabel("x0")
ax2.set_ylabel("x1")
fig.colorbar(im2, ax=ax2)
plt.savefig(fname=results_dir + title + ".png")
plt.show()
# In[ ]:
def get_noisy_observation(X, objective):
f = PBO.objectives.objective_get_f_neg(X, objective)
return PBO.observation_model.gen_observation_from_f(X, f, 1)
# In[ ]:
def train_and_visualize(X, y, title, lengthscale_init=None, signal_variance_init=None):
# Train model with data
# CHANGE 6: use full_gp instead of sparse,
result = PBO.models.learning_fullgp.train_model_fullcov(
X, y,
obj_low=objective_low,
obj_high=objective_high,
lengthscale_init=lengthscale_init,
signal_variance_init=signal_variance_init,
indifference_threshold=0.,
n_sample=1000,
deterministic=True, # only sample f values once, not re-sampling
num_steps=3000)
q_mu = result['q_mu']
q_sqrt = result['q_sqrt']
u = result['u']
inputs = result['inputs']
k = result['kernel']
likelihood = gpflow.likelihoods.Gaussian()
model = PBO.models.learning.init_SVGP_fullcov(q_mu, q_sqrt, u, k, likelihood)
u_mean = q_mu.numpy()
inducing_vars = u.numpy()
# Visualize model
plot_gp(model, inducing_vars, inputs, title)
return model, inputs, u_mean, inducing_vars
# This function is our main metric for the performance of the acquisition function: The closer the model's best guess to the global minimum, the better.
# In[ ]:
def best_guess(model):
"""
Returns a GP model's best guess of the global maximum of f.
"""
# CHANGE 7: use a discrete grid
xx = PBO.models.learning_fullgp.get_all_discrete_inputs(objective_low, objective_high, objective_dim, delta)
res = model.predict_f(xx)[0].numpy()
return xx[np.argmax(res)]
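# Added helper (not part of the original notebook): since the notebook states
# the six-hump-camel minima at roughly x = [0.0898, -0.7126] and
# x = [-0.0898, 0.7126], one simple way to score best_guess(model) is its
# distance to the nearer of the two known optima.
# In[ ]:
def guess_error(guess):
    known_optima = np.array([[0.0898, -0.7126], [-0.0898, 0.7126]])
    return np.min(np.linalg.norm(known_optima - guess, axis=1))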
# Store the results in these arrays:
# In[ ]:
num_data_at_end = int(num_init_prefs + num_evals)
X_results = np.zeros([num_runs, num_data_at_end, num_choices, input_dims])
y_results = np.zeros([num_runs, num_data_at_end, 1, input_dims])
best_guess_results = np.zeros([num_runs, num_evals, input_dims])
# Create the initial values for each run:
# In[ ]:
np.random.seed(0)
# CHANGE 8: just randomly initialize with some preference observation
init_vals = np.zeros([num_runs, num_init_prefs, num_choices, input_dims])
for run in range(num_runs):
for i in range(num_init_prefs):
init_vals[run,i] = PBO.models.learning_fullgp.get_random_inputs(
objective_low,
objective_high,
objective_dim,
delta,
size=num_choices,
with_replacement=False,
exclude_inputs=None)
# The following loops carry out the Bayesian optimization algorithm over a number of runs, with a fixed number of evaluations per run.
# In[ ]:
# CHANGE 9: need to store lengthscale and signal_variance from previous iteration to initialize the current iteration
lengthscale_init = None
signal_variance_init = None
for run in range(num_runs): # CHECK IF STARTING RUN IS CORRECT
print("")
print("==================")
print("Beginning run %s" % (run))
X = init_vals[run]
y = get_noisy_observation(X, objective)
model, inputs, u_mean, inducing_vars = train_and_visualize(X, y,
"Run_{}:_Initial_model".format(run))
# save optimized lengthscale and signal variance for next iteration
lengthscale_init = model.kernel.lengthscale.numpy()
signal_variance_init = model.kernel.variance.numpy()
for evaluation in range(num_evals):
print("Beginning evaluation %s" % (evaluation))
# Sample possible next queries
# CHANGE 10: use discrete grid
samples = PBO.models.learning_fullgp.sample_inputs(inputs.numpy(),
num_samples,
num_choices,
min_val=objective_low,
max_val=objective_high,
delta=delta)
# Sample maximizers
print("Evaluation %s: Sampling maximizers" % (evaluation))
maximizers = PBO.fourier_features.sample_maximizers(X=inducing_vars,
count=num_maximizers,
n_init=num_maximizers_init,
D=num_fourier_features,
model=model,
min_val=objective_low,
max_val=objective_high)
print(maximizers)
# Calculate PES value I for each possible next query
print("Evaluation %s: Calculating I" % (evaluation))
I_vals = PBO.acquisitions.pes.I_batch(samples, maximizers, model)
# Select query that maximizes I
next_idx = np.argmax(I_vals)
next_query = samples[next_idx]
print("Evaluation %s: Next query is %s with I value of %s" % (evaluation, next_query, I_vals[next_idx]))
        X = np.concatenate([X, [next_query]])
"""
Base classes for all discretize meshes
"""
import numpy as np
import properties
import os
import json
from ..utils import mkvc
from ..mixins import InterfaceMixins
class BaseMesh(properties.HasProperties, InterfaceMixins):
"""
BaseMesh does all the counting you don't want to do.
BaseMesh should be inherited by meshes with a regular structure.
"""
_REGISTRY = {}
# Properties
_n = properties.Array(
"number of cells in each direction (dim, )",
dtype=int,
required=True,
shape=('*',)
)
x0 = properties.Array(
"origin of the mesh (dim, )",
dtype=(float, int),
shape=('*',),
required=True,
)
# Instantiate the class
def __init__(self, n=None, x0=None, **kwargs):
if n is not None:
self._n = n # number of dimensions
if x0 is None:
self.x0 = np.zeros(len(self._n))
else:
self.x0 = x0
super(BaseMesh, self).__init__(**kwargs)
# Validators
@properties.validator('_n')
def _check_n_shape(self, change):
if not (
not isinstance(change['value'], properties.utils.Sentinel) and
change['value'] is not None
):
raise Exception("Cannot delete n. Instead, create a new mesh")
change['value'] = np.array(change['value'], dtype=int).ravel()
if len(change['value']) > 3:
raise Exception(
"Dimensions of {}, which is higher than 3 are not "
"supported".format(change['value'])
)
if np.any(change['previous'] != properties.undefined):
# can't change dimension of the mesh
if len(change['previous']) != len(change['value']):
raise Exception(
"Cannot change dimensionality of the mesh. Expected {} "
"dimensions, got {} dimensions".format(
len(change['previous']), len(change['value'])
)
)
# check that if h has been set, sizes still agree
if getattr(self, 'h', None) is not None and len(self.h) > 0:
for i in range(len(change['value'])):
if len(self.h[i]) != change['value'][i]:
raise Exception(
"Mismatched shape of n. Expected {}, len(h[{}]), got "
"{}".format(
len(self.h[i]), i, change['value'][i]
)
)
# check that if nodes have been set for curvi mesh, sizes still
# agree
if (
getattr(self, 'nodes', None) is not None and
len(self.nodes) > 0
):
for i in range(len(change['value'])):
if self.nodes[0].shape[i]-1 != change['value'][i]:
raise Exception(
"Mismatched shape of n. Expected {}, len(nodes[{}]), "
"got {}".format(
self.nodes[0].shape[i]-1, i, change['value'][i]
)
)
@properties.validator('x0')
def _check_x0(self, change):
if not (
not isinstance(change['value'], properties.utils.Sentinel) and
change['value'] is not None
):
raise Exception("n must be set prior to setting x0")
if len(self._n) != len(change['value']):
raise Exception(
"Dimension mismatch. x0 has length {} != len(n) which is "
"{}".format(len(x0), len(n))
)
@property
def dim(self):
"""The dimension of the mesh (1, 2, or 3).
Returns
-------
int
dimension of the mesh
"""
return len(self._n)
@property
def nC(self):
"""Total number of cells in the mesh.
Returns
-------
int
number of cells in the mesh
Examples
--------
.. plot::
:include-source:
import discretize
import numpy as np
mesh = discretize.TensorMesh([np.ones(n) for n in [2,3]])
mesh.plotGrid(centers=True, show_it=True)
print(mesh.nC)
"""
return int(self._n.prod())
@property
def nN(self):
"""Total number of nodes
Returns
-------
int
number of nodes in the mesh
Examples
--------
.. plot::
:include-source:
import discretize
import numpy as np
mesh = discretize.TensorMesh([np.ones(n) for n in [2,3]])
mesh.plotGrid(nodes=True, show_it=True)
print(mesh.nN)
"""
return int((self._n+1).prod())
@property
def nEx(self):
"""Number of x-edges
Returns
-------
nEx : int
"""
return int((self._n + np.r_[0, 1, 1][:self.dim]).prod())
@property
def nEy(self):
"""Number of y-edges
Returns
-------
nEy : int
"""
if self.dim < 2:
return None
return int((self._n + np.r_[1, 0, 1][:self.dim]).prod())
@property
def nEz(self):
"""Number of z-edges
Returns
-------
nEz : int
"""
if self.dim < 3:
return None
return int((self._n + np.r_[1, 1, 0][:self.dim]).prod())
@property
def vnE(self):
"""Total number of edges in each direction
Returns
-------
vnE : numpy.ndarray = [nEx, nEy, nEz], (dim, )
.. plot::
:include-source:
import discretize
import numpy as np
M = discretize.TensorMesh([np.ones(n) for n in [2,3]])
M.plotGrid(edges=True, show_it=True)
"""
return np.array(
[x for x in [self.nEx, self.nEy, self.nEz] if x is not None],
dtype=int
)
@property
def nE(self):
"""Total number of edges.
Returns
-------
nE : int = sum([nEx, nEy, nEz])
"""
return int(self.vnE.sum())
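# Editor's worked example (illustrative; it follows directly from the formulas
# above, not from the original docstrings): for a 2D mesh with _n = [2, 3],
# nEx = 2*4 = 8, nEy = 3*3 = 9 and nEz is None, so vnE = [8, 9] and nE = 17.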
@property
def nFx(self):
"""Number of x-faces
:rtype: int
:return: nFx
"""
return int((self._n + np.r_[1, 0, 0][:self.dim]).prod())
@property
def nFy(self):
"""Number of y-faces
:rtype: int
:return: nFy
"""
if self.dim < 2:
return None
return int((self._n + np.r_[0, 1, 0][:self.dim]).prod())
@property
def nFz(self):
"""Number of z-faces
:rtype: int
:return: nFz
"""
if self.dim < 3:
return None
return int((self._n + np.r_[0, 0, 1][:self.dim]).prod())
@property
def vnF(self):
"""Total number of faces in each direction
:rtype: numpy.ndarray
:return: [nFx, nFy, nFz], (dim, )
.. plot::
:include-source:
import discretize
import numpy as np
M = discretize.TensorMesh([np.ones(n) for n in [2,3]])
M.plotGrid(faces=True, show_it=True)
"""
return np.array(
[x for x in [self.nFx, self.nFy, self.nFz] if x is not None],
dtype=int
)
@property
def nF(self):
"""Total number of faces.
:rtype: int
:return: sum([nFx, nFy, nFz])
"""
return int(self.vnF.sum())
@property
def normals(self):
"""Face Normals
:rtype: numpy.ndarray
:return: normals, (sum(nF), dim)
"""
if self.dim == 2:
nX = np.c_[
np.ones(self.nFx), np.zeros(self.nFx)
]
nY = np.c_[
np.zeros(self.nFy), np.ones(self.nFy)
]
return np.r_[nX, nY]
elif self.dim == 3:
nX = np.c_[
np.ones(self.nFx), np.zeros(self.nFx), np.zeros(self.nFx)
]
nY = np.c_[
np.zeros(self.nFy), np.ones(self.nFy), np.zeros(self.nFy)
]
nZ = np.c_[
np.zeros(self.nFz),
|
np.zeros(self.nFz)
|
numpy.zeros
|
"""Analysis for the integrated gradients lines.
"""
import numpy as np
import svgwrite
from tqdm import tqdm
from pysyrenn import IntegratedGradients
from experiments.experiment import Experiment
import experiments.integral_approximations as integral_approximations
import gc
class IntegratedGradientsExperiment(Experiment):
"""Runs experiments from Section 4 of [1].
In that section, we report:
0. A figure going from black to a 1 with line ticks in place. (Figure 3)
1. Mean relative error of the m-tilde approach on CIFAR10
conv{small,medium,big}. (Table 1)
2. Average number of linear partitions for CIFAR10 conv{small,medium,big}.
(Table 2)
3. Average number of samples needed to get within 5% for the next 5 steps
on CIFAR10 conv{small,medium,big}. Outliers needing >1k samples removed.
Separate results for {left,right,trap} sampling. (Table 2)
Note that the cifar10_relu_convbig_diffai model can use large amounts of
memory when computing all desired gradients at once. To prevent the script
from running out of memory we have made sure that everywhere we compute a
potentially-large number of gradients that we batch the computations
effectively and explicitly free large chunks of memory as soon as possible.
"""
@staticmethod
def mean_error(attributions, reference):
"""Computes average relative error between attributions and reference.
@reference is taken to be the ground truth, while @attributions is
taken to be the "measurement."
"""
return np.mean(np.abs(attributions - reference)
/ np.abs((reference + 10e-12)))
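# Editor's note (illustrative example, not part of the original experiment):
# with attributions = np.array([1.1, 2.0]) and reference = np.array([1.0, 2.0]),
# the element-wise relative errors are [0.1, 0.0], so mean_error returns 0.05.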
def batched_IG(self, network, baseline, delta, label,
sample_ratios, weights):
"""Efficiently approximates IG with multiple approximation methods.
NOTE: This function actually returns multiple different approximations,
one for each list in @weights. The idea is that @weights will be a
list-of-lists-of-floats, with each sub-list corresponding to a single
sampling method. So you can compute the approximations for all methods
simultaneously.
Arguments
=========
- @network is the network to compute the IG for.
- @baseline is the baseline to use.
- @delta is (@image - @baseline), pixel-wise.
- @label is the index of the output to use for computing the gradient.
- @sample_ratios is a list of floats between 0.0 and 1.0, indicating
the ratios from baseline -> image that should be used for sampling.
- @weights is a list-of-list-of-floats. Each inner list should have the
same length as @sample_ratios, be non-negative, and (for most
approximation methods) sum to 1.0.
Return Value
============
A list of Numpy arrays of the same shape as @baseline, one per sub-list
in @weights. The ith return value corresponds to the approximation
using weights[i].
"""
attributions = [np.zeros_like(baseline) for _ in weights]
for batch_start in range(0, len(sample_ratios), self.batch_size):
batch_end = batch_start + self.batch_size
batch_ratios = sample_ratios[batch_start:batch_end]
sample_points = baseline + np.outer(batch_ratios, delta)
gradients = network.compute_gradients(sample_points, label)
for i in range(len(weights)):
batch_weights = weights[i][batch_start:batch_end]
attributions[i] += np.dot(batch_weights, gradients)
del gradients
gc.collect()
for i in range(len(attributions)):
attributions[i] *= delta
return attributions
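# Editor's sketch of how batched_IG can be driven (hypothetical values; in the
# experiment the ratios and weights come from experiments.integral_approximations).
# A plain left-Riemann rule with m partitions would use
#     sample_ratios = [k / m for k in range(m)]
#     weights       = [[1.0 / m] * m]
# i.e. one approximation of the path integral of the gradient from baseline to
# image, which batched_IG then scales element-wise by delta.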
def n_samples_to_5pct(self, network, baseline, image, label, exact_IG):
"""Returns number of samples needed to be "safely" within 5%.
Returns a dictionary of the form:
dict({method: n_{method} for method in self.sample_types})
Where n_{method} is the minimum integer such that sampling with
n_{method}, n_{method} + 1, ..., n_{method} + 4 samples using type
{method} all produce attribution maps within 5% mean relative error of
exact_IG.
"""
delta = image - baseline
def compute_IG_error(n_partitions, sample_types):
"""Returns the mean errors when using @n_partitions partitions.
@sample_types should be a list of strings, eg. ["left", "right"].
The return value is a list with the same length as @sample_types,
each entry being a float with the relative error of using that
sampling method with the specified number of partitions.
"""
ratios, weights = integral_approximations.parameters(n_partitions,
sample_types)
attributions = self.batched_IG(
network, baseline, delta, label, ratios, weights)
return [self.mean_error(approx_IG, exact_IG)
for approx_IG in attributions]
def check_for_stable(status_array, around_index):
"""Checks a status-array (see below) for a valid n_{type} index.
Assumes that any such index will be in the region around
@around_index +- 6.
Returns the index if found, otherwise False.
"""
in_a_row = 0
for i in range(around_index - 6, around_index + 6):
if i > (len(status_array) - 1):
break
if status_array[i] == +1:
in_a_row += 1
else:
in_a_row = 0
if in_a_row == 5:
return (i + 1) - in_a_row
return False
status = {
"left": np.zeros(1006, dtype=np.uint8),
"right": np.zeros(1006, dtype=np.uint8),
"trap": np.zeros(1006, dtype=np.uint8),
# If the user selects the option prompted in self.run() then these
# two entries will be ignored (as they won't be in
# self.sample_types).
"simpson":
|
np.zeros(1006, dtype=np.uint8)
|
numpy.zeros
|
import os
import warnings
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
import matplotlib.patches as patches
from sklearn.cluster import MeanShift
from sklearn.neighbors.kde import KernelDensity
latex_fonts = {
'mathtext.fontset': 'cm', # or 'stix'
'font.family': 'cmss10', # or 'STIXGeneral'
"text.usetex": True,
"axes.labelsize": 10,
"font.size": 10,
"legend.fontsize": 10,
}
mpl.rcParams.update(latex_fonts)
class InverseBallisticsModel():
n_parameters = 4
n_observations = 1
name = 'inverse-ballistics'
def __init__(self, g=9.81, k=0.25, m=0.2):
self.name = 'inverse-ballistics'
self.g = g # gravity
self.k = k # drag coefficient dependent on object shape and traversed medium
self.m = m # object mass
self.xy_mu = np.array((0, 1.5))
self.xy_std = np.array((0.5, 0.5))
def sample_prior(self, N):
x = np.random.randn(N, 1) * self.xy_std[0] + self.xy_mu[0]
y = np.random.randn(N, 1) * self.xy_std[1] + self.xy_mu[1]
y = np.maximum(y, 0)
angle = np.random.rand(N, 1) * np.pi/2 * 0.8 + np.pi/2 * 0.1
v0 =
|
np.random.poisson(15, (N, 1))
|
numpy.random.poisson
|
# File: pf.py
# Functions for gnuradio-companion PAM p(t) generation
import numpy as np
def pampt(sps, ptype, pparms=[]):
"""
PAM pulse p(t) = p(n*TB/sps) generation
>>>>> pt = pampt(sps, ptype, pparms) <<<<<
where sps: samples per symbol (sps = Fs/FB, see Note below)
ptype: pulse type ('rect', 'tri', 'rcf')
pparms not used for 'rect', 'tri'
pparms = [k, beta] for 'rcf'
k: "tail" truncation parameter for 'sinc' (truncates p(t) to -k*sps <= n < k*sps)
beta: roll-off factor
pt: pulse p(t) at t=n*TB/sps
Note: In terms of sampling rate Fs and baud rate FB,
sps = Fs/FB
"""
if ptype.lower() == 'rect':
nn =
|
np.arange(0,sps)
|
numpy.arange
|
import numpy as np
from scipy.io import loadmat
from scipy.optimize import minimize
from sklearn.svm import SVC
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pickle
def preprocess():
"""
Input:
Although this function doesn't have any input, you are required to load
the MNIST data set from file 'mnist_all.mat'.
Output:
train_data: matrix of training set. Each row of train_data contains
feature vector of an image
train_label: vector of label corresponding to each image in the training
set
validation_data: matrix of validation set. Each row of validation_data
contains feature vector of an image
validation_label: vector of label corresponding to each image in the
validation set
test_data: matrix of testing set. Each row of test_data contains
feature vector of an image
test_label: vector of label corresponding to each image in the testing
set
"""
mat = loadmat('mnist_all.mat') # loads the MAT object as a Dictionary
n_feature = mat.get("train1").shape[1]
n_sample = 0
for i in range(10):
n_sample = n_sample + mat.get("train" + str(i)).shape[0]
n_validation = 1000
n_train = n_sample - 10 * n_validation
# Construct validation data
validation_data = np.zeros((10 * n_validation, n_feature))
for i in range(10):
validation_data[i * n_validation:(i + 1) * n_validation, :] = mat.get("train" + str(i))[0:n_validation, :]
# Construct validation label
validation_label = np.ones((10 * n_validation, 1))
for i in range(10):
validation_label[i * n_validation:(i + 1) * n_validation, :] = i * np.ones((n_validation, 1))
# Construct training data and label
train_data =
|
np.zeros((n_train, n_feature))
|
numpy.zeros
|
"""
Fully Implicit, Relativistic, Covariant Particle-in-Cell - 2D3V Electromagnetic - 2 species
Authors: <NAME>, <NAME>, <NAME>
Date: 23 Jan 2021
Copyright 2020 KULeuven
MIT License.
"""
import numpy as np
from scipy.optimize import newton_krylov, minimize
from numpy import cosh, zeros_like, mgrid, zeros, ones
import matplotlib.pyplot as plt
import time
import sys
#TODO: Fix cart to gen and gen to cart
#TODO: Build geom needs to be fixed bc we currently use wrong map
#TODO: Fix curl (dual basis and normalisation)
PATH1 = '/Users/luca_pezzini/Documents/Code/cov_pic-2d/figures/'
# metric flag
perturb = True # perturbed metric tensor
# method flags
NK_method = False
Picard = True
# physics flags
electron_and_ion = True # background of ions when QM1=QM2=-1
stable_plasma = True # stable plasma set up
couter_stream_inst = False # counterstream inst. set up
landau_damping = False # landau damping set up
relativistic = False # relativisitc set up
# plot flags
log_file = True # to save the log file in PATH1
plot_dir = True # to save the plots in PATH1
plot_each_step = False # to visualise each time step (memory consuming)
plot_data = False # to plot data in PATH1
# parameters
nx, ny = 50, 50
nxc, nyc = nx, ny
nxn, nyn = nxc+1, nyc+1
Lx, Ly = 10., 10.
dx, dy = Lx/nxc, Ly/nyc
dt = 0.05
nt = 201
ndpi = 100 # number of dpi per img (stay low 100 for monitoring purpose!)
every = 10 # how often to plot
eps = 0.5 # amplitude of the perturbation
n = 1. # mode of oscillation
B0 = 0.01 # B field perturbation
# Constraint: nppc must be a perfect square (4, 16, 64) because particles are
# spread over a squared grid
nppc = 0 # number particles per cell per species
V0 = 1. # stream velocity magnitude
alpha = 0.1 # attenuation of VT respect V0
# Species 1
npart1 = nx * ny * nppc
WP1 = 1. # Plasma frequency
QM1 = -1. # Charge/mass ratio
V0x1 = V0 # Stream velocity
V0y1 = V0 # Stream velocity
V0z1 = V0 # Stream velocity
VT1 = alpha*V0 # thermal velocity
# Species 2
npart2 = npart1
WP2 = 1. # Plasma frequency
QM2 = -1. # Charge/mass ratio
V0x2 = V0 # Stream velocity
V0y2 = V0 # Stream velocity
V0z2 = V0 # Stream velocity
VT2 = alpha*V0 # thermal velocity
npart = npart1 + npart2
QM = zeros(npart, np.float64)
QM[0:npart1] = QM1
QM[npart1:npart] = QM2
# INIT PARTICLES
np.random.seed(1)
if nppc==0:
dxp = 0.
dyp = 0.
else:
dxp = Lx/np.sqrt(npart1)
dyp = Ly/np.sqrt(npart1)
xp, yp = mgrid[dxp/2.:Lx-dxp/2.:(np.sqrt(npart1)*1j), dyp/2.:Ly-dyp/2.:(np.sqrt(npart1)*1j)]
x = zeros(npart, np.float64)
x[0:npart1] = xp.reshape(npart1)
x[0:npart1] = Lx*np.random.rand(npart1)
x[npart1:npart] = x[0:npart1]
y = zeros(npart, np.float64)
y[0:npart1] = yp.reshape(npart1)
y[0:npart1] = Ly*np.random.rand(npart1)
y[npart1:npart] = y[0:npart1]
u = zeros(npart, np.float64)
if stable_plasma:
u[0:npart1] = VT1*np.random.randn(npart1)
u[npart1:npart] = VT2*np.random.randn(npart2)
if couter_stream_inst:
u[0:npart1] = V0x1+VT1*np.random.randn(npart1)
u[npart1:npart] = V0x2+VT2*np.random.randn(npart2)
# velocity in the odd position are negative
u[1:npart:2] = - u[1:npart:2]
# to guarantee 50% of +u0 to e- and the other 50% to e+ and same for -u0
np.random.shuffle(u)
v = zeros(npart, np.float64)
v[0:npart1] = VT1*np.random.randn(npart1)
v[npart1:npart] = VT2*np.random.randn(npart2)
if landau_damping:
v[0:npart1] = V0y1+VT1*np.sin(x[0:npart1]/Lx)
v[npart1:npart] = V0y2+VT2*np.sin(x[npart1:npart]/Lx)
w = zeros(npart, np.float64)
w[0:npart1] = VT1*np.random.randn(npart1)
w[npart1:npart] = VT2*np.random.randn(npart2)
q = zeros(npart, np.float64)
q[0:npart1] = np.ones(npart1)*WP1**2/(QM1*npart1/Lx/Ly)
q[npart1:npart] = np.ones(npart2)*WP2**2/(QM2*npart2/Lx/Ly)
if relativistic:
g = 1./np.sqrt(1.-(u**2+v**2+w**2))
u = u*g
v = v*g
w = w*g
# INIT LOGIC GRID
# grid of left-right faces LR
xiLR, etaLR = mgrid[0.:Lx:(nxn * 1j), dy / 2.:Ly - dy / 2.:(nyc * 1j)]
# grid of up-down faces UD
xiUD, etaUD = mgrid[dx / 2.:Lx - dx / 2.:(nxc * 1j), 0.:Ly:(nyn * 1j)]
# grid of centres c
xiC, etaC = mgrid[dx / 2.:Lx - dx / 2.:(nxc * 1j), dy / 2.:Ly - dy / 2.:(nyc * 1j)]
# grid of corners n
xiN, etaN = mgrid[0.:Lx:(nxn * 1j), 0.:Ly:(nyn * 1j)]
# INIT PHYS GRID
# Unperturbed, the physical grid and logical grid are identical
xLR = xiLR.copy()
xUD = xiUD.copy()
xC = xiC.copy()
xN = xiN.copy()
yLR = etaLR.copy()
yUD = etaUD.copy()
yC = etaC.copy()
yN = etaN.copy()
# INIT FIELDS
# defined on grid LR: Ex, Jx, By
# defined on grid UD: Ey, Jy, Bx
# defined on grid centres c: Ez, Jz, rho
# defined on grid corners n: Bz
E1 = zeros(np.shape(xiLR), np.float64)
E2 = zeros(np.shape(xiUD), np.float64)
E3 = zeros(np.shape(xiC), np.float64)
B1 = zeros(np.shape(xiUD), np.float64)
B2 = zeros(np.shape(xiLR), np.float64)
B3 = zeros(np.shape(xiN), np.float64)
#time series
E1time = zeros(nt+1, np.float64)
E2time = zeros(nt+1, np.float64)
E3time = zeros(nt+1, np.float64)
B1time = zeros(nt+1, np.float64)
B2time = zeros(nt+1, np.float64)
B3time = zeros(nt+1, np.float64)
if nppc==0:
# delta perturbation of magnetic field
B3[int((nx)/2), int((ny)/2)] = B0
# double sinusoidal perturbation
#B3 = B0 * np.sin(2. * np.pi * n * xiN / Lx) * np.sin(2. * np.pi * n * etaN / Ly)
rho = zeros(np.shape(xiC), np.float64)
rho_ion = zeros(np.shape(xiC), np.float64)
# INIT JACOBIAN MATRIX
# defined on grid LR: (j11e, j21e, j31e)E1, J1
# (j11b, j21b, j31b)B1
# defined on grid UD: (J12e, J22e, J32e)E2, J2
# (J12b, J22b, J32b)B2
# defined on grid centres c: (J13e, J23e, J33e)E3, J3
# defined on grid nodes n: (J13b, J23b, J33b)B3
J11_LR, J22_LR, J33_LR = (np.ones_like(xiLR) for i in range(3))
J12_LR, J13_LR, J21_LR, J23_LR, J31_LR, J32_LR = (np.zeros_like(xiLR, np.float64) for i in range(6))
J11_UD, J22_UD, J33_UD = (np.ones_like(xiUD) for i in range(3))
J12_UD, J13_UD, J21_UD, J23_UD, J31_UD, J32_UD = (np.zeros_like(xiUD, np.float64) for i in range(6))
J11_C, J22_C, J33_C = (np.ones_like(xiC) for i in range(3))
J12_C, J13_C, J21_C, J23_C, J31_C, J32_C = (np.zeros_like(xiC, np.float64) for i in range(6))
J11_N, J22_N, J33_N = (np.ones_like(xiN) for i in range(3))
J12_N, J13_N, J21_N, J23_N, J31_N, J32_N = (np.zeros_like(xiN, np.float64) for i in range(6))
"""
if perturb:
# simusoidal perturbation map
J11_LR = 1. + 2. * np.pi * eps * np.cos(2. * np.pi * xiLR / Lx) * np.sin(2. * np.pi * etaLR / Ly) / Lx
J12_LR = 2. * np.pi * eps * np.sin(2. * np.pi * xiLR / Lx) * np.cos(2. * np.pi * etaLR / Ly) / Ly
J13_LR = np.zeros(np.shape(xiLR), np.float64)
J21_LR = 2. * np.pi * eps * np.cos(2. * np.pi * xiLR / Lx) * np.sin(2. * np.pi * etaLR / Ly) / Lx
J22_LR = 1. + 2. * np.pi * eps * np.sin(2. * np.pi * xiLR / Lx) * np.cos(2. * np.pi * etaLR / Ly) / Ly
J23_LR = np.zeros(np.shape(xiLR), np.float64)
J31_LR = np.zeros(np.shape(xiLR), np.float64)
J32_LR = np.zeros(np.shape(xiLR), np.float64)
J33_LR = np.ones(np.shape(xiLR), np.float64)
J11_UD = 1. + 2. * np.pi * eps * np.cos(2. * np.pi * xiUD / Lx) * np.sin(2. * np.pi * etaUD / Ly) / Lx
J12_UD = 2. * np.pi * eps * np.sin(2. * np.pi * xiUD / Lx) * np.cos(2. * np.pi * etaUD / Ly) / Ly
J13_UD = np.zeros(np.shape(xiUD), np.float64)
J21_UD = 2. * np.pi * eps * np.cos(2. * np.pi * xiUD / Lx) * np.sin(2. * np.pi * etaUD / Ly) / Lx
J22_UD = 1. + 2. * np.pi * eps * np.sin(2. * np.pi * xiUD / Lx) * np.cos(2. * np.pi * etaUD / Ly) / Ly
J23_UD = np.zeros(np.shape(xiUD), np.float64)
J31_UD = np.zeros(np.shape(xiUD), np.float64)
J32_UD = np.zeros(np.shape(xiUD), np.float64)
J33_UD = np.ones(np.shape(xiUD), np.float64)
J11_C = 1. + 2. * np.pi * eps * np.cos(2. * np.pi * xiC / Lx) * np.sin(2. * np.pi * etaC / Ly) / Lx
J12_C = 2. * np.pi * eps * np.sin(2. * np.pi * xiC / Lx) * np.cos(2. * np.pi * etaC / Ly) / Ly
J13_C = np.zeros(np.shape(xiC), np.float64)
J21_C = 2. * np.pi * eps * np.cos(2. * np.pi * xiC / Lx) * np.sin(2. * np.pi * etaC / Ly) / Lx
J22_C = 1. + 2. * np.pi * eps * np.sin(2. * np.pi * xiC / Lx) * np.cos(2. * np.pi * etaC / Ly) / Ly
J23_C = np.zeros(np.shape(xiC), np.float64)
J31_C = np.zeros(np.shape(xiC), np.float64)
J32_C = np.zeros(np.shape(xiC), np.float64)
J33_C = np.ones(np.shape(xiC), np.float64)
J11_N = 1. + 2. * np.pi * eps * np.cos(2. * np.pi * xiN / Lx) * np.sin(2. * np.pi * etaN / Ly) / Lx
J12_N = 2. * np.pi * eps * np.sin(2. * np.pi * xiN / Lx) * np.cos(2. * np.pi * etaN / Ly) / Ly
J13_N = np.zeros(np.shape(xiN), np.float64)
J21_N = 2. * np.pi * eps * np.cos(2. * np.pi * xiN / Lx) * np.sin(2. * np.pi * etaN / Ly) / Lx
J22_N = 1. + 2. * np.pi * eps * np.sin(2. * np.pi * xiN / Lx) * np.cos(2. * np.pi * etaN / Ly) / Ly
J23_N = np.zeros(np.shape(xiN), np.float64)
J31_N = np.zeros(np.shape(xiN), np.float64)
J32_N = np.zeros(np.shape(xiN), np.float64)
J33_N = np.ones(np.shape(xiN), np.float64)
else:
# identity matrix
J11_LR = np.ones(np.shape(xiLR), np.float64)
J12_LR = np.zeros(np.shape(xiLR), np.float64)
J13_LR = np.zeros(np.shape(xiLR), np.float64)
J21_LR = np.zeros(np.shape(xiLR), np.float64)
J22_LR = np.ones(np.shape(xiLR), np.float64)
J23_LR = np.zeros(np.shape(xiLR), np.float64)
J31_LR = np.zeros(np.shape(xiLR), np.float64)
J32_LR = np.zeros(np.shape(xiLR), np.float64)
J33_LR = np.ones(np.shape(xiLR), np.float64)
J11_UD = np.ones(np.shape(xiUD), np.float64)
J12_UD = np.zeros(np.shape(xiUD), np.float64)
J13_UD = np.zeros(np.shape(xiUD), np.float64)
J21_UD = np.zeros(np.shape(xiUD), np.float64)
J22_UD = np.ones(np.shape(xiUD), np.float64)
J23_UD = np.zeros(np.shape(xiUD), np.float64)
J31_UD = np.zeros(np.shape(xiUD), np.float64)
J32_UD = np.zeros(np.shape(xiUD), np.float64)
J33_UD = np.ones(np.shape(xiUD), np.float64)
J11_C = np.ones(np.shape(xiC), np.float64)
J12_C = np.zeros(np.shape(xiC), np.float64)
J13_C = np.zeros(np.shape(xiC), np.float64)
J21_C = np.zeros(np.shape(xiC), np.float64)
J22_C = np.ones(np.shape(xiC), np.float64)
J23_C = np.zeros(np.shape(xiC), np.float64)
J31_C = np.zeros(np.shape(xiC), np.float64)
J32_C = np.zeros(np.shape(xiC), np.float64)
J33_C = np.ones(np.shape(xiC), np.float64)
J11_N = np.ones(np.shape(xiN), np.float64)
J12_N = np.zeros(np.shape(xiN), np.float64)
J13_N = np.zeros(np.shape(xiN), np.float64)
J21_N = np.zeros(np.shape(xiN), np.float64)
J22_N = np.ones(np.shape(xiN), np.float64)
J23_N = np.zeros(np.shape(xiN), np.float64)
J31_N = np.zeros(np.shape(xiN), np.float64)
J32_N = np.zeros(np.shape(xiN), np.float64)
J33_N = np.ones(np.shape(xiN), np.float64)
"""
# INIT JACOBIAN DETERMINANT
J_UD = np.ones(np.shape(xiUD), np.float64)
J_LR = np.ones(np.shape(xiLR), np.float64)
J_C = np.ones(np.shape(xiC), np.float64)
J_N = np.ones(np.shape(xiN), np.float64)
# DEFINE INVERSE JACOBIAN MATRIX
# defined on grid LR: (j11, j21, j31)E1, J1
# (j11, j21, j31)B1
# defined on grid UD: (j12, j22, j32)E2, J2
# (j12, j22, j32)B2
# defined on grid centres c: (j13, j23, j33)E3, J3
# defined on grid nodes n: (j13, j23, j33)B3
"""
j11_LR = np.zeros(np.shape(xiLR), np.float64)
j12_LR = np.zeros(np.shape(xiLR), np.float64)
j13_LR = np.zeros(np.shape(xiLR), np.float64)
j21_LR = np.zeros(np.shape(xiLR), np.float64)
j22_LR = np.zeros(np.shape(xiLR), np.float64)
j23_LR = np.zeros(np.shape(xiLR), np.float64)
j31_LR = np.zeros(np.shape(xiLR), np.float64)
j32_LR = np.zeros(np.shape(xiLR), np.float64)
j33_LR = np.zeros(np.shape(xiLR), np.float64)
j11_UD = np.zeros(np.shape(xiUD), np.float64)
j12_UD = np.zeros(np.shape(xiUD), np.float64)
j13_UD = np.zeros(np.shape(xiUD), np.float64)
j21_UD = np.zeros(np.shape(xiUD), np.float64)
j22_UD = np.zeros(np.shape(xiUD), np.float64)
j23_UD = np.zeros(np.shape(xiUD), np.float64)
j31_UD = np.zeros(np.shape(xiUD), np.float64)
j32_UD = np.zeros(np.shape(xiUD), np.float64)
j33_UD = np.zeros(np.shape(xiUD), np.float64)
j11_C = np.zeros(np.shape(xiC), np.float64)
j12_C = np.zeros(np.shape(xiC), np.float64)
j13_C = np.zeros(np.shape(xiC), np.float64)
j21_C = np.zeros(np.shape(xiC), np.float64)
j22_C = np.zeros(np.shape(xiC), np.float64)
j23_C = np.zeros(np.shape(xiC), np.float64)
j31_C = np.zeros(np.shape(xiC), np.float64)
j32_C = np.zeros(np.shape(xiC), np.float64)
j33_C = np.zeros(np.shape(xiC), np.float64)
j11_N = np.zeros(np.shape(xiN), np.float64)
j12_N = np.zeros(np.shape(xiN), np.float64)
j13_N = np.zeros(np.shape(xiN), np.float64)
j21_N = np.zeros(np.shape(xiN), np.float64)
j22_N = np.zeros(np.shape(xiN), np.float64)
j23_N = np.zeros(np.shape(xiN), np.float64)
j31_N = np.zeros(np.shape(xiN), np.float64)
j32_N = np.zeros(np.shape(xiN), np.float64)
j33_N = np.zeros(np.shape(xiN), np.float64)
"""
j11_LR, j22_LR, j33_LR = (np.ones_like(xiLR) for i in range(3))
j12_LR, j13_LR, j21_LR, j23_LR, j31_LR, j32_LR = (np.zeros_like(xiLR, np.float64) for i in range(6))
j11_UD, j22_UD, j33_UD = (np.ones_like(xiUD) for i in range(3))
j12_UD, j13_UD, j21_UD, j23_UD, j31_UD, j32_UD = (np.zeros_like(xiUD, np.float64) for i in range(6))
j11_C, j22_C, j33_C = (np.ones_like(xiC) for i in range(3))
j12_C, j13_C, j21_C, j23_C, j31_C, j32_C = (np.zeros_like(xiC, np.float64) for i in range(6))
j11_N, j22_N, j33_N = (np.ones_like(xiN) for i in range(3))
j12_N, j13_N, j21_N, j23_N, j31_N, j32_N = (np.zeros_like(xiN, np.float64) for i in range(6))
# INIT INVERSE JACOBIAN DETERMINANT
j_LR = np.ones(np.shape(xiLR), np.float64)
j_UD = np.ones(np.shape(xiUD), np.float64)
j_C = np.ones(np.shape(xiC), np.float64)
j_N = np.ones(np.shape(xiN), np.float64)
# INIT METRIC TENSOR
# defined on grid LR: (g11, g21, g31)E1
# (g11, g21, g31)B1
# defined on grid UD: (g12, g22, g32)E2
# (g12, g22, g32)B2
# defined on grid centres c: (g13, g23, g33)E3
# defined on grid corners n: (g13, g23, g33)B3
"""
g11_LR = np.zeros(np.shape(xiLR), np.float64)
g12_LR = np.zeros(np.shape(xiLR), np.float64)
g13_LR = np.zeros(np.shape(xiLR), np.float64)
g21_LR = np.zeros(np.shape(xiLR), np.float64)
g22_LR = np.zeros(np.shape(xiLR), np.float64)
g23_LR = np.zeros(np.shape(xiLR), np.float64)
g31_LR = np.zeros(np.shape(xiLR), np.float64)
g32_LR = np.zeros(np.shape(xiLR), np.float64)
g33_LR = np.zeros(np.shape(xiLR), np.float64)
g11_UD = np.zeros(np.shape(xiUD), np.float64)
g12_UD = np.zeros(np.shape(xiUD), np.float64)
g13_UD = np.zeros(np.shape(xiUD), np.float64)
g21_UD = np.zeros(np.shape(xiUD), np.float64)
g22_UD = np.zeros(np.shape(xiUD), np.float64)
g23_UD = np.zeros(np.shape(xiUD), np.float64)
g31_UD = np.zeros(np.shape(xiUD), np.float64)
g32_UD = np.zeros(np.shape(xiUD), np.float64)
g33_UD = np.zeros(np.shape(xiUD), np.float64)
g11_C = np.zeros(np.shape(xiC), np.float64)
g12_C = np.zeros(np.shape(xiC), np.float64)
g13_C = np.zeros(np.shape(xiC), np.float64)
g21_C = np.zeros(np.shape(xiC), np.float64)
g22_C = np.zeros(np.shape(xiC), np.float64)
g23_C = np.zeros(np.shape(xiC), np.float64)
g31_C = np.zeros(np.shape(xiC), np.float64)
g32_C = np.zeros(np.shape(xiC), np.float64)
g33_C = np.zeros(np.shape(xiC), np.float64)
g11_N = np.zeros(np.shape(xiN), np.float64)
g12_N = np.zeros(np.shape(xiN), np.float64)
g13_N = np.zeros(np.shape(xiN), np.float64)
g21_N = np.zeros(np.shape(xiN), np.float64)
g22_N = np.zeros(np.shape(xiN), np.float64)
g23_N = np.zeros(np.shape(xiN), np.float64)
g31_N = np.zeros(np.shape(xiN), np.float64)
g32_N = np.zeros(np.shape(xiN), np.float64)
g33_N = np.zeros(np.shape(xiN), np.float64)
"""
g11_LR, g22_LR, g33_LR = (np.ones_like(xiLR) for i in range(3))
g12_LR, g13_LR, g21_LR, g23_LR, g31_LR, g32_LR = (np.zeros_like(xiLR, np.float64) for i in range(6))
g11_UD, g22_UD, g33_UD = (np.ones_like(xiUD) for i in range(3))
g12_UD, g13_UD, g21_UD, g23_UD, g31_UD, g32_UD = (np.zeros_like(xiUD, np.float64) for i in range(6))
g11_C, g22_C, g33_C = (np.ones_like(xiC) for i in range(3))
g12_C, g13_C, g21_C, g23_C, g31_C, g32_C = (np.zeros_like(xiC, np.float64) for i in range(6))
g11_N, g22_N, g33_N = (np.ones_like(xiN) for i in range(3))
g12_N, g13_N, g21_N, g23_N, g31_N, g32_N = (np.zeros_like(xiN, np.float64) for i in range(6))
# Divergence
# defined on grid c:
divE = zeros(nt, np.float64)
divE_rho = zeros(nt, np.float64)
# defined on grid n:
divB = zeros(nt, np.float64)
# Energy
energyP = zeros(nt, np.float64) # particles
energyP1 = zeros(nt, np.float64) # particles1
energyP2 = zeros(nt, np.float64) # particles2
energyE = zeros(nt, np.float64) # Total E
energyE1 = zeros(nt, np.float64) # E1 field
energyE2 = zeros(nt, np.float64) # E2 field
energyE3 = zeros(nt, np.float64) # E3 field
energyB = zeros(nt, np.float64) # B field
energyB1 = zeros(nt, np.float64) # B1 field
energyB2 = zeros(nt, np.float64) # B2 field
energyB3 = zeros(nt, np.float64) # B3 field
energyTot = zeros(nt, np.float64) # B field
momentumTot = zeros(nt, np.float64) # Total momentum
if log_file == True:
f = open(PATH1 + 'log_file.txt', 'w')
print('* METRIC:', file=f)
print('- perturbation: ', perturb, file=f)
print('* METHOD:', file=f)
print('- NK method: ', NK_method, file=f)
print('- Picard iteration: ', Picard, file=f)
print('* PHYSICS:', file=f)
print('- perturbation amplitude B0 (if nppc=0): ', B0, file=f)
print('- mode of oscillation (if nppc=0): ', n, file=f)
print('- stable plasma: ', stable_plasma, file=f)
print('- electrons & ions: ', electron_and_ion, file=f)
print('- counter stream inst.: ', couter_stream_inst, file=f)
print('- landau damping: ', landau_damping, file=f)
print('- relativistic: ', relativistic, file=f)
print('* PARAMETER:', file=f)
print('- number nodes (x-axes): ', nx, file=f)
print('- number nodes (y-axes): ', ny, file=f)
print('- length of the domain (x-axes): ', Lx, file=f)
print('- length of the domain (y-axes): ', Ly, file=f)
print('- time steps: ', dt, file=f)
print('- number of time steps: ', nt, file=f)
print('- number of part. per cell: ', nppc, file=f)
print('* SPECIES 1:', file=f)
print('- number of particles : ', npart1, file=f)
print('- plasma frequency : ', WP1, file=f)
print('- charge to mass : ', QM1, file=f)
print('- velocity field: ', '(', V0x1, ',', V0y1, ',', V0z1, ')', file=f)
print('- thermal velocity: ', VT1, file=f)
print('* SPECIES 2:', file=f)
print('- number of particles : ', npart2, file=f)
print('- plasma frequency : ', WP2, file=f)
print('- charge to mass : ', QM2, file=f)
print('- velocity field: ', '(', V0x2, ',', V0y2, ',', V0z2, ')', file=f)
print('- thermal velocity: ', VT2, file=f)
f.close()
def myplot_map(xgrid, ygrid, field, title='a', xlabel='b', ylabel='c'):
'''
To plot the map of a vector field over a grid.
'''
plt.figure()
plt.pcolor(xgrid, ygrid, field)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.colorbar()
def myplot_func(field, title= 'a', xlabel= 'b', ylabel= 'c'):
'''
To plot the behavior of a scalar field in time.
'''
plt.figure()
plt.plot(field)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
def myplot_particle_map(posx, posy):
'''
To plot particles position over the domain.
'''
plt.figure()
plt.plot(posx[0:npart1],posy[0:npart1],'b.')
plt.plot(posx[npart1:npart],posy[npart1:npart],'r.')
plt.xlim((0,Lx))
plt.ylim((0,Ly))
plt.title('Particles map')
plt.xlabel('x')
plt.ylabel('y')
def myplot_phase_space(pos, vel, limx=(0, 0), limy=(0, 0), xlabel='b', ylabel='c'):
'''To plot the phase space in one direction
'''
plt.figure()
plt.plot(pos[0:npart1], vel[0:npart1], 'b.')
plt.plot(pos[npart1:npart], vel[npart1:npart], 'r.')
plt.xlim(limx)
plt.ylim(limy)
plt.title('Particles map')
plt.xlabel(xlabel)
plt.ylabel(ylabel)
def perturbed_inverse_jacobian_elements(x, y):
j11 = 1. + 2. * np.pi * eps * np.cos(2. * np.pi * x / Lx) * np.sin(2. * np.pi * y / Ly) / Lx
j12 = 2. * np.pi * eps * np.sin(2. * np.pi * x / Lx) * np.cos(2. * np.pi * y / Ly) / Ly
j13 = np.zeros(np.shape(x), np.float64)
j21 = 2. * np.pi * eps * np.cos(2. * np.pi * x / Lx) * np.sin(2. * np.pi * y / Ly) / Lx
j22 = 1. + 2. * np.pi * eps * np.sin(2. * np.pi * x / Lx) * np.cos(2. * np.pi * y / Ly) / Ly
j23 = np.zeros(np.shape(x), np.float64)
j31 = np.zeros(np.shape(x), np.float64)
j32 = np.zeros(np.shape(x), np.float64)
j33 = np.ones(np.shape(x), np.float64)
return j11, j12, j13, j21, j22, j23, j31, j32, j33
def define_geometry():
'''To construct the structure of the general geometry (for each grid type):
- Get the Jacobian matrix and its determinant
- Get the inverse Jacobian matrix, isolate its components, and calculate its determinant
- Get the metric tensor components
'''
for i in range(np.shape(xiLR)[0]):
for j in range(np.shape(xiLR)[1]):
j11_LR[i, j], j12_LR[i, j], j13_LR[i, j], j21_LR[i, j], j22_LR[i, j], j23_LR[i, j], j31_LR[i, j], j32_LR[i, j], j33_LR[i, j] = perturbed_inverse_jacobian_elements(xLR[i,j], yLR[i, j])
inverse_jacobian_LR = np.array([[j11_LR[i, j], j12_LR[i, j], j13_LR[i, j]], [j21_LR[i, j], j22_LR[i, j], j23_LR[i, j]], [j31_LR[i, j], j32_LR[i, j], j33_LR[i, j]]])
jacobian_LR = np.linalg.inv(inverse_jacobian_LR)
j_LR[i, j] = np.linalg.det(inverse_jacobian_LR)
J_LR[i, j] = np.linalg.det(jacobian_LR)
J11_LR[i, j] = jacobian_LR[0, 0]
J21_LR[i, j] = jacobian_LR[1, 0]
J31_LR[i, j] = jacobian_LR[2, 0]
J12_LR[i, j] = jacobian_LR[0, 1]
J22_LR[i, j] = jacobian_LR[1, 1]
J32_LR[i, j] = jacobian_LR[2, 1]
J13_LR[i, j] = jacobian_LR[0, 2]
J23_LR[i, j] = jacobian_LR[1, 2]
J33_LR[i, j] = jacobian_LR[2, 2]
g11_LR[i, j] = jacobian_LR[0, 0] * jacobian_LR[0, 0] + jacobian_LR[1, 0] * jacobian_LR[1, 0] + jacobian_LR[2, 0] * jacobian_LR[2, 0]
g21_LR[i, j] = jacobian_LR[0, 0] * jacobian_LR[0, 1] + jacobian_LR[1, 0] * jacobian_LR[1, 1] + jacobian_LR[2, 0] * jacobian_LR[2, 1]
g31_LR[i, j] = jacobian_LR[0, 0] * jacobian_LR[0, 2] + jacobian_LR[1, 0] * jacobian_LR[1, 2] + jacobian_LR[2, 0] * jacobian_LR[2, 2]
g12_LR[i, j] = jacobian_LR[0, 0] * jacobian_LR[0, 1] + jacobian_LR[1, 0] * jacobian_LR[1, 1] + jacobian_LR[2, 0] * jacobian_LR[2, 1]
g22_LR[i, j] = jacobian_LR[0, 1] * jacobian_LR[0, 1] + jacobian_LR[1, 1] * jacobian_LR[1, 1] + jacobian_LR[2, 1] * jacobian_LR[2, 1]
g32_LR[i, j] = jacobian_LR[0, 1] * jacobian_LR[0, 2] + jacobian_LR[1, 1] * jacobian_LR[1, 2] + jacobian_LR[2, 1] * jacobian_LR[2, 2]
g13_LR[i, j] = jacobian_LR[0, 0] * jacobian_LR[0, 2] + jacobian_LR[1, 0] * jacobian_LR[1, 2] + jacobian_LR[2, 0] * jacobian_LR[2, 2]
g23_LR[i, j] = jacobian_LR[0, 1] * jacobian_LR[0, 2] + jacobian_LR[1, 1] * jacobian_LR[1, 2] + jacobian_LR[2, 1] * jacobian_LR[2, 2]
g33_LR[i, j] = jacobian_LR[0, 2] * jacobian_LR[0, 2] + jacobian_LR[1, 2] * jacobian_LR[1, 2] + jacobian_LR[2, 2] * jacobian_LR[2, 2]
for i in range(np.shape(xiUD)[0]):
for j in range(np.shape(xiUD)[1]):
j11_UD[i, j], j12_UD[i, j], j13_UD[i, j], j21_UD[i, j], j22_UD[i, j], j23_UD[i, j], j31_UD[i, j], j32_UD[i, j], j33_UD[i, j] = perturbed_inverse_jacobian_elements(xUD[i, j], yUD[i, j])
inverse_jacobian_UD = np.array([[j11_UD[i, j], j12_UD[i, j], j13_UD[i, j]], [j21_UD[i, j], j22_UD[i, j], j23_UD[i, j]],[j31_UD[i, j], j32_UD[i, j], j33_UD[i, j]]])
jacobian_UD = np.linalg.inv(inverse_jacobian_UD)
j_UD[i, j] = np.linalg.det(inverse_jacobian_UD)
J_UD[i, j] = np.linalg.det(jacobian_UD)
J11_UD[i, j] = jacobian_UD[0, 0]
J21_UD[i, j] = jacobian_UD[1, 0]
J31_UD[i, j] = jacobian_UD[2, 0]
J12_UD[i, j] = jacobian_UD[0, 1]
J22_UD[i, j] = jacobian_UD[1, 1]
J32_UD[i, j] = jacobian_UD[2, 1]
J13_UD[i, j] = jacobian_UD[0, 2]
J23_UD[i, j] = jacobian_UD[1, 2]
J33_UD[i, j] = jacobian_UD[2, 2]
g11_UD[i, j] = jacobian_UD[0, 0] * jacobian_UD[0, 0] + jacobian_UD[1, 0] * jacobian_UD[1, 0] + jacobian_UD[2, 0] * jacobian_UD[2, 0]
g21_UD[i, j] = jacobian_UD[0, 0] * jacobian_UD[0, 1] + jacobian_UD[1, 0] * jacobian_UD[1, 1] + jacobian_UD[2, 0] * jacobian_UD[2, 1]
g31_UD[i, j] = jacobian_UD[0, 0] * jacobian_UD[0, 2] + jacobian_UD[1, 0] * jacobian_UD[1, 2] + jacobian_UD[2, 0] * jacobian_UD[2, 2]
g12_UD[i, j] = jacobian_UD[0, 0] * jacobian_UD[0, 1] + jacobian_UD[1, 0] * jacobian_UD[1, 1] + jacobian_UD[2, 0] * jacobian_UD[2, 1]
g22_UD[i, j] = jacobian_UD[0, 1] * jacobian_UD[0, 1] + jacobian_UD[1, 1] * jacobian_UD[1, 1] + jacobian_UD[2, 1] * jacobian_UD[2, 1]
g32_UD[i, j] = jacobian_UD[0, 1] * jacobian_UD[0, 2] + jacobian_UD[1, 1] * jacobian_UD[1, 2] + jacobian_UD[2, 1] * jacobian_UD[2, 2]
g13_UD[i, j] = jacobian_UD[0, 0] * jacobian_UD[0, 2] + jacobian_UD[1, 0] * jacobian_UD[1, 2] + jacobian_UD[2, 0] * jacobian_UD[2, 2]
g23_UD[i, j] = jacobian_UD[0, 1] * jacobian_UD[0, 2] + jacobian_UD[1, 1] * jacobian_UD[1, 2] + jacobian_UD[2, 1] * jacobian_UD[2, 2]
g33_UD[i, j] = jacobian_UD[0, 2] * jacobian_UD[0, 2] + jacobian_UD[1, 2] * jacobian_UD[1, 2] + jacobian_UD[2, 2] * jacobian_UD[2, 2]
for i in range(np.shape(xiC)[0]):
for j in range(np.shape(xiC)[1]):
j11_C[i, j], j12_C[i, j], j13_C[i, j], j21_C[i, j], j22_C[i, j], j23_C[i, j], j31_C[i, j], j32_C[i, j], j33_C[i, j] = perturbed_inverse_jacobian_elements(xC[i, j], yC[i, j])
inverse_jacobian_C = np.array([[j11_C[i, j], j12_C[i, j], j13_C[i, j]], [j21_C[i, j], j22_C[i, j], j23_C[i, j]],[j31_C[i, j], j32_C[i, j], j33_C[i, j]]])
jacobian_C = np.linalg.inv(inverse_jacobian_C)
j_C[i, j] = np.linalg.det(inverse_jacobian_C)
J_C[i, j] = np.linalg.det(jacobian_C)
J11_C[i, j] = jacobian_C[0, 0]
J21_C[i, j] = jacobian_C[1, 0]
J31_C[i, j] = jacobian_C[2, 0]
J12_C[i, j] = jacobian_C[0, 1]
J22_C[i, j] = jacobian_C[1, 1]
J32_C[i, j] = jacobian_C[2, 1]
J13_C[i, j] = jacobian_C[0, 2]
J23_C[i, j] = jacobian_C[1, 2]
J33_C[i, j] = jacobian_C[2, 2]
g11_C[i, j] = jacobian_C[0, 0] * jacobian_C[0, 0] + jacobian_C[1, 0] * jacobian_C[1, 0] + jacobian_C[2, 0] * jacobian_C[2, 0]
g21_C[i, j] = jacobian_C[0, 0] * jacobian_C[0, 1] + jacobian_C[1, 0] * jacobian_C[1, 1] + jacobian_C[2, 0] * jacobian_C[2, 1]
g31_C[i, j] = jacobian_C[0, 0] * jacobian_C[0, 2] + jacobian_C[1, 0] * jacobian_C[1, 2] + jacobian_C[2, 0] * jacobian_C[2, 2]
g12_C[i, j] = jacobian_C[0, 0] * jacobian_C[0, 1] + jacobian_C[1, 0] * jacobian_C[1, 1] + jacobian_C[2, 0] * jacobian_C[2, 1]
g22_C[i, j] = jacobian_C[0, 1] * jacobian_C[0, 1] + jacobian_C[1, 1] * jacobian_C[1, 1] + jacobian_C[2, 1] * jacobian_C[2, 1]
g32_C[i, j] = jacobian_C[0, 1] * jacobian_C[0, 2] + jacobian_C[1, 1] * jacobian_C[1, 2] + jacobian_C[2, 1] * jacobian_C[2, 2]
g13_C[i, j] = jacobian_C[0, 0] * jacobian_C[0, 2] + jacobian_C[1, 0] * jacobian_C[1, 2] + jacobian_C[2, 0] * jacobian_C[2, 2]
g23_C[i, j] = jacobian_C[0, 1] * jacobian_C[0, 2] + jacobian_C[1, 1] * jacobian_C[1, 2] + jacobian_C[2, 1] * jacobian_C[2, 2]
g33_C[i, j] = jacobian_C[0, 2] * jacobian_C[0, 2] + jacobian_C[1, 2] * jacobian_C[1, 2] + jacobian_C[2, 2] * jacobian_C[2, 2]
for i in range(np.shape(xiN)[0]):
for j in range(np.shape(xiN)[1]):
j11_N[i, j], j12_N[i, j], j13_N[i, j], j21_N[i, j], j22_N[i, j], j23_N[i, j], j31_N[i, j], j32_N[i, j], j33_N[i, j] = perturbed_inverse_jacobian_elements(xN[i, j], yN[i, j])
inverse_jacobian_N = np.array([[j11_N[i, j], j12_N[i, j], j13_N[i, j]], [j21_N[i, j], j22_N[i, j], j23_N[i, j]],[j31_N[i, j], j32_N[i, j], j33_N[i, j]]])
jacobian_N = np.linalg.inv(inverse_jacobian_N)
j_N[i, j] = np.linalg.det(inverse_jacobian_N)
J_N[i, j] = np.linalg.det(jacobian_N)
J11_N[i, j] = jacobian_N[0, 0]
J21_N[i, j] = jacobian_N[1, 0]
J31_N[i, j] = jacobian_N[2, 0]
J12_N[i, j] = jacobian_N[0, 1]
J22_N[i, j] = jacobian_N[1, 1]
J32_N[i, j] = jacobian_N[2, 1]
J13_N[i, j] = jacobian_N[0, 2]
J23_N[i, j] = jacobian_N[1, 2]
J33_N[i, j] = jacobian_N[2, 2]
g11_N[i, j] = jacobian_N[0, 0] * jacobian_N[0, 0] + jacobian_N[1, 0] * jacobian_N[1, 0] + jacobian_N[2, 0] * jacobian_N[2, 0]
g21_N[i, j] = jacobian_N[0, 0] * jacobian_N[0, 1] + jacobian_N[1, 0] * jacobian_N[1, 1] + jacobian_N[2, 0] * jacobian_N[2, 1]
g31_N[i, j] = jacobian_N[0, 0] * jacobian_N[0, 2] + jacobian_N[1, 0] * jacobian_N[1, 2] + jacobian_N[2, 0] * jacobian_N[2, 2]
g12_N[i, j] = jacobian_N[0, 0] * jacobian_N[0, 1] + jacobian_N[1, 0] * jacobian_N[1, 1] + jacobian_N[2, 0] * jacobian_N[2, 1]
g22_N[i, j] = jacobian_N[0, 1] * jacobian_N[0, 1] + jacobian_N[1, 1] * jacobian_N[1, 1] + jacobian_N[2, 1] * jacobian_N[2, 1]
g32_N[i, j] = jacobian_N[0, 1] * jacobian_N[0, 2] + jacobian_N[1, 1] * jacobian_N[1, 2] + jacobian_N[2, 1] * jacobian_N[2, 2]
g13_N[i, j] = jacobian_N[0, 0] * jacobian_N[0, 2] + jacobian_N[1, 0] * jacobian_N[1, 2] + jacobian_N[2, 0] * jacobian_N[2, 2]
g23_N[i, j] = jacobian_N[0, 1] * jacobian_N[0, 2] + jacobian_N[1, 1] * jacobian_N[1, 2] + jacobian_N[2, 1] * jacobian_N[2, 2]
g33_N[i, j] = jacobian_N[0, 2] * jacobian_N[0, 2] + jacobian_N[1, 2] * jacobian_N[1, 2] + jacobian_N[2, 2] * jacobian_N[2, 2]
def cartesian_to_general(cartx, carty, cartz, fieldtype):
''' To convert fields from Cartesian coord. (x, y, z) to General Skew coord. (xi, eta, zeta)
fieldtype=='E' or 'J': input -> LR,UD,c, output -> LR,UD,c
fieldtype=='B': input -> UD,LR,n, output -> UD,LR,n
'''
if (fieldtype == 'E') or (fieldtype == 'J'):
carty_LR = avg(avg(carty, 'UD2C'), 'C2LR')
cartz_LR = avg(cartz, 'C2LR')
cartx_UD = avg(avg(cartx, 'LR2C'), 'C2UD')
cartz_UD = avg(cartz, 'C2UD')
cartx_C = avg(cartx, 'LR2C')
carty_C = avg(carty, 'UD2C')
genx1 = J11_LR * cartx + J12_LR * carty_LR + J13_LR * cartz_LR
genx2 = J21_UD * cartx_UD + J22_UD * carty + J23_UD * cartz_UD
genx3 = J31_C * cartx_C + J32_C * carty_C + J33_C * cartz
elif fieldtype == 'B':
carty_UD = avg(avg(carty, 'LR2C'), 'C2UD')
cartz_UD = avg(cartz, 'N2UD')
cartx_LR = avg(avg(cartx, 'UD2C'), 'C2LR')
cartz_LR = avg(cartz, 'N2LR')
cartx_N = avg(cartx, 'UD2N')
carty_N = avg(carty, 'LR2N')
genx1 = J11_UD * cartx + J12_UD * carty_UD + J13_UD * cartz_UD
genx2 = J21_LR * cartx_LR + J22_LR * carty + J23_LR * cartz_LR
genx3 = J31_N * cartx_N + J32_N * carty_N + J33_N * cartz
return genx1, genx2, genx3
def general_to_cartesian(genx1, genx2, genx3, fieldtype):
''' To convert fields from General coord. (xi, eta, zeta) to Cartesian coord (x, y, z)
fieldtype=='E' or 'J': input -> LR,UD,c, output -> LR,UD,c
fieldtype=='B': input -> UD,LR,n, output -> UD,LR,n
'''
if (fieldtype == 'E') or (fieldtype == 'J'):
genx2_LR = avg(avg(genx2, 'UD2C'), 'C2LR')
genx3_LR = avg(genx3, 'C2LR')
genx1_UD = avg(avg(genx1, 'LR2C'), 'C2UD')
genx3_UD = avg(genx3, 'C2UD')
genx1_C = avg(genx1, 'LR2C')
genx2_C = avg(genx2, 'UD2C')
cartx = j11_LR * genx1 + j12_LR * genx2_LR + j13_LR * genx3_LR
carty = j21_UD * genx1_UD + j22_UD * genx2 + j23_UD * genx3_UD
cartz = j31_C * genx1_C + j32_C * genx2_C + j33_C * genx3
elif fieldtype == 'B':
genx2_UD = avg(avg(genx2, 'LR2C'), 'C2UD')
genx3_UD = avg(genx3, 'N2UD')
genx1_LR = avg(avg(genx1, 'UD2C'), 'C2LR')
genx3_LR = avg(genx3, 'N2LR')
genx1_N = avg(genx1, 'UD2N')
genx2_N = avg(genx2, 'LR2N')
cartx = j11_UD * genx1 + j12_UD * genx2_UD + j13_UD * genx3_UD
carty = j21_LR * genx1_LR + j22_LR * genx2 + j23_LR * genx3_LR
cartz = j31_N * genx1_N + j32_N * genx2_N + j33_N * genx3
return cartx, carty, cartz
def cartesian_to_general_particle(cartx, carty):
'''To convert the particle positions from Cartesian geom. to General geom.
'''
genx1 = cartx + eps*np.sin(2*np.pi*cartx/Lx)*np.sin(2*np.pi*carty/Ly)
genx2 = carty + eps*np.sin(2*np.pi*cartx/Lx)*np.sin(2*np.pi*carty/Ly)
return genx1, genx2
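def check_inverse_jacobian_against_fd(x0=1.234, y0=2.345, h=1e-6):
    '''Editor's illustrative consistency check (not called by the solver):
    the analytic elements returned by perturbed_inverse_jacobian_elements
    should match centred finite differences of the map (x, y) -> (xi, eta)
    implemented in cartesian_to_general_particle.
    '''
    j11, j12, j13, j21, j22, j23, j31, j32, j33 = perturbed_inverse_jacobian_elements(x0, y0)
    xi_xp, eta_xp = cartesian_to_general_particle(x0 + h, y0)
    xi_xm, eta_xm = cartesian_to_general_particle(x0 - h, y0)
    xi_yp, eta_yp = cartesian_to_general_particle(x0, y0 + h)
    xi_ym, eta_ym = cartesian_to_general_particle(x0, y0 - h)
    assert np.isclose(j11, (xi_xp - xi_xm)/(2.*h), rtol=1e-4, atol=1e-6)
    assert np.isclose(j12, (xi_yp - xi_ym)/(2.*h), rtol=1e-4, atol=1e-6)
    assert np.isclose(j21, (eta_xp - eta_xm)/(2.*h), rtol=1e-4, atol=1e-6)
    assert np.isclose(j22, (eta_yp - eta_ym)/(2.*h), rtol=1e-4, atol=1e-6)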
def diff_for_inversion(param, target):
xi, eta = cartesian_to_general_particle(param[0], param[1])
return (xi - target[0]) ** 2 + (eta - target[1]) ** 2
def cart_grid_calculator(xi, eta):
if xi.shape != eta.shape:
raise ValueError
x = np.zeros_like(xi)
y = np.zeros_like(eta)
init0 = xi.copy()
init1 = eta.copy()
init = np.stack((init0, init1), axis=-1)
target = np.stack((xi, eta), axis=-1)
bnds = ((None, None), (None, None)) #use this to set bounds on the values of x and y
for i in range(xi.shape[0]):
for j in range(xi.shape[1]):
res = minimize(lambda param: diff_for_inversion(param, target[i, j, :]), init[i, j, :], bounds=bnds, tol=1e-16)
x[i, j] = res.x[0]
y[i, j] = res.x[1]
return x, y
def dirder(field, dertype):
''' To take the directional derivative of a quantity
dertype defines input/output grid type and direction
'''
global nxn, nyn, nxc, nyc, dx, dy
if dertype == 'C2UD': # centres to UD faces, y-derivative
derfield = zeros((nxc, nyn), np.float64)
derfield[0:nxc, 1:nyn-1] = (field[0:nxc, 1:nyc]-field[0:nxc, 0:nyc-1])/dy
derfield[0:nxc, 0] = (field[0:nxc, 0]-field[0:nxc, nyc-1])/dy
derfield[0:nxc, nyn-1] = derfield[0:nxc, 0]
elif dertype == 'C2LR': # centres to LR faces, x-derivative
derfield = zeros((nxn, nyc), np.float64)
derfield[1:nxn-1, 0:nyc] = (field[1:nxc, 0:nyc]-field[0:nxc-1, 0:nyc])/dx
derfield[0, 0:nyc] = (field[0, 0:nyc]-field[nxc-1, 0:nyc])/dx
derfield[nxn-1, 0:nyc] = derfield[0, 0:nyc]
elif dertype == 'UD2N': # UD faces to nodes, x-derivative
derfield = zeros((nxn, nyn), np.float64)
derfield[1:nxn-1, 0:nyn] = (field[1:nxc, 0:nyn]-field[0:nxc-1, 0:nyn])/dx
derfield[0, 0:nyn] = (field[0, 0:nyn]-field[nxc-1, 0:nyn])/dx
derfield[nxn-1, 0:nyn] = derfield[0, 0:nyn]
elif dertype == 'LR2N': # LR faces to nodes, y-derivative
derfield = zeros((nxn, nyn), np.float64)
derfield[0:nxn, 1:nyn-1] = (field[0:nxn, 1:nyc]-field[0:nxn, 0:nyc-1])/dy
derfield[0:nxn, 0] = (field[0:nxn, 0]-field[0:nxn, nyc-1])/dy
derfield[0:nxn, nyn-1] = derfield[0:nxn, 0]
elif dertype == 'N2LR': # nodes to LR faces, y-derivative
derfield = zeros((nxn, nyc), np.float64)
derfield[0:nxn, 0:nyc] = (field[0:nxn, 1:nyn]-field[0:nxn, 0:nyn-1])/dy
elif dertype == 'N2UD': # nodes to UD faces, x-derivative
derfield = zeros((nxc, nyn), np.float64)
derfield[0:nxc, 0:nyn] = (field[1:nxn, 0:nyn]-field[0:nxn-1, 0:nyn])/dx
elif dertype == 'LR2C': # LR faces to centres, x-derivative
derfield = zeros((nxc, nyc), np.float64)
derfield[0:nxc, 0:nyc] = (field[1:nxn, 0:nyc]-field[0:nxn-1, 0:nyc])/dx
elif dertype == 'UD2C': # UD faces to centres, y-derivative
derfield = zeros((nxc, nyc), np.float64)
derfield[0:nxc, 0:nyc] = (field[0:nxc, 1:nyn]-field[0:nxc, 0:nyn-1])/dy
return derfield
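def check_dirder_on_constant():
    '''Editor's illustrative sanity check (not called by the solver): the
    directional derivative of a spatially constant field must vanish for
    every input/output grid combination handled by dirder.
    '''
    for dertype, shape in [('C2UD', (nxc, nyc)), ('C2LR', (nxc, nyc)),
                           ('UD2N', (nxc, nyn)), ('LR2N', (nxn, nyc)),
                           ('N2LR', (nxn, nyn)), ('N2UD', (nxn, nyn)),
                           ('LR2C', (nxn, nyc)), ('UD2C', (nxc, nyn))]:
        field = np.full(shape, 3.7)
        assert np.allclose(dirder(field, dertype), 0.)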
def avgC2N(fieldC):
''' To average a 2D field defined on centres to the nodes
'''
global nx,ny
fieldN = zeros((nx,ny),np.float64)
fieldN[1:nx-1,1:ny-1] = (fieldC[0:nx-2,0:ny-2]+fieldC[1:nx-1,0:ny-2]+fieldC[0:nx-2,1:ny-1]+fieldC[1:nx-1,1:ny-1])/4.
fieldN[0,1:ny-1] = (fieldC[0,0:ny-2]+fieldC[0,1:ny-1]+fieldC[nx-2,0:ny-2]+fieldC[nx-2,1:ny-1])/4.
fieldN[nx-1,1:ny-1] = fieldN[0,1:ny-1]
fieldN[1:nx-1,0] = (fieldC[0:nx-2,0]+fieldC[1:nx-1,0]+fieldC[0:nx-2,ny-2]+fieldC[1:nx-1,ny-2])/4.
fieldN[1:nx-1,ny-1] = fieldN[1:nx-1,0]
fieldN[0,0] = (fieldC[0,0]+fieldC[0,ny-2]+fieldC[nx-2,0]+fieldC[nx-2,ny-2])/4.
fieldN[0,ny-1] = fieldN[0,0]
fieldN[nx-1,0] = fieldN[0,0]
fieldN[nx-1,ny-1] = fieldN[0,0]
return fieldN
def avg(field, avgtype):
''' To take the average of a quantity
avgtype defines input/output grid type and direction
'''
global nxn, nyn, nxc, nyc, dx, dy
if avgtype == 'C2UD': # centres to UD faces, y-average
avgfield = zeros((nxc, nyn), np.float64)
avgfield[0:nxc, 1:nyn-1] = (field[0:nxc, 1:nyc]+field[0:nxc, 0:nyc-1])/2.
avgfield[0:nxc, 0] = (field[0:nxc, 0]+field[0:nxc, nyc-1])/2.
avgfield[0:nxc, nyn-1] = avgfield[0:nxc, 0]
elif avgtype == 'C2LR': # centres to LR faces, x-average
avgfield = zeros((nxn, nyc), np.float64)
avgfield[1:nxn-1, 0:nyc] = (field[1:nxc, 0:nyc]+field[0:nxc-1, 0:nyc])/2.
avgfield[0, 0:nyc] = (field[0, 0:nyc]+field[nxc-1, 0:nyc])/2.
avgfield[nxn-1, 0:nyc] = avgfield[0, 0:nyc]
elif avgtype == 'UD2N': # UD faces to nodes, x-average
avgfield = zeros((nxn, nyn), np.float64)
avgfield[1:nxn-1, 0:nyn] = (field[1:nxc, 0:nyn]+field[0:nxc-1, 0:nyn])/2.
avgfield[0, 0:nyn] = (field[0, 0:nyn]+field[nxc-1, 0:nyn])/2.
avgfield[nxn-1, 0:nyn] = avgfield[0, 0:nyn]
elif avgtype == 'LR2N': # LR faces to nodes, y-average
avgfield = zeros((nxn, nyn), np.float64)
avgfield[0:nxn, 1:nyn-1] = (field[0:nxn, 1:nyc]+field[0:nxn, 0:nyc-1])/2.
avgfield[0:nxn, 0] = (field[0:nxn, 0]+field[0:nxn, nyc-1])/2.
avgfield[0:nxn, nyn-1] = avgfield[0:nxn, 0]
elif avgtype == 'N2LR': # nodes to LR faces, y-average
avgfield = zeros((nxn, nyc), np.float64)
avgfield[0:nxn, 0:nyc] = (field[0:nxn, 1:nyn]+field[0:nxn, 0:nyn-1])/2.
elif avgtype == 'N2UD': # nodes to UD faces, x-average
avgfield = zeros((nxc, nyn), np.float64)
avgfield[0:nxc, 0:nyn] = (field[1:nxn, 0:nyn]+field[0:nxn-1, 0:nyn])/2.
elif avgtype == 'LR2C': # LR faces to centres, x-average
avgfield = zeros((nxc, nyc), np.float64)
avgfield[0:nxc, 0:nyc] = (field[1:nxn, 0:nyc]+field[0:nxn-1, 0:nyc])/2.
elif avgtype == 'UD2C': # UD faces to centres, y-average
avgfield = zeros((nxc, nyc), np.float64)
avgfield[0:nxc, 0:nyc] = (field[0:nxc, 1:nyn]+field[0:nxc, 0:nyn-1])/2.
return avgfield
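def check_avg_preserves_constant():
    '''Editor's illustrative sanity check (not called by the solver): averaging
    a spatially constant field between any pair of grids must return the same
    constant, including across the periodic boundaries.
    '''
    for avgtype, shape in [('C2UD', (nxc, nyc)), ('C2LR', (nxc, nyc)),
                           ('UD2N', (nxc, nyn)), ('LR2N', (nxn, nyc)),
                           ('N2LR', (nxn, nyn)), ('N2UD', (nxn, nyn)),
                           ('LR2C', (nxn, nyc)), ('UD2C', (nxc, nyn))]:
        field = np.full(shape, 2.5)
        assert np.allclose(avg(field, avgtype), 2.5)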
def curl(fieldx, fieldy, fieldz, fieldtype):
''' To take the curl of either E or B in General coord.
curl^i = 1/J·(d_j·g_kq·A^q - d_k·g_jq·A^q)
fieldtype=='E': input -> LR,UD,c, output -> UD,LR,n
fieldtype=='B': input -> UD,LR,n, output -> LR,UD,c
'''
if fieldtype == 'E':
fieldx_C = avg(fieldx, 'LR2C')
fieldy_C = avg(fieldy, 'UD2C')
fieldx_UD = avg(avg(fieldx, 'LR2C'), 'C2UD')
fieldz_UD = avg(fieldz, 'C2UD')
fieldy_LR = avg(avg(fieldy, 'UD2C'), 'C2LR')
fieldz_LR = avg(fieldz, 'C2LR')
curl_x = dirder(g31_C * fieldx_C + g32_C * fieldy_C + g33_C * fieldz, 'C2UD')/J_UD
curl_y = - dirder(g31_C * fieldx_C + g32_C * fieldy_C + g33_C * fieldz, 'C2LR')/J_LR
curl_z = dirder(g21_UD * fieldx_UD + g22_UD * fieldy + g23_UD * fieldz_UD, 'UD2N')/J_N\
- dirder(g11_LR * fieldx + g12_LR * fieldy_LR + g13_LR * fieldz_LR, 'LR2N')/J_N
elif fieldtype == 'B':
fieldx_N = avg(fieldx, 'UD2N')
fieldy_N = avg(fieldy, 'LR2N')
fieldx_LR = avg(avg(fieldx, 'UD2N'), 'N2LR')
fieldz_LR = avg(fieldz, 'N2LR')
fieldy_UD = avg(avg(fieldy, 'LR2N'), 'N2UD')
fieldz_UD = avg(fieldz, 'N2UD')
curl_x = dirder(g31_N * fieldx_N + g32_N * fieldy_N + g33_N * fieldz, 'N2LR')/J_LR
curl_y = - dirder(g31_N * fieldx_N + g32_N * fieldy_N + g33_N * fieldz, 'N2UD')/J_UD
curl_z = dirder(g21_LR * fieldx_LR + g22_LR * fieldy + g23_LR * fieldz_LR, 'LR2C')/J_C\
- dirder(g11_UD * fieldx + g12_UD * fieldy_UD + g13_UD * fieldz_UD, 'UD2C')/J_C
return curl_x, curl_y, curl_z
def div(fieldx, fieldy, fieldz, fieldtype):
''' To take the divergence of either E or B in General coord.
div = 1/J·d_i(J·A^i)
fieldtype=='E': input -> LR,UD,c, output -> c,c,c
fieldtype=='B': input -> UD,LR,n, output -> n,n,n
'''
if fieldtype == 'E':
div = (dirder(J_LR * fieldx, 'LR2C') + dirder(J_UD * fieldy, 'UD2C'))/J_C
elif fieldtype == 'B':
div = (dirder(J_UD * fieldx, 'UD2N') + dirder(J_LR * fieldy, 'LR2N'))/J_N
return div
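def check_div_of_curl_vanishes():
    '''Editor's illustrative mimetic check (not called by the solver): for any
    field defined on (LR, UD, c) the discrete identity div(curl E) = 0 should
    hold to machine precision, because the mixed differences taken through the
    staggered grids commute.
    '''
    E1t = np.sin(2.*np.pi*xiLR/Lx) * np.cos(2.*np.pi*etaLR/Ly)
    E2t = np.cos(2.*np.pi*xiUD/Lx) * np.sin(2.*np.pi*etaUD/Ly)
    E3t = np.sin(2.*np.pi*xiC/Lx) * np.sin(2.*np.pi*etaC/Ly)
    curl1, curl2, curl3 = curl(E1t, E2t, E3t, 'E')
    assert np.max(np.abs(div(curl1, curl2, curl3, 'B'))) < 1e-12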
def phys_to_krylov(E1k, E2k, E3k, uk, vk, wk):
''' To populate the Krylov vector using physics vectors
E1,E2,E3 are 2D arrays
u,v,w of dimensions npart
'''
global nxc,nyc,nxn,nyn,npart
ykrylov = zeros(nxn*nyc+nxc*nyn+nxc*nyc+3*npart,np.float64)
ykrylov[0:nxn*nyc] = E1k.reshape(nxn*nyc)
ykrylov[nxn*nyc:nxn*nyc+nxc*nyn] = E2k.reshape(nxc*nyn)
ykrylov[nxn*nyc+nxc*nyn:nxn*nyc+nxc*nyn+nxc*nyc] = E3k.reshape(nxc*nyc)
ykrylov[nxn*nyc+nxc*nyn+nxc*nyc:nxn*nyc+nxc*nyn+nxc*nyc+npart] = uk
ykrylov[nxn*nyc+nxc*nyn+nxc*nyc+npart:nxn*nyc+nxc*nyn+nxc*nyc+2*npart] = vk
ykrylov[nxn*nyc+nxc*nyn+nxc*nyc+2*npart:nxn*nyc+nxc*nyn+nxc*nyc+3*npart] = wk
return ykrylov
def krylov_to_phys(xkrylov):
''' To populate the physics vectors using the Krylov space vector
E1,E2,E3 are 2D arrays of dimension (nx,ny)
unew,vnew,wnew of dimensions npart1+npart2
'''
global nx,ny,npart
E1k = np.reshape(xkrylov[0:nxn*nyc],(nxn,nyc))
E2k = np.reshape(xkrylov[nxn*nyc:nxn*nyc+nxc*nyn],(nxc,nyn))
E3k = np.reshape(xkrylov[nxn*nyc+nxc*nyn:nxn*nyc+nxc*nyn+nxc*nyc],(nxc,nyc))
uk = xkrylov[nxn*nyc+nxc*nyn+nxc*nyc:nxn*nyc+nxc*nyn+nxc*nyc+npart]
vk = xkrylov[nxn*nyc+nxc*nyn+nxc*nyc+npart:nxn*nyc+nxc*nyn+nxc*nyc+2*npart]
wk = xkrylov[nxn*nyc+nxc*nyn+nxc*nyc+2*npart:nxn*nyc+nxc*nyn+nxc*nyc+3*npart]
return E1k, E2k, E3k, uk, vk, wk
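def check_krylov_roundtrip():
    '''Editor's illustrative round-trip check (not called by the solver):
    packing the unknowns with phys_to_krylov and unpacking them again with
    krylov_to_phys must reproduce the original arrays exactly.
    '''
    E1t = np.random.rand(nxn, nyc)
    E2t = np.random.rand(nxc, nyn)
    E3t = np.random.rand(nxc, nyc)
    ut, vt, wt = np.random.rand(3, npart)
    E1b, E2b, E3b, ub, vb, wb = krylov_to_phys(phys_to_krylov(E1t, E2t, E3t, ut, vt, wt))
    for original, recovered in [(E1t, E1b), (E2t, E2b), (E3t, E3b),
                                (ut, ub), (vt, vb), (wt, wb)]:
        assert np.array_equal(original, recovered)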
def residual(xkrylov):
''' Calculation of the residual of the equations
This is the most important part: the definition of the problem
'''
#global E1, E2, E3, B1, B2, B3, x, y, u, v, w, QM, q, npart, dt
E1new, E2new, E3new, unew, vnew, wnew = krylov_to_phys(xkrylov)
# t: -1/2 0 1/2 1
# B Bbar Bnew
# E Enew
# u ubar unew
# x xbar xnew
# curlE curlB
# J/xgen1
# Ep/Bp/xgen2
xnew = x + unew*dt
ynew = y + vnew*dt
xbar = (xnew + x)/2.
ybar = (ynew + y)/2.
xbar = xbar % Lx
ybar = ybar % Ly
if perturb:
xgen1, ygen1 = cartesian_to_general_particle(xbar, ybar)
else:
xgen1, ygen1 = xbar, ybar
Jx, Jy, Jz = particle_to_grid_J(xgen1, ygen1, unew, vnew, wnew, q)
J1, J2, J3 = cartesian_to_general(Jx, Jy, Jz, 'J')
curlE1, curlE2, curlE3 = curl(E1, E2, E3, 'E')
B1new = B1 - dt * curlE1
B2new = B2 - dt * curlE2
B3new = B3 - dt * curlE3
curlB1, curlB2, curlB3 = curl(B1new, B2new, B3new,'B')
resE1 = E1new - E1 - dt * curlB1 + dt * J1
resE2 = E2new - E2 - dt * curlB2 + dt * J2
resE3 = E3new - E3 - dt * curlB3 + dt * J3
B1bar = (B1new + B1) / 2.
B2bar = (B2new + B2) / 2.
B3bar = (B3new + B3) / 2.
ubar = (unew + u) / 2.
vbar = (vnew + v) / 2.
wbar = (wnew + w) / 2.
Ex, Ey, Ez = general_to_cartesian(E1, E2, E3, 'E')
Bx, By, Bz = general_to_cartesian(B1bar, B2bar, B3bar, 'B')
if perturb:
xgen2, ygen2 = cartesian_to_general_particle(x, y)
else:
xgen2, ygen2 = x, y
Exp = grid_to_particle(xgen2, ygen2, Ex, 'LR')
Eyp = grid_to_particle(xgen2, ygen2, Ey, 'UD')
Ezp = grid_to_particle(xgen2, ygen2, Ez, 'C')
Bxp = grid_to_particle(xgen2, ygen2, Bx, 'UD')
Byp = grid_to_particle(xgen2, ygen2, By, 'LR')
Bzp = grid_to_particle(xgen2, ygen2, Bz, 'N')
resu = unew - u - QM * (Exp + vbar * Bzp - wbar * Byp) * dt
resv = vnew - v - QM * (Eyp - ubar * Bzp + wbar * Bxp) * dt
resw = wnew - w - QM * (Ezp + ubar * Byp - vbar * Bxp) * dt
ykrylov = phys_to_krylov(resE1, resE2, resE3, resu, resv, resw)
return ykrylov
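# Editor's note (illustrative sketch, not part of the original time loop): the
# residual above is the nonlinear function the implicit solver drives to zero at
# every time step. With the imports at the top of this file, a single step with
# the Newton-Krylov option would look roughly like
#     guess = phys_to_krylov(E1, E2, E3, u, v, w)
#     sol = newton_krylov(residual, guess)
#     E1new, E2new, E3new, unew, vnew, wnew = krylov_to_phys(sol)
# (the actual update, including the Picard alternative, is handled later in the
# script and may differ in detail).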
"""
global E1, E2, E3, B1, B2, B3, x, y, u, v, w, QM, q, npart, dt
E1new, E2new, E3new, unew, vnew, wnew = krylov_to_phys(xkrylov)
# method: YEE Fabio's YEE
# u: t = n+1/2 -> t = n
# unew: t = n+3/2 -> t = n+1
# ubar: t = n+1 -> t = n+1/2 (=J1)
ubar = (unew + u)/2.
vbar = (vnew + v)/2.
wbar = (wnew + w)/2.
if relativistic:
gold = np.sqrt(1.+u**2+v**2+w**2)
gnew = np.sqrt(1.+unew**2+vnew**2+wnew**2)
gbar = (gold+gnew)/2.
else:
gbar = np.ones(npart)
# x: t = n -> t = n
# xnew: t = n+1 -> t = n+1
# xbar : t = n+1/2 -> t = n+1/2
xbar = x + ubar/gbar*dt/2.
ybar = y + vbar/gbar*dt/2.
# periodic BC: modulo operator "%" which finds the remainder (ex. 10.1%10=0.1)
xbar = xbar%Lx
ybar = ybar%Ly
# conversion to general geom.
if perturb:
xgenbar, ygenbar = cartesian_to_general_particle(xbar, ybar)
else:
xgenbar, ygenbar = xbar, ybar
Jx, Jy, Jz = particle_to_grid_J(xgenbar,ygenbar,ubar/gbar,vbar/gbar,wbar/gbar,q)
# J1: t = n+1/2 -> t = n+1/2 (=curlB1)
J1, J2, J3 = cartesian_to_general(Jx, Jy, Jz, 'J')
# E1: t = n+1/2 -> t = n
# E1new: t = n+3/2 -> t = n+1
# E1bar: t = n+1 -> t = n+1/2
E1bar = (E1new + E1)/2.
E2bar = (E2new + E2)/2.
E3bar = (E3new + E3)/2.
# curlE1: t = n -> t = n+1/2 (=E1bar)
curlE1, curlE2, curlE3 = curl(E1bar,E2bar,E3bar,'E')
# B1: t = -1/2 -> t = n
# B1bar: t = 1/2 -> t = n+1/2
B1bar = B1 - dt/2.*curlE1
B2bar = B2 - dt/2.*curlE2
B3bar = B3 - dt/2.*curlE3
#curlB1: t = n+1/2 -> t = n+1/2 (=B1bar)
curlB1, curlB2, curlB3 = curl(B1bar,B2bar,B3bar,'B')
#res: t = n+1/2 -> t = n+1/2 (=curlB1,J1)
resE1 = E1new - E1 - dt*curlB1 + dt*J1
resE2 = E2new - E2 - dt*curlB2 + dt*J2
resE3 = E3new - E3 - dt*curlB3 + dt*J3
# Ex: t = n -> t = n+1/2 (=E1bar)
Ex, Ey, Ez = general_to_cartesian(E1bar, E2bar, E3bar, 'E')
# Bx: t = n+1/2 -> t = n+1/2 (=B1bar)
Bx, By, Bz = general_to_cartesian(B1bar, B2bar, B3bar, 'B')
# Exp t = n -> t = n+1/2 (=E1bar)
Exp = grid_to_particle(xgenbar,ygenbar,Ex,'LR')
Eyp = grid_to_particle(xgenbar,ygenbar,Ey,'UD')
Ezp = grid_to_particle(xgenbar,ygenbar,Ez,'C')
# Bxp t = n+1/2 -> t = n+1/2 (=B1bar)
Bxp = grid_to_particle(xgenbar,ygenbar,Bx,'UD')
Byp = grid_to_particle(xgenbar,ygenbar,By,'LR')
Bzp = grid_to_particle(xgenbar,ygenbar,Bz,'N')
# resu: t = n -> t = n+1/2 (=Exp,Bxp,ubar)
resu = unew - u - QM * (Exp + vbar/gbar*Bzp - wbar/gbar*Byp)*dt
resv = vnew - v - QM * (Eyp - ubar/gbar*Bzp + wbar/gbar*Bxp)*dt
resw = wnew - w - QM * (Ezp + ubar/gbar*Byp - vbar/gbar*Bxp)*dt
ykrylov = phys_to_krylov(resE1,resE2,resE3,resu,resv,resw)
return ykrylov
"""
def grid_to_particle(xk, yk, f, gridtype):
''' Interpolation of grid quantity to particle
'''
global dx, dy, nx, ny, npart
fp = zeros(npart,np.float64)
fx, fy = 0., 0.
if gridtype=='LR':
fy = dy/2.
elif gridtype=='UD':
fx = dx/2.
elif gridtype=='C':
fx, fy = dx/2., dy/2.
for i in range(npart):
# interpolate field f from grid to particle */
xa = (xk[i]-fx)/dx
ya = (yk[i]-fy)/dy
i1 = int(np.floor(xa))
i2 = i1 + 1
j1 = int(np.floor(ya))
j2 = j1 + 1
wx2 = xa - np.float64(i1)
wx1 = 1.0 - wx2
wy2 = ya - np.float64(j1)
wy1 = 1.0 - wy2
if gridtype=='LR':
j1, j2 = j1%nyc, j2%nyc
elif gridtype=='UD':
i1, i2 = i1%nxc, i2%nxc
elif gridtype=='C':
i1, i2 = i1%nxc, i2%nxc
j1, j2 = j1%nyc, j2%nyc
fp[i] = wx1* wy1 * f[i1,j1] + wx2* wy1 * f[i2,j1] + wx1* wy2 * f[i1,j2] + wx2* wy2 * f[i2,j2]
return fp
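def check_grid_to_particle_constant():
    '''Editor's illustrative check (not called by the solver): interpolating a
    spatially constant grid field to particle positions inside the domain must
    return that constant for every particle, because the bilinear weights sum
    to one.
    '''
    if npart == 0:
        return
    xk, yk = x % Lx, y % Ly
    for gridtype, shape in [('LR', np.shape(xiLR)), ('UD', np.shape(xiUD)),
                            ('C', np.shape(xiC)), ('N', np.shape(xiN))]:
        field = np.full(shape, 4.2)
        assert np.allclose(grid_to_particle(xk, yk, field, gridtype), 4.2)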
def particle_to_grid_rho(xk, yk, q):
''' Interpolation particle to grid - charge rho -> c
'''
global dx, dy, nx, ny, npart, rho_ion
rho = zeros(np.shape(xiC), np.float64)
for i in range(npart):
xa = (xk[i]-dx/2.)/dx
ya = (yk[i]-dy/2.)/dy
i1 = int(np.floor(xa))
i2 = i1 + 1
j1 = int(np.floor(ya))
j2 = j1 + 1
wx2 = xa - np.float64(i1)
wx1 = 1.0 - wx2
wy2 = ya - np.float64(j1)
wy1 = 1.0 - wy2
i1, i2 = i1%nxc, i2%nxc
j1, j2 = j1%nyc, j2%nyc
rho[i1, j1] += wx1 * wy1 * q[i]
rho[i2, j1] += wx2 * wy1 * q[i]
rho[i1, j2] += wx1 * wy2 * q[i]
rho[i2, j2] += wx2 * wy2 * q[i]
if electron_and_ion:
rho += rho_ion
return rho
def particle_to_grid_J(xk, yk, uk, vk, wk, qk):
''' Interpolation particle to grid - current -> LR, UD, c
'''
global dx, dy, nxc, nyc, nxn, nyn, npart
Jx = zeros(np.shape(xiLR), np.float64)
Jy = zeros(np.shape(xiUD), np.float64)
Jz = zeros(np.shape(xiC), np.float64)
for i in range(npart):
# interpolate p -> LR
xa = xk[i]/dx
ya = (yk[i]-dy/2.)/dy
i1 = int(np.floor(xa))
i2 = i1 + 1
if i2==nxn-1:
i2=0
j1 = int(np.floor(ya))
j2 = j1 + 1
wx2 = xa - np.float64(i1)
wx1 = 1.0 - wx2
wy2 = ya - np.float64(j1)
wy1 = 1.0 - wy2
j1, j2 = j1%nyc, j2%nyc
Jx[i1,j1] += wx1* wy1 * qk[i] * uk[i]/dx/dy
Jx[i2,j1] += wx2* wy1 * qk[i] * uk[i]/dx/dy
Jx[i1,j2] += wx1* wy2 * qk[i] * uk[i]/dx/dy
Jx[i2,j2] += wx2* wy2 * qk[i] * uk[i]/dx/dy
# interpolate p -> UD
xa = (xk[i]-dx/2.)/dx
ya = yk[i]/dy
i1 = int(np.floor(xa))
i2 = i1 + 1
j1 = int(np.floor(ya))
j2 = j1 + 1
if j2==nyn-1:
j2=0
wx2 = xa - np.float64(i1)
wx1 = 1.0 - wx2
wy2 = ya - np.float64(j1)
wy1 = 1.0 - wy2
i1, i2 = i1%nxc, i2%nxc
Jy[i1,j1] += wx1* wy1 * qk[i] * vk[i]/dx/dy
Jy[i2,j1] += wx2* wy1 * qk[i] * vk[i]/dx/dy
Jy[i1,j2] += wx1* wy2 * qk[i] * vk[i]/dx/dy
Jy[i2,j2] += wx2* wy2 * qk[i] * vk[i]/dx/dy
# interpolate p -> c
xa = (xk[i]-dx/2.)/dx
ya = (yk[i]-dy/2.)/dy
i1 = int(np.floor(xa))
i2 = i1 + 1
j1 = int(np.floor(ya))
j2 = j1 + 1
wx2 = xa - np.float64(i1)
wx1 = 1.0 - wx2
wy2 = ya - np.float64(j1)
wy1 = 1.0 - wy2
i1, i2 = i1%nxc, i2%nxc
j1, j2 = j1%nyc, j2%nyc
Jz[i1,j1] += wx1* wy1 * qk[i] * wk[i]/dx/dy
Jz[i2,j1] += wx2* wy1 * qk[i] * wk[i]/dx/dy
Jz[i1,j2] += wx1* wy2 * qk[i] * wk[i]/dx/dy
Jz[i2,j2] += wx2* wy2 * qk[i] * wk[i]/dx/dy
Jx[nxn-1,:] = Jx[0,:]
Jy[:,nyn-1] = Jy[:,0]
return Jx, Jy, Jz
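# Note on the staggering implied by the offsets above (an inference from the dx/2, dy/2 shifts,
# not a statement by the original author): 'LR' quantities (E1, Jx, By) are sampled at positions
# shifted by dy/2, 'UD' quantities (E2, Jy, Bx) at positions shifted by dx/2, 'C' quantities
# (E3, Jz, rho) at cell centres, and 'N' quantities (B3) at unshifted nodes, i.e. a Yee-like layout.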
# Initialisation of geometry
print('Initialising geometry ...')
start_geom = time.time()
if perturb:
xLR, yLR = cart_grid_calculator(xiLR, etaLR)
xUD, yUD = cart_grid_calculator(xiUD, etaUD)
xC, yC = cart_grid_calculator(xiC, etaC)
xN, yN = cart_grid_calculator(xiN, etaN)
define_geometry()
stop_geom = time.time()
print(g11_C)
print(g12_C)
print(g13_C)
print(g22_C)
print(g23_C)
print(g33_C)
#if perturb:
# xgen, ygen = cartesian_to_general_particle(x, y)
#else:
# xgen, ygen = x, y
#
#Jx, Jy, Jz = particle_to_grid_J(xgen, ygen, u, v, w, q)
#J1, J2, J3 = cartesian_to_general(Jx, Jy, Jz, 'J')
#
#if relativistic:
# histEnergyP1 = [np.sum((g[0:npart1]-1.)*abs(q[0:npart1]/QM[0:npart1]))]
# histEnergyP2 = [np.sum((g[npart1:npart]-1.)*abs(q[npart1:npart]/QM[npart1:npart]))]
##elif perturb:
## histEnergyP1 = [0.5*np.sum((J_C * g11_C * avg(E1 * J1, 'LR2C') + 2. * J_C * g12_C * avg(E1, 'LR2C') * avg(J2, 'UD2C')
## + J_C * g22_C * avg(E2 * J2, 'UD2C') + J_C * g33_C * E3 * J3)/2.*dx*dy)]
## histEnergyP2 = [0.5*np.sum((J_C * g11_C * avg(E1 * J1, 'LR2C') + 2. * J_C * g12_C * avg(E1, 'LR2C') * avg(J2, 'UD2C')
## + J_C * g22_# * avg(E2 * J2, 'UD2C') + J_C * g33_C * E3 * J3)/2.*dx*dy)]
##elif (not relativistic) and (not perturb):
## histEnergyP1 = [np.sum((u[0:npart1]**2+v[0:npart1]**2+w[0:npart1]**2)/2.*abs(q[0:npart1]/QM[0:npart1]))]
## histEnergyP2 = [np.sum((u[npart1:npart]**2+v[npart1:npart]**2 +w[npart1:npart]**2)/2.*abs(q[npart1:npart]/QM[npart1:npart]))]
#else:
# histEnergyP1 = [np.sum((u[0:npart1]**2+v[0:npart1]**2+w[0:npart1]**2)/2.*abs(q[0:npart1]/QM[0:npart1]))]
# histEnergyP2 = [np.sum((u[npart1:npart]**2+v[npart1:npart]**2 +w[npart1:npart]**2)/2.*abs(q[npart1:npart]/QM[npart1:npart]))]
#
##if perturb:
## # Energy -> defined in C
## histEnergyE = [(np.sum(J_C * g11_C * avg(E1**2, 'LR2C')) + 2.*np.sum(J_C * g12_C * avg(E1, 'LR2C') * avg(E2, 'UD2C'))\
## + np.sum(J_C * g22_C * avg(E2**2, 'UD2C')) + np.sum(J_C * g33_C * E3**2))/2.*dx*dy]
## histEnergyB = [(np.sum(J_C * g11_C * avg(B1**2, 'UD2C')) + 2.*np.sum(J_C * g12_C * avg(B1, 'UD2C') * avg(B2, 'LR2C'))
## + np.sum(J_C * g22_C * avg(B2**2, 'LR2C')) + np.sum(J_C * g33_C * avg(avg(B3**2, 'N2LR'), 'LR2C')))/2.*dx*dy]
##
## histEnergyTot=[histEnergyE[0]+histEnergyB[0]+histEnergyP1[0]+histEnergyP2[0]]
##
## histMomentumx = [np.sum(u[0:npart])]
## histMomentumy = [np.sum(v[0:npart])]
## histMomentumz = [np.sum(w[0:npart])]
## histMomentumTot = [histMomentumx[0] + histMomentumy[0] + histMomentumz[0]]
##
## print('cycle 0, energy=',histEnergyTot[0])
## print('energyP1=',histEnergyP1[0],'energyP2=',histEnergyP2[0])
## print('energyE=',histEnergyE[0])
## print('energyB=',histEnergyB[0])
## print('Momentumx=',histMomentumx[0],'Momentumy=',histMomentumy[0],'Momentumz=',histMomentumz[0])
#if perturb:
# # Energy -> defined in C
# histEnergyE1=[np.sum(J_C * g11_C * avg(E1**2, 'LR2C') \
# + J_C * g12_C * avg(E1, 'LR2C') * avg(E2, 'UD2C'))/2.*dx*dy] #\
# #+ J_C * g13_C * avg(E1, 'LR2C') * E3)/2.*dx*dy]
# histEnergyE2=[np.sum(J_C * g21_C * avg(E2, 'UD2C') * avg(E1, 'LR2C') \
# + J_C * g22_C * avg(E2**2, 'UD2C'))/2.*dx*dy]# \
# #+ J_C * g23_C * avg(E2, 'UD2C') * E3)/2.*dx*dy]
# histEnergyE3=[np.sum(#J_C * g31_C * E3 * avg(E1, 'LR2C') \
# #+ J_C * g32_C * E3 * avg(E2, 'UD2C') \
# + J_C * g33_C * E3**2)/2.*dx*dy]
# histEnergyB1=[np.sum(J_C * g11_C * avg(B1**2, 'UD2C')
# + J_C * g12_C * avg(B1, 'UD2C') * avg(B2, 'LR2C'))/2.*dx*dy]# \
# #+ J_C * g13_C * avg(B1, 'UD2C') * avg(avg(B3, 'N2LR'), 'LR2C'))/2.*dx*dy]
# histEnergyB2=[np.sum(J_C * g21_C * avg(B2, 'LR2C') * avg(B1, 'UD2C')\
# + J_C * g22_C * avg(B2**2, 'LR2C'))/2.*dx*dy]# \
# #+ J_C * g23_C * avg(B2, 'LR2C') * avg(avg(B3, 'N2LR'), 'LR2C'))/2.*dx*dy]
# histEnergyB3=[np.sum(#J_C * g31_C * avg(avg(B3, 'N2LR'), 'LR2C') * avg(B1, 'UD2C') \
# #+ J_C * g32_C * avg(avg(B3, 'N2LR'), 'LR2C') * avg(B2, 'LR2C') \
# + J_C * g33_C * avg(avg(B3**2, 'N2LR'), 'LR2C'))/2.*dx*dy]
#else:
# histEnergyE1=[np.sum(E1[0:nxn-1,:]**2)/2.*dx*dy]
# histEnergyE2=[np.sum(E2[:,0:nyn-1]**2)/2.*dx*dy]
# histEnergyE3=[np.sum(E3[:,:]**2)/2.*dx*dy]
# histEnergyB1=[np.sum(B1[:,0:nyn-1]**2)/2.*dx*dy]
# histEnergyB2=[np.sum(B2[0:nxn-1,:]**2)/2.*dx*dy]
# histEnergyB3=[np.sum(B3[0:nxn-1,0:nyn-1]**2)/2.*dx*dy]
#
#histEnergyTot=[histEnergyP1[0]+histEnergyP2[0]+histEnergyE1[0]+histEnergyE2[0]+histEnergyE3[0]+histEnergyB1[0]+histEnergyB2[0]+histEnergyB3[0]]
#
#histMomentumx = [np.sum(u[0:npart])]
#histMomentumy = [np.sum(v[0:npart])]
#histMomentumz = [np.sum(w[0:npart])]
#histMomentumTot = [histMomentumx[0] + histMomentumy[0] + histMomentumz[0]]
#
#energyP[0] = histEnergyP1[0] + histEnergyP2[0]
#energyE[0] = histEnergyE1[0] + histEnergyE2[0] + histEnergyE3[0]
#energyB[0] = histEnergyB1[0] + histEnergyB2[0] + histEnergyB3[0]
#
#
#
#if perturb:
# xgen, ygen = cartesian_to_general_particle(x, y)
#else:
# xgen, ygen = x, y
#
#rho_ion = - particle_to_grid_rho(xgen, ygen, q)
#rho = particle_to_grid_rho(xgen, ygen, q)
#temp = 0
#
#
#if plot_dir == True:
# myplot_map(xn, yn, B3, title='B3', xlabel='x', ylabel='y')
# filename1 = PATH1 + 'B3_' + '%04d'%temp + '.png'
# plt.savefig(filename1, dpi=ndpi)
#
# if nppc!=0:
# myplot_particle_map(x, y)
# filename1 = PATH1 + 'part_' + '%04d'%temp + '.png'
# plt.savefig(filename1, dpi=ndpi)
#
# myplot_phase_space(x, v, limx=(0, Lx), limy=(-2*V0x1, 2*V0x1), xlabel='x', ylabel='vx')
# filename1 = PATH1 + 'phase_' + '%04d'%temp + '.png'
# plt.savefig(filename1, dpi=ndpi)
#
# myplot_map(xc, yc, rho, title='rho', xlabel='x', ylabel='y')
# filename1 = PATH1 + 'rho_' + '%04d'%temp + '.png'
# plt.savefig(filename1, dpi=ndpi)
#
# myplot_map(xc, yc, div(E1, E2, E3, 'E') - rho, title='div(E)-rho map', xlabel='x', ylabel='y')
# filename1 = PATH1 + 'div_rho_' + '%04d'%temp + '.png'
# plt.savefig(filename1, dpi=ndpi)
#
#cpu_time = zeros(nt+1, np.float64)
#print('cycle 0, energy=',histEnergyTot[0])
#print('energyP1=',histEnergyP1[0],'energyP2=',histEnergyP2[0])
#print('energyEx=',histEnergyE1[0],'energyEy=',histEnergyE2[0],'energyEz=',histEnergyE3[0])
#print('energyBx=',histEnergyB1[0],'energyBy=',histEnergyB2[0],'energyBz=',histEnergyB3[0])
#print('Momentumx=',histMomentumx[0],'Momentumy=',histMomentumy[0],'Momentumz=',histMomentumz[0])
print('Main loop ...')
start_loop = time.time()
# main cycle
for it in range(0,nt):
plt.clf()
#start = time.time()
if NK_method:
# The following uses Python's (SciPy) Newton-Krylov method
#guess = zeros(2*nxn*nyc+2*nxc*nyn+nxc*nxc+nxn*nxn+3*2*part,np.float64)
guess = phys_to_krylov(E1, E2, E3, u, v, w)
sol = newton_krylov(residual, guess, method='lgmres', verbose=1, f_tol=1e-14)#, f_rtol=1e-7)
print('Residual: %g' % abs(residual(sol)).max())
elif Picard:
# The following is a Picard iteration
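# i.e. the fixed-point update x_{k+1} = x_k - residual(x_k), iterated until the step
# norm ||x_{k+1} - x_k|| falls below tol or kmax iterations are reached.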
guess = phys_to_krylov(E1, E2, E3, u, v, w)
err = 1.
tol = 1e-14
kmax = 100
k=0
xkrylov = guess
while err > tol and k<=kmax:
k+=1
xkold = xkrylov
xkrylov = xkrylov - residual(xkrylov)
err = np.linalg.norm(xkrylov-xkold)
print(k, err)
sol = xkrylov
"""
#stop = time.time()
#cpu_time[it] = stop - start
#if relativistic:
# gnew = np.sqrt(1.+unew**2+vnew**2+wnew**2)
# gold = np.sqrt(1.+u**2+v**2+w**2)
# gbar = (gold+gnew)/2.
#else:
# gbar = np.ones(npart)
# position is evolved in physical space
# pushed by general geom. fields converted
#ubar = (unew + u)/2.
#vbar = (vnew + v)/2.
"""
# t: -1/2 0 1/2 1
# B Bbar Bnew
# E/Eold Enew
# u unew
# x xnew
# curlE
# Energy
E1new, E2new, E3new, unew, vnew, wnew = krylov_to_phys(sol)
xnew = x + unew * dt
ynew = y + vnew * dt
xnew = xnew % Lx
ynew = ynew % Ly
curlE1, curlE2, curlE3 = curl(E1, E2, E3, 'E')
B1new = B1 - dt * curlE1
B2new = B2 - dt * curlE2
B3new = B3 - dt * curlE3
B1bar = (B1new + B1)/2.
B2bar = (B2new + B2)/2.
B3bar = (B3new + B3)/2.
E1old = E1
E2old = E2
E3old = E3
x = xnew
y = ynew
u = unew
v = vnew
w = wnew
E1 = E1new
E2 = E2new
E3 = E3new
B1 = B1new
B2 = B2new
B3 = B3new
"""
x += u*dt
y += v*dt
x = x%Lx
y = y%Ly
u = unew
v = vnew
w = wnew
E1bar = (E1new + E1)/2.
E2bar = (E2new + E2)/2.
E3bar = (E3new + E3)/2.
curlE1, curlE2, curlE3 = curl(E1bar, E2bar, E3bar,'E')
B1 = B1 - dt*curlE1
B2 = B2 - dt*curlE2
B3 = B3 - dt*curlE3
if perturb:
xgen, ygen = cartesian_to_general_particle(x, y)
else:
xgen, ygen = x, y
"""
if perturb:
xgen, ygen = cartesian_to_general_particle(x, y)
else:
xgen, ygen = x, y
rho = particle_to_grid_rho(xgen, ygen, q)
divE[it] = np.sum(div(E1, E2, E3, 'E'))
divB[it] = np.sum(div(B1, B2, B3, 'B'))
divE_rho[it] = np.sum(np.abs(div(E1new, E2new, E3new, 'E')) - np.abs(rho))
E1time[it] = np.sum(E1)
E2time[it] = np.sum(E2)
E3time[it] = np.sum(E3)
B1time[it] = np.sum(B1)
B2time[it] = np.sum(B2)
B3time[it] = np.sum(B3)
if relativistic:
energyP1[it] = np.sum((g[0:npart1]-1.)*abs(q[0:npart1]/QM[0:npart1]))
energyP2[it] = np.sum((g[npart1:npart]-1.)*abs(q[npart1:npart]/QM[npart1:npart]))
#elif perturb:
# energyP1 = 0.5*np.sum((J_C * g11_C * avg(E1 * J1, 'LR2C') + 2. * J_C * avg(g12_LR, 'LR2C') * avg(E1, 'LR2C') * avg(J2, 'UD2C')
# + J_C * g22_C * avg(E2 * J2, 'UD2C') + J_C * g33_C * E3 * J3)/2.*dx*dy)
# energyP2 = 0.5*np.sum((J_C * g11_C * avg(E1 * J1, 'LR2C') + 2. * J_C * avg(g12_LR, 'LR2C') * avg(E1, 'LR2C') * avg(J2, 'UD2C')
# + J_C * g22_C * avg(E2 * J2, 'UD2C') + J_C * g33_C * E3 * J3)/2.*dx*dy)
#elif (not relativistic) and (not perturb):
else:
energyP1[it] = np.sum((u[0:npart1]**2+v[0:npart1]**2+w[0:npart1]**2)/2.*abs(q[0:npart1]/QM[0:npart1]))
energyP2[it] = np.sum((u[npart1:npart]**2+v[npart1:npart]**2 +w[npart1:npart]**2)/2.*abs(q[npart1:npart]/QM[npart1:npart]))
# if perturb:
# # Energy -> defined in C
# energyE = (np.sum(J_C * g11_C * avg(E1**2, 'LR2C')) + 2.*np.sum(J_C * g12_C * avg(E1, 'LR2C') * avg(E2, 'UD2C'))\
# + np.sum(J_C * g22_C * avg(E2**2, 'UD2C')) + np.sum(J_C * g33_C * E3**2))/2.*dx*dy
# energyB = (np.sum(J_C * g11_C * avg(B1**2, 'UD2C')) + 2.*np.sum(J_C * g12_C * avg(B1, 'UD2C') * avg(B2, 'LR2C'))\
# + np.sum(J_C * g22_C * avg(B2**2, 'LR2C')) + np.sum(J_C * g33_C * avg(avg(B3**2, 'N2LR'), 'LR2C')))/2.*dx*dy
#
# energyTot = energyE + energyB + energyP1 + energyP2
#
# momentumx = np.sum(unew[0:npart])
# momentumy = np.sum(vnew[0:npart])
# momentumz = np.sum(wnew[0:npart])
# momentumTot = momentumx + momentumy + momentumz
#
# histEnergyP1.append(energyP1)
# histEnergyP2.append(energyP2)
# histEnergyE.append(energyE)
# histEnergyB.append(energyB)
# histEnergyTot.append(energyTot)
#
# histMomentumx.append(momentumx)
# histMomentumy.append(momentumy)
# histMomentumz.append(momentumz)
# histMomentumTot.append(momentumTot)
#
# energyP[it] = histEnergyP1[it] + histEnergyP2[it]
#
# print('cycle',it,'energy =',histEnergyTot[it])
# print('energyP1=',histEnergyP1[it],'energyP2=',histEnergyP2[it])
# print('energyE=',histEnergyE[it])
# print('energyB=',histEnergyB[it])
# print('relative energy change=',(histEnergyTot[it]-histEnergyTot[0])/histEnergyTot[0])
# print('momento totale= ', histMomentumTot[it])
if perturb:
# Energy -> defined in C
#energyE1[it]= np.sum(J_C * g11_C * avg(E1old**2, 'LR2C') \
# + J_C * g12_C * avg(E1old, 'LR2C') * avg(E2old, 'UD2C'))/2.*dx*dy# \
# #+ J_C * g13_C * avg(E1, 'LR2C') * E3)/2.*dx*dy
#energyE2[it]= np.sum(J_C * g21_C * avg(E2old, 'UD2C') * avg(E1old, 'LR2C') \
# + J_C * g22_C * avg(E2old**2, 'UD2C'))/2.*dx*dy# \
# #+ J_C * g23_C * avg(E2, 'UD2C') * E3)/2.*dx*dy
#energyE3[it]= np.sum(#J_C * g31_C * E3 * avg(E1, 'LR2C') \
# #+ J_C * g32_C * E3 * avg(E2, 'UD2C') \
# + J_C * g33_C * E3old**2)/2.*dx*dy
#energyB1[it]= np.sum(J_C * g11_C * avg(B1bar, 'UD2C')**2 \
# + J_C * g12_C * avg(B1bar, 'UD2C') * avg(B2bar, 'LR2C'))/2.*dx*dy# \
# #+ J_C * g13_C * avg(B1, 'UD2C') * avg(avg(B3, 'N2LR'), 'LR2C'))/2.*dx*dy
#energyB2[it]= np.sum(J_C * g21_C * avg(B2bar, 'LR2C') * avg(B1bar, 'UD2C')\
# + J_C * g22_C * avg(B2bar**2, 'LR2C'))/2.*dx*dy # \
# #+ J_C * g23_C * avg(B2, 'LR2C') * avg(avg(B3, 'N2LR'), 'LR2C'))/2.*dx*dy
#energyB3[it]= np.sum(#J_C * g31_C * avg(avg(B3, 'N2LR'), 'LR2C') * avg(B1, 'UD2C') \
# #+ J_C * g32_C * avg(avg(B3, 'N2LR'), 'LR2C') * avg(B2, 'LR2C') \
# + J_C * g33_C * avg(avg(B3bar**2, 'N2LR'), 'LR2C'))/2.*dx*dy
energyE1[it] = np.sum(J_C * g11_C * avg(E1**2, 'LR2C')
+ J_C * g12_C * avg(E1, 'LR2C') * avg(E2, 'UD2C')
+ J_C * g13_C * avg(E1, 'LR2C') * E3)/2.*dx*dy
energyE2[it] = np.sum(J_C * g21_C * avg(E2, 'UD2C') * avg(E1, 'LR2C')
+ J_C * g22_C * avg(E2**2, 'UD2C')
+ J_C * g23_C * avg(E2, 'UD2C') * E3)/2.*dx*dy
energyE3[it] = np.sum(J_C * g31_C * E3 * avg(E1, 'LR2C')
+ J_C * g32_C * E3 * avg(E2, 'UD2C')
+ J_C * g33_C * E3**2)/2.*dx*dy
energyB1[it] = np.sum(J_C * g11_C * avg(B1bar**2, 'UD2C')
+ J_C * g12_C *
avg(B1bar, 'UD2C') * avg(B2bar, 'LR2C')
+ J_C * g13_C * avg(B1, 'UD2C') * avg(avg(B3, 'N2LR'), 'LR2C'))/2.*dx*dy
energyB2[it] = np.sum(J_C * g21_C * avg(B2bar, 'LR2C') * avg(B1bar, 'UD2C')
+ J_C * g22_C * avg(B2bar**2, 'LR2C')
+ J_C * g23_C * avg(B2, 'LR2C') * avg(avg(B3, 'N2LR'), 'LR2C'))/2.*dx*dy
energyB3[it] = np.sum(J_C * g31_C * avg(avg(B3, 'N2LR'), 'LR2C') * avg(B1, 'UD2C')
+ J_C * g32_C *
avg(avg(B3, 'N2LR'), 'LR2C') * avg(B2, 'LR2C')
+ J_C * g33_C * avg(avg(B3bar**2, 'N2LR'), 'LR2C'))/2.*dx*dy
else:
energyE1[it] = np.sum(E1old[0:nxn-1,:]**2)/2.*dx*dy
energyE2[it] = np.sum(E2old[:,0:nyn-1]**2)/2.*dx*dy
energyE3[it] = np.sum(E3old[:,:]**2)/2.*dx*dy
energyB1[it] = np.sum(B1bar[:,0:nyn-1]**2)/2.*dx*dy
energyB2[it] = np.sum(B2bar[0:nxn-1,:]**2)/2.*dx*dy
energyB3[it] = np.sum(B3bar[0:nxn-1,0:nyn-1]**2)/2.*dx*dy
energyTot[it] = energyP1[it] + energyP2[it] + energyE1[it] + energyE2[it] + energyE3[it] + energyB1[it] + energyB2[it] + energyB3[it]
momentumx = np.sum(unew[0:npart])
momentumy = np.sum(vnew[0:npart])
import numpy as np
from configs.DataPath import TRAIN_PATH, ROOT_PATH, DET_PATH, TRAIN_JSON_PATH
from utils.rand import random_sys
import cv2
import json
import random
class DataLoader(object):
def __init__(self, data_settings, read_all_boxes=False):
self.dataset_trained = []
self.data_num = 0
self.num_train = 0
self.num_val = 0
self.sub_datasets = {}
self.val_index = []
self.read_all_boxes = read_all_boxes
for sub_dataset in data_settings['dataset_used']:
self.sub_datasets[sub_dataset] = data_settings[sub_dataset]
with open(TRAIN_JSON_PATH + self.sub_datasets[sub_dataset]['label_path']) as f:
    data = json.load(f)
self.sub_datasets[sub_dataset]['data'] = data
num_data = self.sub_datasets[sub_dataset]['num_data']
assert num_data == len(data)
multiply = self.sub_datasets[sub_dataset]['multiply']
num_train = self.sub_datasets[sub_dataset]['num_train']
num_val = self.sub_datasets[sub_dataset]['num_val']
num_train_objects = self.sub_datasets[sub_dataset]['num_train_objects']
num_val_objects = self.sub_datasets[sub_dataset]['num_val_objects']
assert num_train_objects <= num_train * multiply
assert num_val_objects <= num_val * multiply
dataset = [sub_dataset] * num_data
keys = list(data.keys())
index = list(zip(dataset, keys))
random.shuffle(index)
if num_val > 0:
train_index = index[:-num_val]
val_index = index[-num_val:]
val_index = val_index * multiply
random.shuffle(val_index)
val_index = val_index[:num_val_objects]
else:
train_index = index
val_index = []
self.sub_datasets[sub_dataset].update(dict(train_index=train_index, val_index=val_index))
self.val_index += val_index
self.num_train += num_train_objects
self.num_val += num_val_objects
print('load ' + sub_dataset + ' done, train: %d, val: %d' % (num_train_objects, num_val_objects))
print('Dataloader done. Total train number: %d, Total val number: %d' % (self.num_train, self.num_val))
random.shuffle(self.val_index)
self.build_train_index()
def build_train_index(self):
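# Rebuild the epoch-level training index: for each sub-dataset, shuffle its train_index,
# keep the first num_train entries, replicate them 'multiply' times, reshuffle, then keep
# num_train_objects samples; finally shuffle the concatenated index across all datasets.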
self.train_index = []
for sub_dataset in self.sub_datasets:
sub_index = self.sub_datasets[sub_dataset]['train_index'].copy()
if sub_index:
random.shuffle(sub_index)
sub_index = sub_index[:self.sub_datasets[sub_dataset]['num_train']]
sub_index *= self.sub_datasets[sub_dataset]['multiply']
random.shuffle(sub_index)
sub_index = sub_index[:self.sub_datasets[sub_dataset]['num_train_objects']]
self.dataset_trained.append(sub_dataset)
self.train_index += sub_index
random.shuffle(self.train_index)
def get_random_data(self, read_all_boxes=False):
random_dataset = random.choice(self.dataset_trained)
random_index = random.choice(self.sub_datasets[random_dataset]['train_index'])
return self.get_data(random_index, read_pair=False, read_all_boxes=read_all_boxes)
def read(self, idx, validate, positive):
if validate:
index = self.val_index[idx]
else:
index = self.train_index[idx]
if positive:
all_boxes, search_img, search_box, template_img, template_box = self.get_data(index, read_pair=True, read_all_boxes=self.read_all_boxes)
else:
all_boxes, search_img, search_box = self.get_data(index, read_pair=False, read_all_boxes=self.read_all_boxes)
_, template_img, template_box = self.get_random_data(read_all_boxes=False)
return all_boxes, search_img, search_box, template_img, template_box
def get_data(self, index, read_pair=True, read_all_boxes=False):
dataset = index[0]
index = index[1]
data = self.sub_datasets[dataset]['data'][index]
match_range = self.sub_datasets[dataset]['match_range']
path = TRAIN_PATH[dataset] + '/' + index
all_boxes = []
if dataset in ['DET', 'DET_val', 'COCO', 'COCO_val']:
if dataset == 'DET' or dataset == 'DET_val':
if index[0] == 'a':
search_path = ROOT_PATH + DET_PATH + index[:index.index('_')] + '/' + index[2:] + '.JPEG'
else:
search_path = path + '.JPEG'
else:
search_path = path + '.jpg'
samples = list(data.keys())
num_sample = len(data)
if num_sample > 1:
search_index = random.randint(0, num_sample - 1)
else:
search_index = 0
search_box = data[samples[search_index]]['000000']
if read_pair:
template_path = search_path
if read_all_boxes:
for i in range(num_sample):
if i != search_index:
all_boxes.append(np.array(data[samples[i]]['000000'], dtype=np.float32))
import numpy as np
from skimage import measure
import time
import math
# Game constants
CHAIN_POWER = np.array([0, 8, 16, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448, 480, 512, 544, 576, 608, 640, 672])
COLOR_BONUS = np.array([0, 3, 6, 12, 24])
GROUP_BONUS = np.array([0, 2, 3, 4, 5, 6, 7, 10])
# Constants for simulator
color_codes = np.array(['0', 'J', 'R', 'G', 'B', 'Y', 'P']) # Mask for converting field to code representation
fallable_nums = np.array([1, 2, 3, 4, 5, 6])
no_garbage = np.array([0, 0, 2, 3, 4, 5, 6], dtype=int) # Mask for seeing the field without garbage
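# no_garbage is used as a lookup table via fancy indexing (no_garbage[field]): it maps the NONE
# and GARBAGE codes to 0 while leaving the five colour codes unchanged, e.g.
#   no_garbage[np.array([0, 1, 2, 5])] -> array([0, 0, 2, 5])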
PUYO_TYPE = {
'NONE': 0,
'GARBAGE': 1,
'RED': 2,
'GREEN': 3,
'BLUE': 4,
'YELLOW': 5,
'PURPLE': 6
}
def _apply_drop(field: np.ndarray):
for i in range(6):
col = field[:, i]
not_zero = col[col != 0]
if len(not_zero) > 0:
field[:, i] = 0
field[-len(not_zero):, i] = not_zero
return field
def _calculate_garbage_pops(field, color_pop_mask):
garbages = np.argwhere(np.isin(field, PUYO_TYPE['GARBAGE']))
garbage_to_clear = []
for i, pos in enumerate(garbages):
y, x = pos
if ((y > 1 and color_pop_mask[y - 1, x] == True) or (y < 12 and color_pop_mask[y + 1, x] == True) or (x > 0 and color_pop_mask[y, x - 1] == True) or (x < 5 and color_pop_mask[y, x + 1] == True)):
garbage_to_clear.append(i)
return garbages[garbage_to_clear]
def _analyze_pops(field: np.ndarray, puyo_to_pop=4):
# Don't include hidden row
valid_area = no_garbage[field]
valid_area[0, valid_area[0, :] > 1] = 0
# Calculated connected components
labels = measure.label(valid_area, background=0, connectivity=1)
# Groups that are big enough to pop
groups, count = np.unique(labels[labels != 0], return_counts=True)
pop_labels = groups[count >= puyo_to_pop]
pop_counts = count[count >= puyo_to_pop]
# Has pop? Boolean variable
has_pops = pop_labels.shape[0] > 0
# Get where the colors are popping
color_pop_mask = np.isin(labels, pop_labels)
"""
This file contains code for running all experiments in the paper
<NAME>, <NAME>, <NAME>. "Online Learning for Active Cache Synchronization." ICML-2020.
To reproduce the experiments in Figures 1 and 2 in the main text and Figures 3 and 4 in the Appendix,
run exp1(), exp2(), exp1a(), and exp2a() from this file, respectively. See the comments for these
methods for more details.
Method param_search() allows inspecting the performance of the algorithms from the experiments with
different hyperparameter value combinations. Hyperparameter ranges for the paper's experiments are
provided in that method's comments.
"""
from abc import ABC, abstractmethod
import scipy.stats
from scipy.stats import poisson
import numpy as np;
import scipy.integrate as integrate
import scipy.special as special
from scipy.optimize import minimize
import pprint
import time;
import math;
import matplotlib.pyplot as plt
import sys
np.set_printoptions(threshold=sys.maxsize)
PROBE_PLAY_IDX = 0
SYNC_PLAY_IDX = 1
# Abstract class for a cost generation process
class CostGen(ABC):
@abstractmethod
def __init__(self, num_arms, distrib_params):
super().__init__()
# The instantaneous cost
@abstractmethod
def c(self, intervals):
pass
"""Computes the average infinite-horizon cost of a play-rate-parameterized policy for the specified subset of arms.
Parameters
----------
r : float array
Play rates to use for all arms.
Returns
-------
float
Policy cost for the specified subset of arms, normalized by the number of arms in arms_filter.
"""
@abstractmethod
def J(self, r):
pass
"""Simulates arm plays for selected arms until a fixed horizon (epoch_start_time + epoch_len) and records the generated cost samples.
Parameters
----------
r : float array
Play rates to use for all arms.
arms_latest_play_times : int array
The timestamps of the latest simulated play times for all arms.
NOTE: arms_latest_play_times contents are modified by this method!
epoch_start_time : int
The start time of the epoch during which the arm plays are to be simulated.
epoch_len : int
The length of the epoch during which the arm plays are to be simulated.
epsilon : float
The probability of doing a probe play between any two consecutive sync plays.
Returns
-------
array of lists
An array of lists of [probe play cost, sync play cost] pairs.
"""
def sample_costs(self, r, arms_latest_play_times, epoch_start_time, epoch_len, epsilon):
histories = np.empty(len(r), dtype = object)
for k in range(len(r)):
arm_k_play_hist = list()
"""
For arm k, each iteration of this loop schedules a sync play (1 / r[k]) time units after the previous
sync play, until the time of the next scheduled sync play is past the end of the current scheduling
epoch (epoch_start_time + epoch_len). With probability epsilon it also schedules a probe play between
the previous sync play and the one scheduled in this iteration, at a time chosen uniformly between
the two.
"""
while arms_latest_play_times[k] + 1/r[k] <= epoch_start_time + epoch_len:
# With prob. epsilon, schedule a probe play.
if np.random.binomial(1, epsilon, 1) > 0:
probe_play_time = np.random.uniform(0, 1/r[k])
# probe_and_sync_play_times's time stamps are relative to the previous scheduled sync play
probe_and_sync_play_times = np.array([probe_play_time, 1/r[k]])
# Sample a cost for the scheduled probe play and sync play.
values = self.c(k, probe_and_sync_play_times)
arm_k_play_hist.append(values)
else:
# If we happened to schedule no probe play before the next sync play, insert the "0" indicator
# instead of a probe play timestamp, and the indicator "-1" cost for it.
probe_and_sync_play_times = np.array([0, 1/r[k]])
values = self.c(k, probe_and_sync_play_times)
values[PROBE_PLAY_IDX] = -1
arm_k_play_hist.append(values)#list(values))
arms_latest_play_times[k] += 1/r[k]
histories[k] = arm_k_play_hist
return histories
"""Estimates the gradient of the cost functions for the selected arms.
Parameters
----------
r : float array
Play rates to use for all arms.
arms_latest_play_times : int array
The timestamps of the latest simulated play times for all arms.
NOTE: arms_latest_play_times contents are modified by this method!
epoch_start_time : int
The start time of the epoch during which the arm plays are to be simulated.
epoch_len : int
The length of the epoch during which the arm plays are to be simulated.
epsilon : float
The probability of doing a probe play between any two consecutive sync plays.
Returns
-------
est_grad_J : array of floats
An array representing the gradient. Value of 0 indicates that this dimension hasn't been reestimated
arms_with_new_grad : array of ints
An array of arm indices whose partial derivatives got estimated in this function call.
All other arms' partial derivative estimates are 0 and should be ignored.
"""
def estimate_grad_J(self, r, arms_latest_play_times, epoch_start_time, epoch_len, epsilon):
est_grad_J = np.zeros_like(r)
histories = self.sample_costs(r, arms_latest_play_times, epoch_start_time, epoch_len, epsilon)
arms_with_new_grad = []
for k in range(len(r)):
sum_est_grad_k = 0
# For each sync play, compute a J_k gradient estimate and add them all up.
for h in range(len(histories[k])):
if histories[k][h][PROBE_PLAY_IDX] != -1:
sum_est_grad_k += 1 / (epsilon * r[k]) * (histories[k][h][PROBE_PLAY_IDX] - histories[k][h][SYNC_PLAY_IDX])
else:
sum_est_grad_k += 0
# Average the gradient estimates and record the arms for which gradient estimates have been computed
if len(histories[k]) > 0:
est_grad_J[k] = (sum_est_grad_k / len(histories[k]))
arms_with_new_grad.append(k)
else:
est_grad_J[k] = 0
return est_grad_J / len(arms_with_new_grad), arms_with_new_grad
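# In symbols, each sync play with a preceding probe at time s ~ Uniform(0, 1/r_k) contributes
#   (c_k(s) - c_k(1/r_k)) / (epsilon * r_k)
# to the partial derivative estimate for arm k; plays without a probe contribute 0. These
# contributions are averaged over the arm's plays in the epoch (loop above), and the returned
# vector is additionally divided by the number of arms that received a new estimate.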
def sigmoid(x):
return 1/(1 + np.exp(-x))
class SublinPoly(CostGen):
A_K = 0
P_K = 1
NOISE = 2
"""
For each arm, initialization uses a prior to choose a distribution over time-dependent cost functions.
Namely, arm k will have a distribution over "capped" polynomials
c_k(t) = a_k * (t^p_k)
where
- a_k will be sampled from Uniform(mu_c_k - mu_c_k * noise, mu_c_k + mu_c_k * noise) at the cost function query time (in method c(.)),
"noise" from [0,1] is a parameter shared by all arms, and mu_c_k is a parameter chosen at initialization time from the prior Uniform(0, 1).
- p_k is a parameter chosen at initialization time from the prior sigmoid(scale * Uniform[0,1]), where "scaling" is a parameter shared by all arms.
I.e., p_k is from (0, 1), but p_k values > 0.5 are more likely.
"""
def __init__(self, num_arms, distrib_params):
assert(distrib_params["noise"] <= 1)
self.params = np.zeros((num_arms, 3))
# mu_c_k, uniform in [0,1)
self.params[:, SublinPoly.A_K] = np.random.rand(num_arms,)
# p_k, biased towards exponents between 0.5 and 1
self.params[:, SublinPoly.P_K] = sigmoid(distrib_params["scaling"] * np.random.rand(num_arms,))
self.params[:, SublinPoly.NOISE] = distrib_params["noise"]
super().__init__(num_arms, distrib_params)
def c(self, arm, intervals):
# c(t) = a_k*t^p_k, where p_k is in (0, 1)
noisy_scaling = np.random.uniform(self.params[arm, SublinPoly.A_K] - self.params[arm, SublinPoly.NOISE] * self.params[arm, SublinPoly.A_K], self.params[arm, SublinPoly.A_K] + self.params[arm, SublinPoly.NOISE] * self.params[arm, SublinPoly.A_K])
return noisy_scaling * intervals ** self.params[arm, SublinPoly.P_K]
def J(self, r):
# J(r) = sum_{k in arms_filter}[1 / |arms_filter| * r_k * a_k * (1 / p_k + 1) * (1 / r_k)^(p_k + 1)]
pol_cost = 1 / self.params.shape[0] * np.dot(r, self.params[:, SublinPoly.A_K] * (1 / (self.params[:, SublinPoly.P_K] + 1) * ((1 / r) ** (self.params[:, SublinPoly.P_K] + 1))))
return pol_cost
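# Minimal usage sketch for SublinPoly (added for illustration; it is not called anywhere and is
# not part of the original experiment drivers). The 'scaling' and 'noise' keys are the ones
# __init__ reads above.
def _demo_sublin_poly():
    np.random.seed(0)
    cg = SublinPoly(num_arms=5, distrib_params={"scaling": 3.0, "noise": 0.1})
    r = np.full(5, 0.5)  # sync-play rate of 0.5 for every arm
    print("closed-form policy cost J(r):", cg.J(r))
    print("sampled probe/sync costs for arm 0:", cg.c(0, np.array([1.0, 2.0])))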
class BinaryPoisson(CostGen):
IMPORTANCE = 0
CHANGE_RATE = 1
def __init__(self, num_arms, distrib_params):
self.params = np.zeros((num_arms, 2))
# Web page importance scores; set them all to 1.0 for simplicity.
self.params[:, BinaryPoisson.IMPORTANCE] = np.full((num_arms,), 1.0)
# Web page change rates. Sample them uniformly from the interval [chrate_lo, chrate_hi].
self.params[:, BinaryPoisson.CHANGE_RATE] = np.random.uniform(low=distrib_params['chrate_lo'], high=distrib_params['chrate_hi'], size=(num_arms,))
super().__init__(num_arms, distrib_params)
def c(self, arm, intervals):
assert(len(intervals) == 2)
"""
Assume that the values in the "intervals" array are sorted in increasing order. This
ensures that we effectively sample a monotonically increasing function of the
interval samples.
"""
results = np.zeros((len(intervals),))
samples = (0 if intervals[PROBE_PLAY_IDX] == 0 else poisson.rvs(self.params[arm, BinaryPoisson.CHANGE_RATE] * intervals[PROBE_PLAY_IDX], size=1))
results[PROBE_PLAY_IDX] = self.params[arm, BinaryPoisson.IMPORTANCE] * (1.0 if samples > 0 else 0.0)
if intervals[SYNC_PLAY_IDX] < intervals[PROBE_PLAY_IDX]:
raise ValueError("Intervals aren't in the increasing order of length")
elif intervals[SYNC_PLAY_IDX] == intervals[PROBE_PLAY_IDX]:
results[SYNC_PLAY_IDX] = samples
else:
samples = samples + poisson.rvs(self.params[arm, 1] * (intervals[SYNC_PLAY_IDX] - intervals[PROBE_PLAY_IDX]), size=1)
results[SYNC_PLAY_IDX] = self.params[arm, BinaryPoisson.IMPORTANCE] * (1.0 if samples > 0 else 0.0)
return results
def _C_exp(self, r):
# The expectation of the binary change indicator over a Poisson change process, integrated over a sync interval: mu_k * (1 / r_k + e^(-delta_k / r_k) / delta_k - 1 / delta_k) for each arm (page) k, where mu_k is the page importance and delta_k is the page change rate.
return self.params[:, BinaryPoisson.IMPORTANCE] * (1.0 / r + np.exp(- self.params[:, BinaryPoisson.CHANGE_RATE] / r) / self.params[:, BinaryPoisson.CHANGE_RATE] - 1.0 / self.params[:, BinaryPoisson.CHANGE_RATE])
def J(self, r):
# J(r) = sum_{k in arms_filter}[1 / |arms_filter| * r_k * _C_exp(arms_filter, r)]
pol_cost = 1.0 / self.params.shape[0] * np.dot(r, self._C_exp(r))
return pol_cost
"""Computes the optimal policy cost. Since the policy cost function and constraint region is convex, this can be done via simple convex optimization.
Parameters
----------
arms : int array
Indices corresponding to arms that should be taken into account in this computation.
c : CostGen
A description of a family of cost-generating processes.
rmin : float
Minimum allowed arm sync play rate.
rmax : float
Maximum allowed arm sync play rate.
B : float
Maximum total sync play rate.
Returns
-------
array of lists
An array of lists of [probe play cost, sync play cost] pairs.
"""
def compute_opt_policy_cost(c, rmin, rmax, B):
num_arms = c.params.shape[0]
bnds=[[rmin, rmax] for k in range(num_arms)]
obj_func = (lambda r : c.J(r))
start_time = time.time()
argm = minimize(obj_func, x0 = np.full((num_arms, 1), B / num_arms), args=(), method="SLSQP", bounds=bnds, constraints={"fun": (lambda r : B - sum(r)), "type": "ineq"}, tol=None, callback=None, options=None)
end_time = time.time()
return c.J(argm.x)
"""The MirrorSync algorithm. See the ICML-2020 paper for a detailed description.
Parameters
----------
c : CostGen
A description of a family of cost-generating processes.
num_arms : int
Number of arms.
learning_rate : float
Learning rate.
num_rounds : int
Number of learning rounds (\mathcal{T}_max in the ICML-2020 paper).
epsilon : float
The fraction of total bandwidth allocated to probe plays.
B_frac : float
Maximum total sync play rate as a fraction of the number of arms.
rmin : float
Minimum allowed arm sync play rate.
rmax : float
Maximum allowed arm sync play rate.
silent: boolean
If True, suppresses most of the output.
Returns
-------
array of floats
Policy costs before the first learning round and at the end of each learning round, (num_rounds + 1) values in total.
"""
def mirror_sync(c, num_arms = 100, learning_rate = 0.05, num_rounds = 50, epsilon = 0.1, B_frac = 0.2, rmin = 0.001, rmax = 100, silent = True):
print("Running MirrorSync")
"""
Since we will be doing exploratory probe plays in addition to regular sync plays, we need to adjust the maximum play rate, which will apply to sync plays only.
"""
rmax = 1 / (1 + epsilon) * rmax
B_total = B_frac * num_arms
"""
The total play rate constraint also applies only to sync plays, so we need to adjust it to account for probe plays as well, as in the case of rmax.
"""
B = 1 / (1 + epsilon) * B_total
results = np.zeros((num_rounds + 1,))
est_grad_J = np.zeros((num_arms,))
r = np.zeros((num_arms,))
r = mirror_descent_step(r, est_grad_J / num_arms, learning_rate, rmin, rmax, B)
J = c.J(r)
if not silent:
print('Initial J value is ', J)
results[0] = J
for i in range(num_rounds):
est_grad_J, arms_with_new_grads = c.estimate_grad_J(r, np.zeros_like(r), 0, 1 / rmin, epsilon)
assert num_arms == len(arms_with_new_grads), "In MirrorSync, every arm must get a new gradient estimate in every round! Num arms: %r, num arms with new gradient estimates: %r" % (num_arms, len(arms_with_new_grads))
r = mirror_descent_breg_step(r, est_grad_J, range(num_arms), learning_rate, rmin, rmax, B)
J = c.J(r)
results[i + 1] = J
if (not silent) or (i == num_rounds - 1):
print('Update round %r: J_round is %r'%(i + 1, J))
if not silent:
print('Per-update-round Js are ')
pprint.pprint(results)
return results
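# Minimal usage sketch (added for illustration; not one of the paper's exp*() drivers). It assumes
# the mirror_descent_step / mirror_descent_breg_step helpers referenced above are defined elsewhere
# in this file, and uses the distrib_params keys that BinaryPoisson.__init__ reads.
def _demo_mirror_sync(num_arms=20, seed=0):
    np.random.seed(seed)
    cost_proc = BinaryPoisson(num_arms, {'chrate_lo': 0.5, 'chrate_hi': 5.0})
    opt_cost = compute_opt_policy_cost(cost_proc, rmin=0.001, rmax=100, B=0.2 * num_arms)
    costs = mirror_sync(cost_proc, num_arms=num_arms, learning_rate=0.05, num_rounds=10,
                        epsilon=0.1, B_frac=0.2, rmin=0.001, rmax=100, silent=True)
    print('OPT policy cost:', opt_cost, ' MirrorSync final cost:', costs[-1])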
"""The AsynMirrorSync/AsyncPSGDSync algorithm. See the ICML-2020 paper for a detailed description.
Parameters
----------
c : CostGen
A description of a family of cost-generating processes.
alg : string
'ms' for AsynMirrorSync, 'psgd' for AsyncPSGDSync.
num_arms : int
Number of arms.
learning_rate : float
Learning rate.
num_rounds : int
Number of equivalent MirrorSync learning rounds (\mathcal{T}_max in the ICML-2020 paper).
epsilon : float
The fraction of total bandwidth allocated to probe plays.
update_cycle_length : int
Length of time between rate update attempts.
NOTE: For ease of comparison to MirrorSync's performance, we assume that (1 / rmin) is a multiple of update_cycle_length.
B_frac : float
Maximum total sync play rate as a fraction of the number of arms.
rmin : float
Minimum allowed arm sync play rate.
rmax : float
Maximum allowed arm sync play rate.
silent: boolean
If True, suppresses most of the output.
Returns
-------
array of floats
Policy costs before the first learning round and at the end of each learning round, (num_rounds + 1) values in total.
"""
def async_mirror_sync(c, alg, num_arms = 100, learning_rate = 0.05, num_rounds = 50, epsilon = 0.1, update_cycle_length = 2, B_frac = 0.2, rmin = 0.001, rmax = 100, silent = True):
if alg == 'ms':
alg_name = 'AsyncMirrorSync'
elif alg == 'psgd':
alg_name = 'AsyncPSGDSync'
else:
raise ValueError('Invalid flavor of AsyncMirrorSync %r.'%(alg))
print('Running', alg_name)
rmax = 1 / (1 + epsilon) * rmax
B_total = B_frac * num_arms
B = 1 / (1 + epsilon) * B_total
if not silent:
print('Update cycle length: %r'%(update_cycle_length))
"""
We will record policy cost after every update_cycle_length, which can be much smaller than MirrorSync's round length = 1 / rmin.
For ease of comparison to MirrorSync's performance, we assume that (1 / rmin) is a multiple of update_cycle_length.
"""
results = np.zeros((num_rounds * math.ceil((1 / rmin) / update_cycle_length) + 1,))
r = np.zeros((num_arms,))
import centrosome.cpmorphology
import numpy
import scipy.ndimage
import six.moves
import cellprofiler_core.image
import cellprofiler_core.measurement
from cellprofiler_core.constants.measurement import C_COUNT, M_LOCATION_CENTER_X, M_LOCATION_CENTER_Y, COLTYPE_INTEGER, \
COLTYPE_FLOAT, M_NUMBER_OBJECT_NUMBER, FF_COUNT
import cellprofiler.modules.identifydeadworms
import cellprofiler_core.object
import cellprofiler_core.pipeline
import cellprofiler_core.workspace
IMAGE_NAME = "myimage"
OBJECTS_NAME = "myobjects"
def test_load_v1():
data = """CellProfiler Pipeline: http://www.cellprofiler.org
Version:1
SVNRevision:10479
IdentifyDeadWorms:[module_num:1|svn_version:\'Unknown\'|variable_revision_number:1|show_window:True|notes:\x5B\x5D]
Input image:BinaryWorms
Objects name:DeadWorms
Worm width:6
Worm length:114
Number of angles:180
"""
pipeline = cellprofiler_core.pipeline.Pipeline()
def callback(caller, event):
assert not isinstance(event, cellprofiler_core.pipeline.event.LoadException)
pipeline.load(six.moves.StringIO(data))
assert len(pipeline.modules()) == 1
module = pipeline.modules()[0]
assert isinstance(module, cellprofiler.modules.identifydeadworms.IdentifyDeadWorms)
assert module.image_name == "BinaryWorms"
assert module.object_name == "DeadWorms"
assert module.worm_width == 6
assert module.worm_length == 114
assert module.angle_count == 180
assert module.wants_automatic_distance
def test_load_v2():
data = """CellProfiler Pipeline: http://www.cellprofiler.org
Version:1
SVNRevision:10479
IdentifyDeadWorms:[module_num:1|svn_version:\'Unknown\'|variable_revision_number:2|show_window:True|notes:\x5B\x5D]
Input image:BinaryWorms
Objects name:DeadWorms
Worm width:6
Worm length:114
Number of angles:180
Automatically calculate distance parameters?:No
Spatial distance:6
Angular distance:45
"""
pipeline = cellprofiler_core.pipeline.Pipeline()
def callback(caller, event):
assert not isinstance(event, cellprofiler_core.pipeline.event.LoadException)
pipeline.load(six.moves.StringIO(data))
assert len(pipeline.modules()) == 1
module = pipeline.modules()[0]
assert isinstance(module, cellprofiler.modules.identifydeadworms.IdentifyDeadWorms)
assert module.image_name == "BinaryWorms"
assert module.object_name == "DeadWorms"
assert module.worm_width == 6
assert module.worm_length == 114
assert module.angle_count == 180
assert not module.wants_automatic_distance
assert module.space_distance == 6
assert module.angular_distance == 45
def make_workspace(pixel_data, mask=None):
image = cellprofiler_core.image.Image(pixel_data, mask)
image_set_list = cellprofiler_core.image.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.add(IMAGE_NAME, image)
module = cellprofiler.modules.identifydeadworms.IdentifyDeadWorms()
module.set_module_num(1)
module.image_name.value = IMAGE_NAME
module.object_name.value = OBJECTS_NAME
pipeline = cellprofiler_core.pipeline.Pipeline()
def callback(caller, event):
assert not isinstance(event, cellprofiler_core.pipeline.event.LoadException)
assert not isinstance(event, cellprofiler_core.pipeline.event.RunException)
pipeline.add_listener(callback)
pipeline.add_module(module)
workspace = cellprofiler_core.workspace.Workspace(
pipeline,
module,
image_set,
cellprofiler_core.object.ObjectSet(),
cellprofiler_core.measurement.Measurements(),
image_set_list,
)
return workspace, module
def test_zeros():
"""Run the module with an image of all zeros"""
workspace, module = make_workspace(numpy.zeros((20, 10), bool))
module.run(workspace)
count = workspace.measurements.get_current_image_measurement(
"_".join((C_COUNT, OBJECTS_NAME))
)
assert count == 0
def test_one_worm():
"""Find a single worm"""
image = numpy.zeros((20, 20), bool)
index, count, i, j = centrosome.cpmorphology.get_line_pts(
numpy.array([1, 6, 19, 14]),
numpy.array([5, 0, 13, 18]),
numpy.array([6, 19, 14, 1]),
numpy.array([0, 13, 18, 5]),
)
image[i, j] = True
image = scipy.ndimage.binary_fill_holes(image)
workspace, module = make_workspace(image)
module.worm_length.value = 12
module.worm_width.value = 5
module.angle_count.value = 16
module.run(workspace)
m = workspace.measurements
assert isinstance(m,cellprofiler_core.measurement.Measurements)
count = m.get_current_image_measurement(
"_".join((C_COUNT, OBJECTS_NAME))
)
assert count == 1
x = m.get_current_measurement(
OBJECTS_NAME, M_LOCATION_CENTER_X
)
assert len(x) == 1
assert round(abs(x[0] - 9.0), 1) == 0
y = m.get_current_measurement(
OBJECTS_NAME, M_LOCATION_CENTER_Y
)
assert len(y) == 1
assert round(abs(y[0] - 10.0), 1) == 0
a = m.get_current_measurement(
OBJECTS_NAME, cellprofiler.modules.identifydeadworms.M_ANGLE
)
assert len(a) == 1
assert round(abs(a[0] - 135), 0) == 0
def test_crossing_worms():
"""Find two worms that cross"""
image = numpy.zeros((20, 20), bool)
index, count, i, j = centrosome.cpmorphology.get_line_pts(
numpy.array([1, 4, 19, 16]),
numpy.array([3, 0, 15, 18]),
numpy.array([4, 19, 16, 1]),
numpy.array([0, 15, 18, 3]),
)
image[i, j] = True
index, count, i, j = centrosome.cpmorphology.get_line_pts(
numpy.array([0, 3, 18, 15]),
numpy.array([16, 19, 4, 1]),
numpy.array([3, 18, 15, 0]),
numpy.array([19, 4, 1, 16]),
)
image[i, j] = True
image = scipy.ndimage.binary_fill_holes(image)
workspace, module = make_workspace(image)
module.worm_length.value = 17
module.worm_width.value = 5
module.angle_count.value = 16
module.run(workspace)
m = workspace.measurements
assert isinstance(m,cellprofiler_core.measurement.Measurements)
count = m.get_current_image_measurement(
"_".join((C_COUNT, OBJECTS_NAME))
)
assert count == 2
a = m.get_current_measurement(
OBJECTS_NAME, cellprofiler.modules.identifydeadworms.M_ANGLE
)
assert len(a) == 2
if a[0] > 90:
order = numpy.array([0, 1])
else:
order = numpy.array([1, 0])
assert round(abs(a[order[0]] - 135), 0) == 0
assert round(abs(a[order[1]] - 45), 0) == 0
x = m.get_current_measurement(
OBJECTS_NAME, M_LOCATION_CENTER_X
)
assert len(x) == 2
assert round(abs(x[order[0]] - 9.0), 0) == 0
assert round(abs(x[order[1]] - 10.0), 0) == 0
y = m.get_current_measurement(
OBJECTS_NAME, M_LOCATION_CENTER_Y
)
assert len(y) == 2
assert round(abs(y[order[0]] - 10.0), 0) == 0
assert round(abs(y[order[1]] - 9.0), 0) == 0
def test_measurement_columns():
"""Test get_measurement_columns"""
workspace, module = make_workspace(numpy.zeros((20, 10), bool))
assert isinstance(module, cellprofiler.modules.identifydeadworms.IdentifyDeadWorms)
columns = module.get_measurement_columns(workspace.pipeline)
expected = (
(
OBJECTS_NAME,
M_LOCATION_CENTER_X,
COLTYPE_INTEGER,
),
(
OBJECTS_NAME,
M_LOCATION_CENTER_Y,
COLTYPE_INTEGER,
),
(
OBJECTS_NAME,
cellprofiler.modules.identifydeadworms.M_ANGLE,
COLTYPE_FLOAT,
),
(
OBJECTS_NAME,
M_NUMBER_OBJECT_NUMBER,
COLTYPE_INTEGER,
),
(
"Image",
FF_COUNT % OBJECTS_NAME,
COLTYPE_INTEGER,
),
)
assert len(columns) == len(expected)
for e in expected:
assert any(
all([x == y for x, y in zip(c, e)]) for c in columns
), "could not find " + repr(e)
def test_find_adjacent_by_distance_empty():
workspace, module = make_workspace(numpy.zeros((20, 10), bool))
assert isinstance(module, cellprofiler.modules.identifydeadworms.IdentifyDeadWorms)
first, second = module.find_adjacent_by_distance(
numpy.zeros(0), numpy.zeros(0), numpy.zeros(0)
)
#!/usr/bin/env python3
import logging
import os
from pathlib import Path
import click
import cv2
import numpy as np
import pandas as pd
# Specify opencv optimization
from dotenv import find_dotenv, load_dotenv
cv2.setUseOptimized(True)
def image(mapfile, img_shape=None, grayscale=False, force=True):
"""Accept str with full filepath to the mapfile, compute mean over all images,
and write output mean image as png file."""
assert isinstance(mapfile, str), TypeError("MAPFILE must be type STR")
# assert img_shape is None or (img_shape is tuple and len(img_shape) == 3), TypeError(
# f"IMG_SHAPE must be tuple with a length of 3"
# )
# setup logging
logger = logging.getLogger(__name__)
# set image flag
FORMAT = cv2.IMREAD_GRAYSCALE if grayscale else cv2.IMREAD_COLOR
# check if file exists
if not force and Path(mapfile).parent.joinpath("mean_image.png").exists():
mean_img_path = Path(mapfile).parent.joinpath("mean_image.png")
logger.info(f"Using existing mean image:\n{mean_img_path}")
return cv2.imread(mean_img_path, FORMAT)
# read mapfile
mapfile_df = pd.read_csv(mapfile, sep=",", header=0, index_col=0)
if img_shape is None:
# use shape of first image if none given
img_shape = cv2.imread(mapfile_df["filename"][0], FORMAT).shape
# compute mean and std
mean_img = mean_subfun(mapfile_df, img_shape, FORMAT)
std_img = std_subfun(mean_img, mapfile_df, img_shape, FORMAT)
# save images
mean_filename = str(Path(mapfile).parent.joinpath("mean_image.png"))
cv2.imwrite(mean_filename, mean_img)
std_filename = str(Path(mapfile).parent.joinpath("std_image.png"))
cv2.imwrite(std_filename, std_img)
return mean_img, std_img
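# Hedged usage sketch ("data/train_mapfile.csv" is a placeholder path, not a file shipped with this
# project): the mapfile is expected to be a comma-separated file with an index column followed by
# filename and label columns, as read above.
#   mean_img, std_img = image("data/train_mapfile.csv", grayscale=True, force=True)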
def mean_subfun(mapfile_df, img_shape, FORMAT):
""" """
logger = logging.getLogger(__name__)
logging_flag = False
mean_img = np.zeros(img_shape, dtype=np.float32)
# process files
logger.info("Computing image mean")
for idx, (filename, label) in mapfile_df.iterrows():
# print(f"{idx}\t{filename}\t{label}")
img = cv2.imread(filename, FORMAT)
if img is None:
    logging.warning(
        f"Error opening file:\t{Path('./').joinpath(*Path(filename).parts[-3:])}"
    )
    continue  # skip unreadable files rather than crashing on img.shape below
if img.shape[0:2] != img_shape[0:2]:
if not logging_flag:
logger.info(f"Resizing images to: {img_shape}") # print once
logging_flag = True
img = cv2.resize(img, img_shape[0:2], interpolation=cv2.INTER_AREA)
if img.shape != mean_img.shape:
img = np.reshape(img, mean_img.shape)
""" One dimensional KDE demo """
import numpy as np
from scipy.stats import norm, iqr
from bcipy.signal.model.mach_learning.generative_mods.function_density_estimation \
import KernelDensityEstimate
import matplotlib.pyplot as plt
def _demo_kde():
n = 100
np.random.seed(1)
# generate some dummy data
x = np.concatenate((np.random.normal(0, 1, int(0.3 * n)),
np.random.normal(5, 1, int(0.7 * n))))[:, np.newaxis]
# append 0 label to all data as we are interested in a single class case
y = np.zeros(x.shape)
# a subset of domain of the random variable x
x_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
# generate a dummy density function to sample data from
true_dens = (0.3 * norm(0, 1).pdf(x_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(x_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(x_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
# thumb up rule for bandwidth selection
# Silverman's rule of thumb; the min(std, IQR / 1.34) * n^(-1/5) form is assumed here to
# complete the truncated line, consistent with the iqr import above.
bandwidth = 1.06 * min(np.std(x), iqr(x) / 1.34) * np.power(n, -0.2)
import pandas as pd
import time
import copy
import numpy as np
from argotools.forecastlib.functions import *
from argotools.config import *
import os
class Load:
"""
The Load class generates an object that aids in the management of data throughout the whole ARGO forecasting methodology.
If you're working with a database or a dataset that already has a format, you can try the LoadFromTemplate class, which generates
objects that import data from previously used templates.
The Load class imports information from both the Target of interest and the input features used in modelling based
on a few assumptions of the file paths indicated for each.
The period parameters are fixed to every id in the data to avoid confusion.
Parameters
__________
start_period : str, optional
String of the form YYYY-MM-DD or the specific index type used to specify the
lower limit of the domain of the data sample index.
end_period : str, optional
String of the form YYYY-MM-DD or the specific index type used to specify the
upper limit of the domain of the data sample index.
Attributes
__________
id : list of str
List containing the id names for the objects of interest to study
target: dict
dictionary that contains the target information for each id organized in a pandas dataframe.
features: dict
dictionary that contains the features information for each id organized in a pandas dataframe.
Features are considered any type of information that counts as a proxy to the target activity.
They're uploaded using the new_id() function or .add_features() function.
ar_lags: dict
dictionary that contains the autoregressive lag information for each id organized in a pandas dataframe.
Each column corresponds to a different AR lag and is identified by a number in its name. (E.g.,
if a column title for the dataframe is 'AR_3', it corresponds to the target information at time
t-3)
"""
def __init__(self, start_period=None, end_period=None, ids = None):
self.id = []
self.data = {}
self.target = {}
self.file_paths = {}
self.features = {}
self.ar_lags = {}
self.ar_lag_ids = {}
self.start_period = start_period
self.end_period = end_period
if ids:
for id_ in ids:
self.new_id(id_)
print('Load object initialized with the following ids : {0}'.format(ids))
def new_id(self, id_):
if id_ not in self.id:
self.id.append(id_)
self.target[id_] = None
self.features[id_] = None
self.ar_lags[id_] = None
self.ar_lag_ids[id_] = None
else:
print('id:{0} already initialized'.format(id_))
return
def read_formatted_data(self,name=None, path_to_target=None, path_to_features=None,\
merge_indices=None, verbose=True, col_name=None):
"""
The read_formatted_data function is the main option for uploading information to the Load object. It loads
data from two different sources specified by the user. NOTE: read_formatted_data assumes that the data
read in has already been formatted as a pandas-readable csv (no skiprows, index in the first column,
only timeseries data after the first column). To load data from unformatted sources (such as Google Correlate),
please see "read_from_source" and its possible readings.
Parameters
__________
name : str
Name to identify the object to analyze.
path_to_target : str
The path to the file containing the target information.
The file is assumed be a csv that's pandas readable, the index of the
dataframe being the very first column of the file and that the target
can be identified as a column with the name parameter as identifier.
path_to_features : list
The list of paths to the feature information.
The file is assumed be a csv that's pandas readable, the index of the
dataframe being the very first column of the file and that the rest of
the columns can be identified as features of interest for the target
merge_indices : Boolean, optional (default None)
If indices between pandas dataframe of the target and the features is not equal
but equal in length, it replaces the feature's index with the target indexself.
If set to None, the it asks directly the user for the input.
col_name : Str, optional (default None)
Column name to look for in the target data. If set to None, function
looks up the region name.
"""
sp = self.start_period
ep = self.end_period
if col_name:
pass
else:
col_name = name
if os.path.isfile(path_to_target):
try:
target_df = pd.read_csv(path_to_target, index_col=[0])
series = copy.copy(target_df[col_name][sp:ep].to_frame())
#Renaming series to avoid naming problems
series.columns = [name]
self.target[name] = series
except Exception as t:
print('Could not load target data successfully:{0}'.format(t))
else:
print('Path to target information is not recognized as a real file path. \
Please verify this information')
return
features = None
for p in path_to_features: #Repeating same process for all paths
if os.path.isfile(p):
try:
feature_df = pd.read_csv(p, index_col=[0])
if features is None:
features = copy.copy(feature_df[sp:ep])
else:
features = pd.concat([features, feature_df[sp:ep]], axis=1)
except Exception as t:
print('Could not load feature data successfully:{0}'.format(t))
else:
print('Path {0} is not recognized as a real feature file path. \
Please verify this information'.format(p))
target_index = self.target[name].index.values
features_index = features.index.values
#Check if indices are identical
if len(target_index) == len(features_index):
equal_indices = [i == j for i,j in zip(target_index, features_index)]
if all(equal_indices):
pass
else:
print('WARNING! Some index values in the features and target do not \
coincide. Please revise the following')
for i, is_equal in enumerate(equal_indices):
if not is_equal:
print('Index row {0} with features index = {1} and target index = {2} \n'.format(i, features_index[i], target_index[i]))
if merge_indices:
features.set_index(self.target[name].index, inplace=True)
else:
s =input('Use the same index in the features? (Y/N)')
if s == 'Y':
features.set_index(self.target[name].index, inplace=True)
else:
print('WARNING! Feature and target indices do not have the same length ({0}, {1}). \
\n First indices: target={2} features={3} \n Last indices: target{4} \
features = {5}'.format(len(target_index), len(features_index), target_index[0],\
features_index[0], target_index[-1], features_index[-1]))
time.sleep(2)
self.features[name] = features
self.id.append(name)
if verbose: print('Successfully loaded id ({0}) data.'.format(name))
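# Hedged usage sketch (file names below are placeholders, not files shipped with argotools):
#   loader = Load(start_period='2010-01-03', end_period='2016-12-25')
#   loader.read_formatted_data(name='US', path_to_target='target_US.csv',
#                              path_to_features=['gc_US.csv'], merge_indices=True)
#   loader.generate_ar_lags('US', which_lags=[1, 2, 3])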
def add_target(self, id_, path_to_file, target_name=None, verbose=False):
'''
Function that individually adds target data to the load object. NOTE:
add_target assumes data has a basic readable pandas csv format (no skiprows, index at 0th column,
only features after the 0th column). To read data with a different style, please refer to add_target_customSource
Parameters
__________
id_ : string
Identifier (must be initialized)
path_to_file : string
String containing the path to the csv
target_name : string, optional (default is None)
column name for the target in the pandas DataFrame. If set to None, add_target assumes the data is named
'id_'
'''
if id_ in self.id:
df = pd.read_csv(path_to_file, index_col=0)
if target_name:
self.target[id_] = df[target_name][self.start_period:self.end_period].to_frame()
else:
self.target[id_] = df[id_][self.start_period:self.end_period].to_frame()
else:
print("ID not found within object. Please initialize it using 'new_id' function ")
def add_features_customSource(self, id_, path_to_file, source='GC', overwrite=False, verbose=True, autoanswer=None):
'''
Function designed to read data from a specific source and add it as features for the specific ID.
Parameters
__________
id_ : string
Name of the location of interest
path_to_file : string
Path to file to read in.
source : string, optional (default is 'GC')
Specify the source of the data to select reading format. Please see available options in handlers
overwrite : Boolean, optional (default is False)
If data already exists within id_ features, erase it and only use this data as input. Otherwise, merge
the data.
autoanswer : String, optional (default is None)
Automated response to the input query that might arise; can be 'y' for yes or 'n' for no.
'''
reader_functions = {
'GC': load_gc_features,
'standard':read_standard_features
}
features = reader_functions[source](path_to_file, self.start_period, self.end_period)
if verbose: print("Following features read from file : {0}".format(features.iloc[0:5,:]))
if isinstance(self.features[id_], pd.DataFrame):
old_features = self.features[id_]
if not old_features.index.equals(features.index):
    print('WARNING! The new features have a different index. \n \
    The following indices from the new features are not within the old index:')
    for ind in list(features.index.values):
        if ind not in list(old_features.index.values):
            print(ind)
    answer = autoanswer if autoanswer else input('Would you still like to merge both sources of data? y/n')
    if answer == 'n':
        print('New data ignored.')
        return
# indices match, or the user chose to merge anyway
new_features = pd.concat([old_features, features], axis=1)
self.features[id_] = new_features
print('New features generated for {0} : \n {1}'.format(id_, new_features))
elif self.features[id_] is None:
self.features[id_] = features
if isinstance(self.target[id_], pd.DataFrame):
if not self.features[id_].index.equals(self.target[id_].index):
print('WARNING! features and target indices for {0} are not equal. Indices from \
features not within the target are the following:'.format(id_))
for ind in list(self.features[id_].index.values):
if ind not in list(self.target[id_].index.values):
print(ind)
print('Index sizes: features = {0} target = {1}'.format(np.size(self.features[id_].index),np.size(self.target[id_].index)))
#check if data from features already exist
def add_features(self, id_=None, file_to_features=None, append=True):
try:
if append:
features = self.features[id_]
else:
features = None
except Exception as t:
print('An error ocurrer when checking the append parameter : {0}'.format(t))
return
if os.path.isfile(file_to_features):
try:
feature_df = pd.read_csv(file_to_features, index_col=[0])
                if features is None:
                    features = copy.copy(feature_df[self.start_period:self.end_period])
                else:
                    features = pd.concat([features, feature_df[self.start_period:self.end_period]], axis=1)
                self.features[id_] = features
print('Successfully added the specified features')
except Exception as t:
print('Could not load feature data successfully:{0}'.format(t))
return
else:
            print('Path {0} is not recognized as a real feature file path. '
                  'Please verify this information'.format(file_to_features))
return
def add_feature_lags(self, id_, feature_name, which_lags, verbose = False, store=True):
        '''
        Generate lagged copies of a specific feature for a given data id.
        Inputs are the id (must exist within data.ids), the feature name
        and the lag terms (a list of integers).
        add_feature_lags outputs a DataFrame containing the lags with
        a column id format of feature_name + lag_number (e.g. a feature 'x'
        with lag 14 becomes column 'x14').
        '''
lags = {}
success = []
for i, lag_number in enumerate(which_lags):
try:
lag = generate_lag(self.features[id_][feature_name].values, lag_number)
lags['{1}{0}'.format(lag_number, feature_name)] = lag.ravel()
success.append(lag_number)
except IOError:
print('There was a problem while generating lag {0} for feature : {1}. Please review your data'.format(lag_number, feature_name))
lag_df = pd.DataFrame(lags, index = self.features[id_].index.values)
if store == True:
self.features[id_] = pd.concat([self.features[id_], lag_df], axis=1)
else:
return lag_df
if verbose == True:
print('Successfully generated the following feature terms : {0}'.format(success) )
def generate_ar_lags(self, which_id, which_lags, verbose = False, store=True):
'''
        Generate autoregressive lags for a specific data id.
Inputs are the specific Id (must exist within data.ids)
and the lag terms (list containing the integer numbers)
generate_ar_lags outputs a dataFrame containing the lags within
a column id format of 'AR' + lag_number (e.g. AR14)
'''
lags = {}
success = []
for i, lag_number in enumerate(which_lags):
try:
lag = generate_lag(self.target[which_id].values, lag_number)
lags['AR{}'.format(lag_number)] = lag.ravel()
success.append(lag_number)
except IOError:
print('There was a problem while generating autoregressive term {0}. Please review your data'.format(lag_number))
if store == True:
self.ar_lags[which_id] = pd.DataFrame(lags, index = self.features[which_id].index.values)
else:
return pd.DataFrame(lags, index = self.features[which_id].index)
if verbose == True:
print('Successfully generated the following AR terms : {0}'.format(success) )
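    # A short usage sketch (added for illustration; `data` and 'site_A' are
    # hypothetical names, not defined in this module):
    #     data.generate_ar_lags('site_A', [1, 7, 14])
    #     data.ar_lags['site_A']   # DataFrame with columns AR1, AR7, AR14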
def interpolate_single_values(self, id_=None, target = True, features=False):
"""
This function finds missing values within dataset
that have no neighbor observations with missing values as well and performs
linear interpolation to fill them out.
"""
# Check for id
if id_ in self.id:
pass
else:
print('Id not recognized. Please check.')
return
target = self.target[id_]
nan_locs = np.isnan(target).values
nan_locs = [i for i,v in enumerate(nan_locs) if v == True]
multiple_nans = []
if 1 in nan_locs:
nan_locs.remove(1)
multiple_nans.append(1)
if len(target)-1 in nan_locs:
nan_locs.remove(len(target)-1)
multiple_nans.append(len(target)-1)
for loc in nan_locs:
if not np.isnan(target.iloc[loc+1].values) and not np.isnan(target.iloc[loc-1].values):
target.iloc[loc] = (target.iloc[loc-1].values + target.iloc[loc+1].values )/2
else:
multiple_nans.append(loc)
print('Interpolating data for {3}, {0} NaNs found in timeseries. {1} were single, {2} were multiple'.format(\
len(nan_locs), len(nan_locs) - len(multiple_nans), len(multiple_nans), id_))
def interpolate_double_values(self, id_=None, target = True, features=False):
"""
This function finds missing values within dataset
that have only one neighbor observations with missing values as well and performs
linear interpolation to fill them out.
"""
# Check for id
if id_ in self.id:
pass
else:
print('Id not recognized. Please check.')
return
target = self.target[id_]
nan_locs = np.isnan(target).values
nan_locs = [i for i,v in enumerate(nan_locs) if v == True]
multiple_nans = []
n_locs = len(nan_locs)
#checking boundaries
if 1 in nan_locs:
if 2 in nan_locs:
if 3 not in nan_locs:
target.iloc[1] = target.iloc[3].values
target.iloc[2] = target.iloc[3].values
nan_locs.remove(1)
nan_locs.remove(2)
else:
nan_locs.remove(1)
nan_locs.remove(2)
multiple_nans.append(1)
multiple_nans.append(2)
else:
target.iloc[1] = target.iloc[2].values
nan_locs.remove(1)
last_index = len(target)-1
if last_index in nan_locs:
if last_index -1 in nan_locs:
if last_index -2 in nan_locs:
multiple_nans.append(last_index)
multiple_nans.append(last_index-1)
else:
target.iloc[last_index] = target.iloc[last_index - 2 ].values
target.iloc[last_index-1] = target.iloc[last_index - 2].values
nan_locs.remove(last_index)
nan_locs.remove(last_index-1)
else:
target.iloc[last_index] = target.iloc[last_index-1].values
nan_locs.remove(last_index)
multiple_nans.append(last_index)
elif last_index -1 in nan_locs:
if last_index - 2 in nan_locs and last_index - 3 not in nan_locs:
delta = (target.iloc[last_index]-target.iloc[last_index-3])/3
target.iloc[last_index-2] = target.iloc[last_index - 3].values + delta
target.iloc[last_index-1] = target.iloc[last_index - 3].values + delta*2
nan_locs.remove(last_index-2)
nan_locs.remove(last_index-1)
            else:
                nan_locs.remove(last_index-1)
                multiple_nans.append(last_index-1)
        for loc in list(nan_locs):  # iterate over a copy; nan_locs is modified below
left_neighbor = target.iloc[loc-1].values
right_neighbor = target.iloc[loc+1].values
second_right_neighbor = target.iloc[loc+2].values
if np.isnan(right_neighbor) and not np.isnan(left_neighbor) and not np.isnan(second_right_neighbor):
delta = (second_right_neighbor-left_neighbor)/3
target.iloc[loc] = left_neighbor + delta
target.iloc[loc+1] = left_neighbor + delta*2
nan_locs.remove(loc)
nan_locs.remove(loc+1)
            elif not np.isnan(left_neighbor) and not np.isnan(right_neighbor):
                multiple_nans.append(loc)
            elif np.isnan(left_neighbor):
                multiple_nans.append(loc)
                nan_locs.remove(loc)
else:
multiple_nans.append(loc)
multiple_nans.append(loc+1)
nan_locs.remove(loc)
nan_locs.remove(loc+1)
print('Interpolating data for {3}, {0} NaNs found in timeseries. {1} were double, {2} were either single or above 2 missing values'.format(\
n_locs, n_locs - len(multiple_nans), len(multiple_nans), id_))
def remove_sparse_features(self, id_=None, sparse_value=0, threshold=.80):
if id_ in self.id:
pass
else:
print('Id not recognized. Please check.')
return
        features = self.features[id_]
        removed_cols = []
        for col_name in list(features.columns):
            n_nans = np.equal(features[col_name].values, sparse_value).sum()
            if n_nans > len(features.index)*threshold:
                del features[col_name]
                removed_cols.append(col_name)
        print('Columns removed based on a {0} threshold : {1}'.format(threshold, removed_cols))
def patch_missing_values(self, id_=None, value=None):
'''
        This function maps missing values (NaNs) within the dataframe to a specific value.
        The idea of mapping a missing value is to keep LASSO from breaking (it doesn't run on NaN data).
        This is only usable when the number of missing values is low (most likely in areas where there are
        more than two consecutive missing values). WARNING: having too many mapped values can make your model
        overshoot easily. Please use this as your last resort.
'''
if value is None:
value = NAN_TO_VALUE
if id_ in self.id:
pass
else:
print('Id not recognized. Please check.')
return
target = self.target[id_]
nan_locs = np.isnan(target).values
target[nan_locs]=value
print('{0} NANs mapped within data.'.format(nan_locs.sum()))
class LoadFromTemplate:
    '''
    meta_data = {
        data_type = geo, finance, etc
        ids = identifiers (used in load functions to extract data correctly)
        per_id_file_path = []
        per_id_file_format = []
        per_id_index_label = []
        per_id_start_period = []
        per_id_end_period = []
        index_type = index type (numeric, date, etc)
        single_output_dir =
        condensed_output_dir =
    }
    '''
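    # An example (hypothetical values) of a meta_data dictionary matching the keys
    # read in __init__ below; the id, path and dates are placeholders only:
    #     meta_data = {
    #         'ids': ['site_A'],
    #         'per_id_file_format': ['standard'],
    #         'per_id_file_path': ['data/site_A.csv'],
    #         'index_type': 'date',
    #         'per_id_index_label': ['date'],
    #         'per_id_start_period': ['2015-01-01'],
    #         'per_id_end_period': ['2018-12-31'],
    #     }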
def __init__(self, meta_data = None):
load_handler = {
'standard': load_standard,
'latam': load_latam,
}
self.id = []
self.data = {}
self.target = {}
self.file_paths = {}
self.features = {}
self.benchmarks = {}
self.ar_lags = {}
self.ar_lag_ids = {}
if meta_data is not None:
self.id = meta_data['ids']
for i, ID in enumerate(self.id):
file_format = meta_data['per_id_file_format'][i]
try:
target, features, benchmarks = load_handler[file_format](meta_data['per_id_file_path'][i], \
meta_data['index_type'], \
meta_data['per_id_index_label'][i], \
meta_data['per_id_start_period'][i], \
meta_data['per_id_end_period'][i])
            except IOError:
                print('Error occurred while using load_handler. Please verify data')
                continue
self.target[ID] = target
self.features[ID] = features
self.ar_lags[ID] = None
self.benchmarks[ID] = benchmarks
self.file_paths[ID] = meta_data['per_id_file_path'][i]
print('Data object initialized with dictionary data.')
else:
print('Data object initialized. Please add your data.')
def add_id(self, id_tag=None):
return
def generate_ar_lags(self, which_id, which_lags, verbose = False, store=True):
'''
        Generate autoregressive lags for a specific data id.
Inputs are the specific Id (must exist within data.ids)
and the lag terms (list containing the integer numbers)
generate_ar_lags outputs a dataFrame containing the lags within
a column id format of 'AR' + lag_number (e.g. AR14)
'''
lags = {}
success = []
for i, lag_number in enumerate(which_lags):
try:
lag = generate_lag(self.target[which_id], lag_number)
lags['AR{}'.format(lag_number)] = lag.transpose()
success.append(lag_number)
except IOError:
print('There was a problem while generating autoregressive term {0}. Please review your data'.format(lag_number))
if store == True:
self.ar_lags[which_id] = pd.DataFrame(lags, index = self.features[which_id].index.values)
else:
return pd.DataFrame(lags, index = self.features[which_id].index.values)
if verbose == True:
print('Successfully generated the following AR terms : {0}'.format(success) )
def add_features(self):
return
def add_target(self):
return
def add_benchmark(self):
return
def find_kinds(dataFrame):
return
def generate_lag(timeseries, lag_number):
#Transforming to numpy array.
    timeseries = np.array(timeseries)
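    # The remainder of generate_lag is not present in this file. A minimal sketch of
    # what it plausibly does (an assumption: a simple shift-based lag, padding the
    # first lag_number entries with the earliest observation) would be:
    #     lagged = np.concatenate((np.repeat(timeseries[0], lag_number),
    #                              timeseries[:-lag_number]))
    #     return lagged.reshape(-1, 1)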
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 23 13:52:02 2019
@author: srappold
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#from wordcloud import WordCloud, STOPWORDS
from sklearn import preprocessing
from sklearn.cluster import KMeans  # used by getElbowPoint below
from faker import Faker
#convert objects / non-numeric data types into numeric
def convertStringColsToInts(all_data):
for f in all_data.columns:
if all_data[f].dtype=='object':
lbl = preprocessing.LabelEncoder()
lbl.fit(list(all_data[f].values))
all_data[f] = lbl.transform(list(all_data[f].values))
return all_data
def split_join(df, field_name, split_string):
temp_df = df.drop(field_name, axis=1).join(
df[field_name].str.split(split_string, expand=True).stack()
.reset_index(level=1, drop=True).rename(field_name))
return temp_df
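# Example of what split_join does: a row whose field_name value is "a;b" (with
# split_string=";") becomes two rows, one with "a" and one with "b", while all
# other columns are duplicated.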
def getElbowPoint(df):
# THIS SECTION IS TO GET THE ELBOW (DETERMINE THE BEST NUMBER OF CLUSTERS TO USE)
#elbow_data = df.iloc[:, [3,4,12]].values # this was selecting specific columns from the previous version
elbow_data = df._get_numeric_data().dropna(axis=1)
wcss = []
for i in range(1, 11):
kmeans = KMeans(n_clusters = i, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0)
kmeans.fit(elbow_data)
wcss.append(kmeans.inertia_)
#Plotting the results onto a line graph, allowing us to observe 'The elbow'
plt.plot(range(1, 11), wcss)
plt.title('The elbow method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS') #within cluster sum of squares
return plt.show()
# END ELBOW SECTION
def rmissingvaluecol(dff,threshold):
#Remove Missing Value Columns
#dff = Data Frame passed into
#threshold = numeric value to determine percentage of missing values acceptable
l = []
    l = list(dff.drop(dff.loc[:, list((100*(dff.isnull().sum()/len(dff.index)) >= threshold))].columns, axis=1).columns.values)
print("# Columns having more than %s percent missing values:"%threshold,(dff.shape[1] - len(l)))
print("Columns:\n",list(set(list((dff.columns.values))) - set(l)))
return l
def score_in_percent (a,b):
return (sum(a==b)*100)/len(a)
def obfuscateName(df):
for i, row in df.iterrows():
#print(row)
print(i)
#two ways to do this.
# for i, row in df.iterrows():
# if <something>:
# row['ifor'] = x
# else:
# row['ifor'] = y
# df.ix[i]['ifor'] = x
#df[columnName] = df.apply(lambda row: x if something else y, axis=1)
def removeCorrAndMissing(df):
#REMOVE COLUMNS WITH 100% OF VALUES MISSING
    # rmissingvaluecol returns the columns to KEEP (those below the missing-value threshold)
    keep_columns = rmissingvaluecol(df, 100)
    df = df[keep_columns]
#REMOVE COLUMNS WITH 95% CORRELATION TO OTHER FIELDS
# Create correlation matrix
corr_matrix = df.corr().abs()
# Select upper triangle of correlation matrix
    upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))  # k=1 keeps only values above the diagonal
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 21 15:41:13 2018
@author: SilverDoe
"""
'''
>> numpy provides n-dimensional array objects(arrays).
>> Supports indexing, slicing and many other operations, much like pandas
   (which is itself built on top of numpy).
>> The ndarray object consists of contiguous one-dimensional segment of computer
memory, combined with an indexing scheme that maps each item to a location in the memory block.
numpy.array(object, dtype = None, copy = True, order = None, subok = False, ndmin = 0)
================= Parameters ==================================================
object : Any object exposing the array interface method returns an array, or any (nested) sequence.
dtype : Desired data type of array, optional.
copy : Optional. By default (true), the object is copied.
order : C (row major) or F (column major) or A (any) (default).
subok : By default, returned array forced to be a base class array. If true, sub-classes passed through.
ndmin : Specifies minimum dimensions of resultant array.
================== Attributes =================================================
1. T : Same as self.transpose(), except that self is returned if self.ndim < 2.
2. data : Python buffer object pointing to the start of the array’s data.
3. dtype : Data-type of the array’s elements.
4. flags : Information about the memory layout of the array.
5. flat : A 1-D iterator over the array.
6. imag : The imaginary part of the array.
7. real : The real part of the array.
8. size : Number of elements in the array.
9. itemsize : Length of one array element in bytes.
10. nbytes : Total bytes consumed by the elements of the array.
11. ndim : Number of array dimensions.
12. shape : Tuple of array dimensions.
13. strides : Tuple of bytes to step in each dimension when traversing an array.
14. ctypes : An object to simplify the interaction of the array with the ctypes module.
15. base : Base object if memory is from some other object.
'''
#================= creating a 1D array from a list ============================
import numpy as np
list1 = [0,1,2,3,4]
arr1d = np.array(list1)
print(type(arr1d))
print(arr1d)
'''=============== Arrays vs List =============================================
>> Arrays are designed to handle vectorized operations while a python list is not.
i.e if you apply a function it is performed on every item in the array, rather
than on the whole array object.
'''
#list1 + 2 # TypeError: can only concatenate list (not "int") to list
arr1d + 2
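# the equivalent for a plain list needs an explicit loop or comprehension:
[x + 2 for x in list1]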
'''
>> Numpy arrays are size immutable. You need to initialize a new array to increase the size.
>> Stores data of the same type. array function automatically promotes all of the numbers
to the type of the most general entry in the list
>> Numpy arrays can be made value immutable/ write protected.
'''
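# type promotion example: the int entries are promoted to float64
np.array([1, 2, 3.5]).dtype # dtype('float64')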
# making array unwriteable (write protected)
a = np.arange(10)
a.flags.writeable = False
a.flags['WRITEABLE'] = False # equivalent to the line above
print(a)
#a[0] = 1 # ValueError: assignment destination is read-only
#=============== Create a 2d array from a list of lists(matrix) ===============
import numpy as np
list2 = [[0,1,2], [3,4,5], [6,7,8]]
# default dtype
arr2d = np.array(list2)
print(arr2d)
# dtype as float
arr2d1 = np.array(list2, dtype='float')
import functools
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from src.utils.camera import Camera
depth_image_cmap = 'gist_yarg'
def plotlive(func):
"""
A decorator that updates the current figure
instead of plotting into a new one.
It requires that the figure is created before calling
the plot function that is decorated.
The figure should be passed as a parameter to the plot function.
"""
plt.ion()
@functools.wraps(func)
def new_func(*args, **kwargs):
# Clear current axis
# plt.cla()
# Clear all axes in the current figure.
axes = plt.gcf().get_axes()
for axis in axes:
axis.cla()
result = func(*args, **kwargs)
plt.draw()
plt.pause(0.01)
return result
return new_func
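# A usage sketch for the decorator (illustrative only; `images` is a hypothetical
# iterable of depth frames). The figure/axes are created once, and every call to
# the decorated function clears and redraws them:
#
#     fig, ax = plt.subplots()
#
#     @plotlive
#     def update(ax, image):
#         _plot_depth_image(ax, image)
#
#     for image in images:
#         update(ax, image)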
def plot_depth_image(image, fig_location=None, figsize=(3, 3)):
"""
Plots the depth image in a new figure.
"""
fig, ax = plt.subplots(1, figsize=figsize)
_plot_depth_image(ax, image)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.02, top=0.98)
# fig.tight_layout()
save_show_fig(fig, fig_location, show_figure=True)
@plotlive
def plot_skeleton_with_jre_live(fig, axes, image, joints, jres, label=None,
norm_vec=None, mean_vec=None):
"""
Plots the depth image, hand's skeleton, and a colorbar
for the user, showing Joint Relation Errors.
Replaces the contents of the currently active plot.
"""
_plot_skeleton_with_jre(fig, axes, image, joints, jres, label, norm_vec, mean_vec)
def plot_skeleton_with_jre(image, joints, jres, label=None, show_fig=True, fig_location=None,
norm_vec=None, mean_vec=None):
"""
Creates a new figure into which it plots the depth image,
hand's skeleton, and a colorbar for the user, showing Joint Relation Errors.
"""
fig, axes = plot_skeleton_with_jre_subplots()
_plot_skeleton_with_jre(fig, axes, image, joints, jres, label, norm_vec, mean_vec)
save_show_fig(fig, fig_location, show_fig)
def plot_skeleton_with_jre_subplots():
"""
Creates a new figure for plotting the result
of gesture recognition.
The left axis contains the estimated hand pose,
and the right axis displays colorbar.
"""
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(6.5, 5),
gridspec_kw={"width_ratios": [5.0 / 6, 1.5 / 6]})
bar_axis = axes[1]
ax_inset = inset_axes(bar_axis, width="100%", height="100%", loc='upper center',
bbox_to_anchor=(0, 0.35, 0.15, 0.4), bbox_transform=bar_axis.transAxes)
axes = [axes[0], axes[1], ax_inset]
return fig, axes
def _plot_skeleton_with_jre(fig, axes, image, joints, jres, label=None,
norm_vec=None, mean_vec=None):
"""
Plots the depth image, hand's skeleton, and a colorbar
for the user, showing Joint Relation Errors.
"""
hand_axis = axes[0]
bar_axis = axes[2]
_plot_depth_image(hand_axis, image)
_plot_hand_skeleton(hand_axis, joints, wrist_index=0, scatter=False)
_plot_joint_errors(fig, hand_axis, bar_axis, joints, joint_errors=jres)
if norm_vec is not None and mean_vec is not None:
_plot_hand_orientation(hand_axis, mean_vec, norm_vec)
if label is not None:
hand_axis.set_title(label)
for ax in axes[:2]:
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.set_axis_off()
def _plot_hand_orientation(ax, mean, norm):
"""
Plots an arrow (a vector) from the 'mean' position
in the direction of the 'norm' vector.
"""
ax.arrow(mean[0], mean[1], dx=norm[0] - mean[0], dy=norm[1] - mean[1],
color='orange', head_length=5, shape='full', head_width=4, zorder=1000,
width=1)
def cmap_subset(cmap, min, max):
""" Create a subset of a cmap. """
return colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=min, b=max),
cmap(np.linspace(min, max, 256)))
def _plot_joint_errors(fig, hand_axis, bar_axis, joints, joint_errors):
"""
Plots a colorbar in the given axis, displaying the
range of Joint Relation Errors.
"""
plt.rcParams.update({"font.size": 18})
min_err, max_err = joint_errors.min(), joint_errors.max()
min_s, max_s = min_err * 4, max_err * 4 # 20., 200.
scaled_errors = joint_errors / (max_err / (max_s - min_s)) + min_s
cmap = cmap_subset(cm.get_cmap('Reds'), 0.4, 1.0)
hand_axis.scatter(joints[..., 0], joints[..., 1],
c=joint_errors, cmap=cmap, s=scaled_errors, alpha=1, zorder=100)
cbar = fig.colorbar(cm.ScalarMappable(cmap=cmap), cax=bar_axis, format='%.2f')
colorbar_tick_labels = np.linspace(min_err, max_err, 5, dtype=float)
colorbar_tick_labels = np.round(colorbar_tick_labels, 1)
cbar.set_ticks(np.linspace(0, 1, 5))
cbar.set_ticklabels(colorbar_tick_labels)
cbar.ax.set_ylabel('Joint relation error [mm]', labelpad=15, fontsize=18)
def _plot_hand_skeleton(ax, joints, wrist_index=0, s=20, alpha=1, marker='o', scatter=True,
finger_colors='rmcgb', linewidth=2, linestyle='solid'):
"""
Plots the hand's skeleton from joints in image coordinates.
"""
joints = np.squeeze(joints) # get rid of surplus dimensions
if joints.ndim != 2:
raise ValueError(F"joints.ndim should be 2, but is {joints.ndim}")
fingers_bases = np.arange(wrist_index + 1, wrist_index + 20, 4)
wrist_joint = joints[wrist_index]
if scatter:
ax.scatter(wrist_joint[..., 0], wrist_joint[..., 1], c=finger_colors[-1], marker=marker, s=s, alpha=alpha)
for i, finger_base in enumerate(fingers_bases):
finger_joints = joints[finger_base:finger_base + 4]
if scatter:
ax.scatter(finger_joints[..., 0], finger_joints[..., 1], c=finger_colors[i], marker=marker, s=s,
alpha=alpha)
xs = np.concatenate([wrist_joint[0:1], finger_joints[:, 0]])
ys = np.concatenate([wrist_joint[1:2], finger_joints[:, 1]])
ax.plot(xs, ys, c=finger_colors[i], linewidth=linewidth, linestyle=linestyle)
def _plot_depth_image(ax, image):
"""
Plot a depth image in an existing axis.
"""
ax.imshow(image, cmap=depth_image_cmap)
@plotlive
def _plot_depth_image_live(ax, image):
"""
Plot a depth image in an existing axis
by replacing its previous content.
"""
ax.imshow(image, cmap=depth_image_cmap)
def plot_image_with_skeleton(image, joints2d, show_fig=True, fig_location=None, figsize=(4, 3)):
"""
Plots the depth image and the skeleton in a new figure.
"""
fig, ax = plt.subplots(1, figsize=figsize)
_plot_image_with_skeleton(fig, ax, image, joints2d)
save_show_fig(fig, fig_location, show_fig)
@plotlive
def plot_image_with_skeleton_live(fig, ax, image, joints2d):
"""
Plots the depth image and the skeleton in an existing figure
by replacing its content.
"""
_plot_image_with_skeleton(fig, ax, image, joints2d)
def _plot_image_with_skeleton(fig, ax, image, joints2d):
"""
Plots the depth image and the skeleton in an existing figure.
"""
_plot_depth_image(ax, image)
_plot_hand_skeleton(ax, joints2d)
ax.set_axis_off()
fig.tight_layout()
def plot_joints_with_annotations(image, joints_pred, joints_true, show_fig=True, fig_location=None, figsize=(4, 3)):
"""
Plots a depth image with predicted and ground truth skeletons.
"""
fig, ax = plt.subplots(1, figsize=figsize)
_plot_depth_image(ax, image)
_plot_hand_skeleton(ax, joints_true, scatter=False, linewidth=2, linestyle=(0, (2, 2)))
_plot_hand_skeleton(ax, joints_pred, scatter=False, linewidth=2)
ax.set_axis_off()
fig.tight_layout()
save_show_fig(fig, fig_location, show_fig)
@plotlive
def plot_skeleton_with_label_live(fig, ax, image, joints2d, label):
"""
Plots a depth image with skeleton and
a label above the axis.
"""
_plot_depth_image(ax, image)
_plot_hand_skeleton(ax, joints2d)
ax.set_title(label)
ax.set_axis_off()
fig.tight_layout()
def plot_proportion_below_threshold(proportions, show_figure=True, fig_location=None):
"""
Plots the Proportion of joints below threshold metric.
"""
if np.max(proportions) <= 1:
proportions *= 100
fig, ax = plt.subplots(1, 1)
ax.plot(proportions)
ax.set_ylim(0, 100)
    ax.set_ylabel('Proportion of frames within distance (%)')
ax.set_xlabel('Max joint error threshold (mm)')
fig.tight_layout()
save_show_fig(fig, fig_location, show_figure)
def plot_depth_image_histogram(image, show_fig=True, fig_location=None):
"""
Plots a histogram of depth values in the image.
"""
plt.rcParams.update({"font.size": 24})
fig = plt.figure(figsize=(10, 7))
min, max = np.min(image), np.max(image)
    plt.hist(image.ravel(), bins=np.arange(min, max + 1, step=1), histtype='stepfilled')
plt.xlabel('Depth [mm]', labelpad=20)
plt.ylabel('Frequency', labelpad=20)
plt.margins(x=0, y=0)
plt.tick_params(axis='x', pad=15)
plt.tick_params(axis='y', pad=15)
plt.tight_layout()
save_show_fig(fig, fig_location, show_fig)
def plot_image_comparison(original, filtered, filter_name):
"""
Plots two depth images next to each other.
"""
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4), sharex=True,
sharey=True)
_plot_depth_image(ax1, original)
ax1.set_title('original')
ax1.axis('off')
_plot_depth_image(ax2, filtered)
ax2.set_title(filter_name)
ax2.axis('off')
fig.show()
def plot_bounding_cube(image, bcube, cam: Camera, fig_location=None, show_fig=True):
"""
Projects a bounding cube in 3d coordinates
onto the given depth image.
"""
def get_four_points(P, dx, dy):
P = np.array(P)
P1 = P.copy()
P2 = P.copy()
P3 = P.copy()
P4 = P.copy()
P2[0] += dx
P3[1] += dy
P4[:2] += [dx, dy]
        Ps_xyz = np.array([P1, P2, P4, P3])
from cili.util import load_eyelink_dataset
from Internal.qol_functions import kwarget
import numpy as np
# This is a root-class of a SubjectType, it's not meant to be used directly, your subject is a subclass of an
# Eyelink subject.
# If you need to change how to handle something (like getfix) you can override functions in a subclass.
# TODO: In the future I'll make it possible to get a file from a cloud storage like google or aws
class EyelinkType(object):
# example:
# filepath = /dir/myname.asc (this includes the file name)
# name = myname (this doesn't include .asc extension)
def __init__(self, **kwargs):
self.filepath = kwarget('file', None, **kwargs)
self.name = kwarget('name', self.getname(), **kwargs)
# Subjects have samples and events as data columns and event tags (like FIX, BLINK) respectively
self.samples, self.events = self.parse_eyetracker_data()
# As default (or commodity) we assume subject ID is dir/"myname".asc, override this if you want to specify
def getname(self):
if self.filepath is None:
return None
start = max(self.filepath.rfind('/'), self.filepath.rfind('\\')) + 1 # filepath could use slash or bslash
end = self.filepath.rfind('.')
if start == -1 or end == -1:
raise ValueError("Could not extract a valid name from filepath on file: " + self.filepath)
return self.filepath[start:end]
# TODO: For now we use (github) beOn/cili toolbox to parse data, in the future we could streamline this
def parse_eyetracker_data(self):
if self.filepath is None:
return None, None
samples, events = load_eyelink_dataset(self.filepath)
return samples, events
def get_blinks(self):
if self.events is None:
raise ValueError("Cannot get_blinks if no parsed events on filepath:" + self.filepath)
blink_data = []
for i_blink, a_blink in self.events.dframes['EBLINK'].iterrows():
blink_on = i_blink
blink_off = i_blink + a_blink.duration
blink_data.append((blink_on, blink_off))
# Blink columns: blink start and end time-stamp
# TODO: Maybe pandas dataframe is a better structure to use instead, must discuss
return np.array(blink_data, dtype=[('t_on', int), ('t_off', int)])
# get_saccades may use blinkdata to clean anidated saccades by a blink, which is a common noise in eyelink data
def get_saccades(self, _blinkdata=None):
if self.events is None:
raise ValueError("Cannot get saccades with no parsed event data on file" + self.filepath)
saccade_data = []
for i_sac, a_saccade in self.events.dframes['ESACC'].iterrows():
saccade_on = i_sac
saccade_off = i_sac + a_saccade.duration
# If blink data given reject saccades that contain a blink
if _blinkdata is not None:
blink_mask = (saccade_on <= _blinkdata['t_on']) & (_blinkdata['t_off'] <= saccade_off)
else:
blink_mask = 0 # A mask with no data to eliminate
if np.any(blink_mask): # Then it's a false sacade, we ignore it
continue
amplitude = np.hypot(a_saccade.x_end - a_saccade.x_start, a_saccade.y_end - a_saccade.y_start)
saccade_data.append((saccade_on, saccade_off, a_saccade.x_start, a_saccade.y_start, a_saccade.x_end,
a_saccade.y_end, a_saccade.peak_velocity, amplitude))
# Saccade columns: timestamps start-end, x-y position start and end, velocity and amplitude in pixels
return np.array(saccade_data, dtype=[('t_on', int), ('t_off', int), ('x_on', float), ('y_on', float),
('x_off', float), ('y_off', float), ('vel', float), ('amp', float)])
def get_fixations(self):
if self.events is None:
raise ValueError("Tried to get fixations with no parsed date on file" + self.filepath)
fixation_data = []
for i_fix, a_fix in self.events.dframes['EFIX'].iterrows():
fix_on = i_fix
fix_off = i_fix + a_fix.duration
fixation_data.append((fix_on, fix_off, a_fix.x_pos, a_fix.y_pos))
# Fixation columns: timestamps start-end and x-y position.
        return np.array(fixation_data, dtype=[('t_on', int), ('t_off', int), ('x_on', float), ('y_on', float)])
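# A minimal subclassing sketch (not part of the original module) showing how a
# concrete subject type could override behaviour; 'MySubject' and the file path
# below are hypothetical:
#
#     class MySubject(EyelinkType):
#         def getname(self):
#             # e.g. strip a site prefix such as "lab01_" from the file name
#             return super(MySubject, self).getname().split('_')[-1]
#
#     subject = MySubject(file='/data/lab01_s01.asc')
#     blinks = subject.get_blinks()
#     saccades = subject.get_saccades(_blinkdata=blinks)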
# -*- coding: utf-8 -*-
"""
bootstrapping based on event rate on one animal
num_bs_replicates=50000 takes too much time, I did 1000 instead.
Results with narrowest CIs: (space: 60%,75%,90% exceedance, 40-90 state threshold )
CASE: SpikerateCoact
animal        winner exceedance        winner state_threshold     (combination giving the narrowest CI)
H1 0p9 70
H2 0p9 90
H3 0p75 90
H4 0p9 90
H5 0p9 90
H6 0p9 90
H7 0p9 90
H8 0p9 90
H9 0p9 90
H10 0p9 90
H11 0p9 90
N1 0p9 60
N2 0p9 90
N3 0p9 80
N4 0p9 90
N5 0p9 90
N6 0p9 90
CASE: SpikestdCoact
animal        winner exceedance        winner state_threshold     (combination giving the narrowest CI)
H1 0p9 90
H2 0p9 90
H3 0p9 90
H4 0p9 90
H5 0p9 60
H6 0p9 90
H7 0p9 90
H8 0p9 90
H9 0p9 90
H10 0p9 90
H11 0p9 90
N1 0p9 90
N2 0p9 90
N3 0p9 90
N4 0p9 90
N5 0p9 90
N6 0p9 90
"""
# In[ ]:
from string import ascii_letters
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
# In[ ]: fns
def getResampledStats(time, stats):
resampled_index = np.random.choice(np.arange(len(time)), len(time))
print(resampled_index)
return np.array(time)[resampled_index], np.array(stats)[resampled_index]
def getEventRate(denom, stats, threshold):
    #denom - duration used as the denominator of the rate
    #stats - resampled stats, with the same index as the resampled time
state_array = np.zeros(len(stats))
for i in range(len(stats)):
if stats[i] > threshold:
state_array[i] = 1
transition_timestamp = []
for i in range(len(state_array) - 1):
if (state_array[i] - state_array[i + 1]) == -1:
transition_timestamp.append([i + 1])
return len(transition_timestamp) / (denom)
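# Worked example: if the stat crosses above `threshold` 3 times during a
# recording with denom = 600 s, getEventRate returns 3 / 600 = 0.005 events/s.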
def draw_bs_replicates(denom,time,stats,size):
"""creates a bootstrap sample, computes replicates and returns replicates array"""
# Create an empty array to store replicates
bs_replicates = np.empty(size)
# Create bootstrap replicates as much as size
for i in range(size):
# Create a bootstrap sample
#bs_sample = np.random.choice(data,size=len(data))
_, bb = getResampledStats(time, stats)
rate = getEventRate(denom,bb,threshold)
# Get bootstrap replicate and append to bs_replicates
bs_replicates[i] = rate
return bs_replicates
# In[ ]: EXAMPLE ONE ANIMAL
df = pd.read_csv('../HeartFailureAnimals/H10/SpikerateCoact_output_1min_20minbuff_0p6/coactivity_stats.csv')
time = df['time']
stats = df['coactivity_stat']
endbaseline_1844 = 19430.61330
threshold = 50
num_bs_replicates=100 #Change to 50000
#time before end of baseline
index = time < endbaseline_1844
time = time[index]
stats = stats[index]
#convert to lists
time = time.tolist()
stats = stats.tolist()
if len(stats) > 0:
# Draw N bootstrap replicates
denom = time[-1] - time[0] #hard coded
bs_replicates_values = draw_bs_replicates(denom,time, stats, num_bs_replicates)
# Print empirical mean
#print("Empirical mean: " + str(np.mean(values)))
# Print the mean of bootstrap replicates
#print("Bootstrap replicates mean: " + str(np.mean(bs_replicates_values)))
########################### COMMENT IF PLOTTING STATES ######################################################
# Plot the PDF for bootstrap replicates as histogram & save fig
plt.hist(bs_replicates_values,bins=30)
lower=5
upper=95
# Showing the related percentiles
plt.axvline(x=np.percentile(bs_replicates_values,[lower]), ymin=0, ymax=1,label='5th percentile',c='y')
plt.axvline(x=np.percentile(bs_replicates_values,[upper]), ymin=0, ymax=1,label='95th percentile',c='r')
plt.xlabel("Event rate")
plt.ylabel("Probability Density Function")
#plt.title("pig" + current_animal + " SpikerateCoact_output_1min_20minbuff_0p6" +" Th: " + str(threshold))
plt.legend()
#str_PDF_savefig_pdf= "pig" + str(current_animal) + "_PDF_SpikerateCoact_output_1min_20minbuff_0p6" + "_Threshold" + str(threshold) + "_bootstrap" + str(num_bs_replicates) + "_baseline.pdf"
#plt.savefig(str_PDF_savefig_pdf)
plt.show()
# Get the corresponding values of 5th and 95th CI
CI_BS = np.percentile(bs_replicates_values,[lower,upper])
CI_width = np.diff(CI_BS)
# Print stuff
print("event rate replicates: ",bs_replicates_values)
print("event rate replicates mean: ",np.mean(bs_replicates_values))
print("event rate replicates std: ",np.std(bs_replicates_values))
print("The confidence interval: ",CI_BS)
print("CI width: ",CI_width)
else:
print("has no transition timestamps for threshold = " + str(threshold))
# In[ ]:calculation params
#End of Baseline timestamps
EndBaseline_HF = [15157.47730, 13782.64500, 14479.24235, 15010.85545, 20138.13390, 14126.76400, 22447.50400, 19488.27205, 19001.37350, 16823.12835, 19430.61330]
EndBaseline_Normal = [18081.77015, 14387.14405, 17091.46195, 21465.20400, 28360.64150, 22006.09015]
#End of Baseline linewidth
lw_EndBaseline = 3
# In[ ]: Data: HF Animals
HF_path = '../HeartFailureAnimals/'
filenames = os.listdir(HF_path)
filenames = [f for f in filenames if (f.startswith("H"))]
print(filenames)
# ['H1', 'H2', 'H3', 'H4', 'H5', 'H6', 'H7', 'H8', 'H9', 'H10', 'H11']
# In[ ]: SpikerateCoact_output_1min_20minbuff_0p6 : EACH In[] AFTER THIS ONE IS REPEAT
threshold = 10
animals = list()
coactivity_stats_filepaths = list()
state_timestamp_HF = []
bsstats_all = list()
split_char_animal="H"
num_bs_replicates=1000 #Change to 50000
# fig, ax_HF = plt.subplots(figsize = (22,12), nrows = len(filenames), ncols = 1)
# str_HF_state_title= "States for HF animals from SpikerateCoact_output_1min_20minbuff_0p6, threshold = " + str(threshold)
# fig.suptitle(str_HF_state_title, fontsize=16)
count = 0
for filename in filenames:
current_path = os.path.join(HF_path, filename)
current_path_SpikerateCoact_output_1min_20minbuff_0p6 = os.path.join(current_path, 'SpikerateCoact_output_1min_20minbuff_0p6').replace("\\","/")
current_animal = filename.split(split_char_animal)[1]
animals.append(filename)
for root, dirs, files in os.walk(current_path_SpikerateCoact_output_1min_20minbuff_0p6):
#print(files)
for name in files:
if name.startswith(("coactivity_stats.csv")):
coactivity_stats_filepath = os.path.join(current_path_SpikerateCoact_output_1min_20minbuff_0p6, name).replace("\\","/") ## FOR WINDOWS BACKSLASH
coactivity_stats_filepaths.append(coactivity_stats_filepath)
str_current = "current path = " + coactivity_stats_filepath
print(str_current)
df = pd.read_csv(coactivity_stats_filepath)
time = df['time']
stats = df['coactivity_stat']
#limit time before end of baseline
index = time < EndBaseline_HF[count]
time = time[index]
stats = stats[index]
#convert to lists
time = time.tolist()
stats = stats.tolist()
########################### UNCOMMENT TO PLOT STATES (COMMENT THE FIGURES BELOW FIRST) ######################################################
# fill figure
# ax_HF[count].plot(time/3600, state_array ,'--', color = 'orchid', alpha=0.8)
# ax_HF[count].set_xticks(np.array(transition_timestamp)/3600)
# ax_HF[count].set_xlim(time[0]/3600,EndBaseline_HF[count]/3600) #limiting to baseline data only
# #ax_HF[count].axvline(x=EndBaseline_HF[count]/3600, color = 'black', linewidth = lw_EndBaseline) #full exp, mark end of baseline for each
# ax_HF[count].tick_params(axis="x", labelsize=3)
# ax_HF[count].set_yticks([0,1])
# ax_HF[count].spines["top"].set_visible(False)
# ax_HF[count].spines["right"].set_visible(False)
# ax_HF[count].spines["bottom"].set_visible(False)
# ax_HF[count].set_ylabel((''.join(filter(lambda i: i.isdigit(), current_animal))), fontsize=12)
count = count + 1
########################## BOOTSTRAP (not sure what to bootstrap, was event rate definition len(events)/duration?) #################################
#values = np.diff(np.array(transition_timestamp)) #data to bootstrap
if len(stats) > 0:
# Draw N bootstrap replicates
denom = time[-1] - time[0] #hard coded
bs_replicates_values = draw_bs_replicates(denom,time, stats, num_bs_replicates)
########################### COMMENT IF PLOTTING STATES ######################################################
# Plot the PDF for bootstrap replicates as histogram & save fig
plt.hist(bs_replicates_values,bins=30)
lower=5
upper=95
# Showing the related percentiles
plt.axvline(x=np.percentile(bs_replicates_values,[lower]), ymin=0, ymax=1,label='5th percentile',c='y')
plt.axvline(x=np.percentile(bs_replicates_values,[upper]), ymin=0, ymax=1,label='95th percentile',c='r')
plt.xlabel("Event rate")
plt.ylabel("Probability Density Function")
plt.title("pig" + current_animal + " SpikerateCoact_output_1min_20minbuff_0p6" +" Th: " + str(threshold))
plt.legend()
str_PDF_savefig_pdf= "pig" + str(current_animal) + "_PDF_SpikerateCoact_output_1min_20minbuff_0p6" + "_Thr" + str(threshold) + "_BS" + str(num_bs_replicates) + "EvRate_base.pdf"
plt.savefig(str_PDF_savefig_pdf)
plt.show()
# Get the bootstrapped stats
bs_mean = np.mean(bs_replicates_values)
        bs_std = np.std(bs_replicates_values)
import os
import pickle
import torch
import numpy as np
import h5py
fps = 30
def load_smpl_parameters(poses, betas, trans, step):
    np.set_printoptions(suppress=True)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 25 22:23:53 2019
@author: <NAME>, <EMAIL>
"""
#basepath ="./STACKS/SPJob1_120218_1"
#basepath ="./STACKS/STACKS_0"
basepath ="./STACKS"
#%matplotlib auto
import os, sys
import numpy as np
import cv2 as cv
from skimage.morphology import skeletonize
from random import randrange
from matplotlib import pyplot as plt
from scipy.signal import argrelextrema
import pickle as pickle
import easygui as gui
from xlwt import Workbook
global outcome, CELL_DICTIONARY, MAX_NUMBER_OF_CELLS, tiff_images, dim1, dim2
MAX_NUMBER_OF_CELLS=0
answer=gui.buttonbox("Would you like to Start Over or Continue Previous Project?",choices=("Start Over","Previous Project"))
if answer=="Start Over":
answer=True
if answer=="Previous Project":
answer=False
if answer==False:
with open('SAVED_WORK', 'rb') as infile:
outcome = pickle.load(infile)
def merges_red(img1,img2,amount):
overlay = cv.cvtColor(img1, cv.COLOR_GRAY2BGR)
b,g,r = cv.split(overlay)
r = cv.add(r,amount, dst = r, mask =img2, dtype = cv.CV_8U)
merged=cv.merge((b,g,r),img1)
return merged
def merges_blue(img1,img2,amount):
b,g,r = cv.split(img1)
b = cv.add(b,amount, dst = b, mask =img2, dtype = cv.CV_8U)
merged=cv.merge((b,g,r),img1)
return merged
def merges_green(img1,img2,amount):
b,g,r = cv.split(img1)
g = cv.add(g,amount, dst = g, mask =img2, dtype = cv.CV_8U)
merged=cv.merge((b,g,r),img1)
return merged
def removesmallelements(img,minsize):
img=img.astype(np.uint8)
nb_components, output, stats, centroids = cv.connectedComponentsWithStats(img, connectivity=8)
sizes = stats[1:, -1]; nb_components = nb_components - 1
img2 = np.zeros((output.shape))
for i in range(0, nb_components):
if sizes[i] >=minsize:
img2[output == i + 1] = 255
return(img2)
def nothing(x):
pass
def watershed(img):
global b1,stats
edges= np.pad(np.ones((dim1-2,dim2-2)), pad_width=1, mode='constant', constant_values=0)
img=img*edges
img=img.astype(np.uint8)
nb_components, output, stats, centroids = cv.connectedComponentsWithStats(cv.bitwise_not(img),\
connectivity=4)
b1=np.zeros((dim1,dim2)).astype(np.uint8)
g1=np.zeros((dim1,dim2)).astype(np.uint8)
r1=np.zeros((dim1,dim2)).astype(np.uint8)
sizes = stats[1:, -1]; nb_components = nb_components - 1
for i in range(0, nb_components):
if sizes[i] <=10 :
output[output==i+1]=0
output=cv.dilate(output.astype(np.uint8),None,iterations=1)
for i in range(0, nb_components):
if sizes[i] <=10000 : #MAX CELL SIZE
b1[output == i + 1]=colors[i][0]
g1[output == i + 1]=colors[i][1]
r1[output == i + 1]=colors[i][2]
image=cv.merge((b1,g1,r1))
return (image,output)
def simple_watershed(img):
global output,Centroid_list,nb_components
Centroid_list=[]
edges= np.pad(np.ones((dim1-2,dim2-2)), pad_width=1, mode='constant', constant_values=0)
# print(img,edges)
img=img*edges
img=img.astype(np.uint8)
nb_components, output, stats, centroids = cv.connectedComponentsWithStats(cv.bitwise_not(img),\
connectivity=4)
sizes = stats[1:, -1]; nb_components = nb_components -1
for i in range(0, nb_components):
if sizes[i] <=10 :
output[output==i+1]=0
for cell_index in range(0,nb_components+1):
if cell_index>1:
if np.where(output==cell_index)[0].size>0:
Centroid_list.append(((np.mean(np.where(output==cell_index)[0]),np.mean(np.where(output==cell_index)[1])),cell_index))
return (output,Centroid_list)
def get_centroids(frame):
Centroid_list=[]
nb_components=list(np.unique(frame))
for cell_index in nb_components:
if cell_index>1:
if np.where(frame==cell_index)[0].size>0:
Centroid_list.append(((np.mean(np.where(frame==cell_index)[0]),np.mean(np.where(frame==cell_index)[1])),cell_index))
return (Centroid_list)
def get_stats(input_data):
global cnt,rect, CELL_DICTIONARY
background=np.zeros((dim1,dim2))
CELL_DICTIONARY={}
nb_components=list(np.unique(input_data[0][1]))
for cell_index in range(MAX_NUMBER_OF_CELLS+1):
if cell_index>1:
CELL_DICTIONARY[cell_index]=[]
for frame_index in range(len(input_data)):
frame=input_data[frame_index][1].copy()
nb_components=list(np.unique(frame))
for cell_index in nb_components:
if cell_index>1:
if np.where(frame==cell_index)[0].size>0:
background[frame==cell_index]=255
contours,hierarchy = cv.findContours(background.astype(np.uint8), 1,method= cv.CHAIN_APPROX_NONE)
cnt = contours[0]
cy,cx=int(np.mean(np.where(frame==cell_index)[0])),int(np.mean(np.where(frame==cell_index)[1]))
area = cv.contourArea(cnt)
perimeter = cv.arcLength(cnt,True)
rect = cv.minAreaRect(cnt)
width=min(rect[1][1],rect[1][0])
length=max(rect[1][1],rect[1][0])
angle=rect[2]+90
if rect[1][1] > rect[1][0]:
angle=rect[2]
#if len(cnt)>5:
# (x,y),(MA,ma),angle = cv.fitEllipse(cnt)
vert_height = cv.boundingRect(cnt)[3]
hoz_width=len(list(np.where(np.where(frame==cell_index)[1]==cx)[0]))
CELL_DICTIONARY[cell_index].append(((cx,cy),int(area),int(perimeter),int(width),int(length),int(angle),vert_height,hoz_width))
background=np.zeros((dim1,dim2))
for cell_index in range(MAX_NUMBER_OF_CELLS+1):
if cell_index>1:
if len(CELL_DICTIONARY[cell_index])<=frame_index:
CELL_DICTIONARY[cell_index].append((("NA","NA"),"NA","NA","NA","NA","NA","NA","NA"))
for cell_index in range(MAX_NUMBER_OF_CELLS+1):
if cell_index>1:
if all(elem ==(('NA', 'NA'), 'NA', 'NA', 'NA', 'NA', 'NA', 'NA', 'NA') for elem in CELL_DICTIONARY[cell_index])==True:
del(CELL_DICTIONARY[cell_index])
return (CELL_DICTIONARY)
def follow_cells_and_watershed(prev_img,img):
#prev_img has to be mask and img has to be membrane contours
global pairs_of_cells,mask,MAX_NUMBER_OF_CELLS
centroids_prev=get_centroids(prev_img)
post_output,centroids_post=simple_watershed(img)
pairs_of_cells=[]
for centroid1num in centroids_prev:
dist_list=[]
centroid1=centroid1num[0]
for centroid2num in centroids_post:
centroid2=centroid2num[0]
dist_list.append((np.sqrt((centroid1[0]-centroid2[0])**2+((centroid1[1]-centroid2[1])**2)),(centroid1num[1],centroid2num[1])))
if min(dist_list, key = lambda t: t[0])[0]<50:
pairs_of_cells.append((min(dist_list, key = lambda t: t[0])[1],min(dist_list, key = lambda t: t[0])[0]))
mask=np.zeros((dim1,dim2))
post_cell_nums = [lis[0][1] for lis in pairs_of_cells]
pre_cell_nums= [lis[0][0] for lis in pairs_of_cells]
MAX_NUMBER_OF_CELLS=max(MAX_NUMBER_OF_CELLS,max(post_cell_nums),max(pre_cell_nums))
for cell_index in np.unique(post_output):
if cell_index>1:
if (cell_index in post_cell_nums)==False:
MAX_NUMBER_OF_CELLS+=1
pairs_of_cells.append(((MAX_NUMBER_OF_CELLS,cell_index),1000))
cell_num=len(pairs_of_cells)
for pair_index in range(cell_num):
pair=pairs_of_cells[pair_index]
for pair2_index in range(cell_num):
pair2=pairs_of_cells[pair2_index]
if pair2[0][1]==pair[0][1]:
if pair2[0][0]!=pair[0][0]:
pairs_of_cells[pair_index]=pairs_of_cells[pair2_index]=min((pair,pair2), key = lambda t: t[1])
#mask[post_output==pair[0][1]]=pair[0][0]
for pair in pairs_of_cells:
mask[post_output==pair[0][1]]=pair[0][0]
mask=cv.dilate( mask.astype(np.uint8),None,iterations=1)
b2=np.zeros((dim1,dim2)).astype(np.uint8)
g2=np.zeros((dim1,dim2)).astype(np.uint8)
r2=np.zeros((dim1,dim2)).astype(np.uint8)
for i in (np.unique( mask).astype(np.uint8)):
if i>1:
b2[ mask == i ]=colors[i-1][0]
g2[ mask == i ]=colors[i-1][1]
r2[ mask == i ]=colors[i-1][2]
image=cv.merge((b2,g2,r2))
return image, mask
def GUI(event,x,y,flags,param):
global Skeletonized_Image,Cursor
global drawing, X,Y ,been
global saved_list,dim, iter_photo
Cursor=np.zeros((dim1,dim2)).astype(np.uint8)
Skeletonized_Image=saved_list[len(saved_list)-1].copy()
if drawing==True:
if mode==True:
if event == cv.EVENT_LBUTTONDOWN:
debug = cv.line(Skeletonized_Image,(x,y),(X,Y),(255),1).copy()
saved_list.append(debug)
update_numbers(saved_list[-1],iter_photo)
drawing=False
been=False
if been ==True:
if drawing==False:
if mode==True:
if event == cv.EVENT_LBUTTONDOWN:
drawing=True
X,Y=x,y
if drawing==False:
been=True
if mode==False:
cv.circle(Cursor,(x,y),dim, (1), 0)
cv.circle(Skeletonized_Image,(x,y),dim, (0), -1)
if event== cv.EVENT_LBUTTONDOWN:
saved_list.append(Skeletonized_Image.copy())
update_numbers(saved_list[-1],iter_photo)
def process_image(img,a,b,c,d):
if c <1:
c=1
BLURED= cv.GaussianBlur(img,(5,5),0)
GAUSSTHRESH=cv.adaptiveThreshold(BLURED,255,cv.ADAPTIVE_THRESH_MEAN_C,cv.THRESH_BINARY,2*c+1,0)
rem=removesmallelements(GAUSSTHRESH,1000)
img3 = cv.GaussianBlur(rem,(5,5),0)
ret,img4 = cv.threshold(img3,a,255,cv.THRESH_BINARY)
rem=removesmallelements(img4,1000)
img5 = cv.GaussianBlur(rem,(5,5),0)
ret,img6 = cv.threshold(img5,b,255,cv.THRESH_BINARY)
Skeletonized_Image = (skeletonize(img6//255) * 255).astype(np.uint8)
Watershed=watershed(Skeletonized_Image)[0]
img6=cv.cvtColor(img6.astype(np.uint8), cv.COLOR_GRAY2BGR)
Skeletonized_Image_BGR=merges_red(img//2,Skeletonized_Image,255)
return (img,GAUSSTHRESH,img4.astype(np.uint8),img6,Skeletonized_Image_BGR,Watershed,Skeletonized_Image)
def update_numbers(membrane_outlines,frame_num,speed="Fast"):
global Numbers, tiff_images
Image=tiff_images[frame_num]
Base_Image=2*(Image[0]+Image[1])
Numbers=np.zeros((dim1,dim2)).astype(np.uint8)
if speed=="Slow":
mask=outcome[frame_num][1].copy()
centroids=get_centroids(mask)
if speed=="Fast":
mask=watershed(membrane_outlines)[1]
centroids=get_centroids(mask)
for centroid in centroids:
cv.putText(Numbers,str(centroid[1]),org=(int(centroid[0][1]-5),int(centroid[0][0]+5)),fontFace=cv.FONT_HERSHEY_SCRIPT_SIMPLEX,fontScale=0.3,thickness=0,color=(1))
pre_left_panel=merges_red(Base_Image//2,membrane_outlines,255)
left_panel=merges_blue(pre_left_panel,Numbers,255)
return left_panel
def save_all_work(boolean):
for frame_num in range(len(outcome)):
outline=outcome[frame_num][2].copy()
if len(outcome[frame_num][2])==0:
break
if frame_num==0:
Channel3_Mask,Channel1_Mask=watershed(outline)
outcome[frame_num][1]=Channel1_Mask.copy()
outcome[frame_num][0]=Channel3_Mask.copy()
Numbers_Mask=update_numbers(outline,frame_num,"Slow")
outcome[frame_num][3]=Numbers_Mask.copy()
if frame_num>0:
prev_frame=outcome[frame_num-1][1].copy()
Channel3_Mask,Channel1_Mask=follow_cells_and_watershed(prev_frame,outline)
outcome[frame_num][1]=Channel1_Mask.copy()
outcome[frame_num][0]=Channel3_Mask.copy()
Numbers_Mask=update_numbers(outline,frame_num,"Slow")
outcome[frame_num][3]=Numbers_Mask.copy()
if boolean==True:
with open('SAVED_WORK', 'wb') as outfile:
pickle.dump(outcome, outfile, pickle.HIGHEST_PROTOCOL)
def display(outcome):
cv.namedWindow('Press E to Exit')
frame_num=0
while(1):
k = cv.waitKey(1) & 0xFF
display_right=outcome[frame_num][0]
display_left=outcome[frame_num][3]
display= np.hstack((display_left,display_right))
cv.imshow('Press E to Exit',display)
cv.moveWindow('Press E to Exit',150,10)
if k ==ord('p'):
if frame_num<(len(outcome)-1):
frame_num+=1
if k==ord('o'):
if frame_num>0:
frame_num-=1
if k==ord('e'):
break
cv.destroyAllWindows()
def save_excel(outcome,Save_As):
wb = Workbook()
sheet1 = wb.add_sheet('Sheet 1')
CELL_DICTIONARY=get_stats(outcome)
number_of_frames=len(tiff_images)
sheet1.write(0,0,'Cell Number')
sheet1.write(0,1,'Parameter')
row=1
for i in range(number_of_frames):
sheet1.write(0,2+i,'Frame'+str(i))
row=-8
cell_index2=0
for cell_index in list(CELL_DICTIONARY.keys()):
row+=8
sheet1.write(1+cell_index2*8,1,"Centroid")
sheet1.write(2+cell_index2*8,1,"Surface Area")
sheet1.write(3+cell_index2*8,1,"Perimeter")
sheet1.write(4+cell_index2*8,1,"Width")
sheet1.write(5+cell_index2*8,1,"Length")
sheet1.write(6+cell_index2*8,1,"Angle")
sheet1.write(7+cell_index2*8,1,"Vertical Height")
sheet1.write(8+cell_index2*8,1,"Horizontal Width")
cell_index2+=1
for j in range(8):
sheet1.write((j+1)+row,0,str(cell_index))
for frame in range(number_of_frames):
sheet1.write(1+row,2+frame,str((CELL_DICTIONARY[cell_index][frame][0])))
sheet1.write(2+row,2+frame,str((CELL_DICTIONARY[cell_index][frame][1])))
sheet1.write(3+row,2+frame,str((CELL_DICTIONARY[cell_index][frame][2])))
sheet1.write(4+row,2+frame,str((CELL_DICTIONARY[cell_index][frame][3])))
sheet1.write(5+row,2+frame,str((CELL_DICTIONARY[cell_index][frame][4])))
sheet1.write(6+row,2+frame,str((CELL_DICTIONARY[cell_index][frame][5])))
sheet1.write(7+row,2+frame,str((CELL_DICTIONARY[cell_index][frame][6])))
sheet1.write(8+row,2+frame,str((CELL_DICTIONARY[cell_index][frame][7])))
wb.save(Save_As)
return CELL_DICTIONARY
def plot_cell_movement(CELL_DICTIONARY):
X = np.array(())
Y= np.array(())
U = np.array(())
V = np.array(())
for cell_index in list(CELL_DICTIONARY.keys()):
if type(CELL_DICTIONARY[cell_index][-1][0][1])!=str:
if type(CELL_DICTIONARY[cell_index][0][0][1])!=str:
startx=CELL_DICTIONARY[cell_index][0][0][1]
starty=CELL_DICTIONARY[cell_index][0][0][0]
X=np.append(X,startx)
Y=np.append(Y,starty)
endx=CELL_DICTIONARY[cell_index][-1][0][1]
endy=CELL_DICTIONARY[cell_index][-1][0][0]
vectX=(endx-startx)
vectY=(endy-starty)
U=np.append(U,vectX)
V=np.append(V,vectY)
fig, ax = plt.subplots()
ax.quiver(X, Y,U,V,units='xy' ,scale=0.2,headwidth=2)
plt.grid()
ax.set_aspect('equal')
plt.xlim(0,dim1)
plt.ylim(0,dim2)
plt.show()
print("Reading files...")
photos=[]
for entry in os.listdir(basepath): #Read all photos
if os.path.isfile(os.path.join(basepath, entry)):
photos.append(entry)
photos.sort()
list_of_means=[]
#Only select the brightest stacks
for tiff_index in range(len(photos)):
if photos[tiff_index]!='.DS_Store':
tiff_photo=cv.imread(basepath+"/"+photos[tiff_index])
list_of_means.append(np.mean(tiff_photo))
dim1=np.shape(tiff_photo)[0]
dim2=np.shape(tiff_photo)[1]
array_of_means=np.array(list_of_means)
local_maxima=argrelextrema(array_of_means, np.greater)[0]
local_minima=argrelextrema(array_of_means, np.less)[0]
false_maximas=[]
local_maxima_list=[]
for maxima in local_maxima:
local_maxima_list.append(maxima)
for minima in local_minima:
for maxima in local_maxima:
if minima==maxima+1:
false_maximas.append(maxima)
for false in false_maximas:
local_maxima_list.remove(false)
false_maximas=[]
for maxima_index in local_maxima_list:
if list_of_means[maxima_index]<=np.mean(list_of_means):
false_maximas.append(maxima_index)
for false in false_maximas:
local_maxima_list.remove(false)
print("Done")
tiff_images=[]
for Image in local_maxima_list:
tiff_images.append((cv.imread(basepath+"/"+photos[Image],cv.IMREAD_GRAYSCALE),(cv.imread(basepath+"/"+photos[Image-1],cv.IMREAD_GRAYSCALE))))
Info_Sheet=cv.imread("./Info_Sheet.tiff",cv.IMREAD_GRAYSCALE)
Info_str="""
Info Sheet
Commands:
"p" --> Go to next frame
"o" --> Go back one frame
"l" --> Switch between Manual segmentation and Automatic segmentation
"s" --> Save
User Interface:
"m" --> Switch between Drawing/Erasing
"z" --> Undo
Drawing:
Click at two points to draw a line segment
Erasing:
"b" --> Increase diameter of eraser
"n" --> Decrease diameter of eraser
If you have any questions or comments please email <EMAIL>
"""
colors=[]
for i in range(10000):
colors.append((randrange(255),(randrange(255)),(randrange(255))))
iter_photo=0
Display_Mode=False
if answer==True:
outcome=[]
for i in range(len(tiff_images)):
outcome.append([])
for j in range(4):
outcome[i].append([])
dim=1
saved_once2=False
while iter_photo < len(tiff_images):
Cursor=np.zeros((dim1,dim2)).astype(np.uint8)
    Numbers=np.zeros((dim1,dim2))
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from mmpose.core import (keypoint_auc, keypoint_epe, keypoint_pck_accuracy,
keypoints_from_heatmaps, keypoints_from_heatmaps3d,
multilabel_classification_accuracy, pose_pck_accuracy)
def test_pose_pck_accuracy():
    output = np.zeros((1, 5, 64, 64), dtype=np.float32)
import unittest
import mapf_gym as MAPF_Env
import numpy as np
# Agent 1
num_agents1 = 1
world1 = [[ 1, 0, 0, -1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
goals1 = [[ 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
# Agent 1
num_agents2 = 1
world2 = [[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, -1, 0, 0, 0, 0, 0, 0],
[ 0, 0, -1, 1, -1, 0, 0, 0, 0, 0],
[ 0, 0, 0, -1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
goals2 = [[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
# Agent 1 and 2
num_agents3 = 2
world3 = [[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, -1, -1, 0, 0, 0, 0, 0],
[ 0, 0, -1, 1, 2, -1, 0, 0, 0, 0],
[ 0, 0, 0, -1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
goals3 = [[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 2, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
# Agent 1 and 2
num_agents4 = 2
world4 = [[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, -1, -1, -1, 0, 0, 0, 0],
[ 0, 0, -1, 1, 2, -1, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
goals4 = [[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 2, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
# action: {0:NOP, 1:MOVE_NORTH, 2:MOVE_EAST, 3:MOVE_SOUTH, 4:MOVE_WEST}
# MAPF_Env.ACTION_COST, MAPF_Env.IDLE_COST, MAPF_Env.GOAL_REWARD, MAPF_Env.COLLISION_REWARD
FULL_HELP = False
class MAPFTests(unittest.TestCase):
# Bruteforce tests
def test_validActions1(self):
# MAPF_Env.MAPFEnv(self, num_agents=1, world0=None, goals0=None, DIAGONAL_MOVEMENT=False, SIZE=10, PROB=.2, FULL_HELP=False)
gameEnv1 = MAPF_Env.MAPFEnv(num_agents1, world0=np.array(world1), goals0=np.array(goals1), DIAGONAL_MOVEMENT=False)
validActions1 = gameEnv1._listNextValidActions(1)
self.assertEqual(validActions1, [0,1,2])
# With diagonal actions
gameEnv1 = MAPF_Env.MAPFEnv(num_agents1, world0=np.array(world1), goals0=np.array(goals1), DIAGONAL_MOVEMENT=True)
validActions1 = gameEnv1._listNextValidActions(1)
self.assertEqual(validActions1, [0,1,2,5])
def test_validActions2(self):
gameEnv2 = MAPF_Env.MAPFEnv(num_agents2, world0=np.array(world2), goals0=np.array(goals2), DIAGONAL_MOVEMENT=False)
validActions2 = gameEnv2._listNextValidActions(1)
self.assertEqual(validActions2, [0])
# With diagonal actions
gameEnv2 = MAPF_Env.MAPFEnv(num_agents2, world0=np.array(world2), goals0=np.array(goals2), DIAGONAL_MOVEMENT=True)
validActions2 = gameEnv2._listNextValidActions(1)
self.assertEqual(validActions2, [0,5,6,7,8])
def test_validActions3(self):
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3), DIAGONAL_MOVEMENT=False)
validActions3a = gameEnv3._listNextValidActions(1)
validActions3b = gameEnv3._listNextValidActions(2)
self.assertEqual(validActions3a, [0])
self.assertEqual(validActions3b, [0,2])
# With diagonal actions
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3), DIAGONAL_MOVEMENT=True)
validActions3a = gameEnv3._listNextValidActions(1)
validActions3b = gameEnv3._listNextValidActions(2)
self.assertEqual(validActions3a, [0,5,6,7])
self.assertEqual(validActions3b, [0,2,5,8])
def test_validActions4(self):
gameEnv4 = MAPF_Env.MAPFEnv(num_agents4, world0=np.array(world4), goals0=np.array(goals4),DIAGONAL_MOVEMENT=False)
validActions4a = gameEnv4._listNextValidActions(1)
validActions4b = gameEnv4._listNextValidActions(2)
self.assertEqual(validActions4a, [0,2])
self.assertEqual(validActions4b, [0,2])
# With diagonal actions
gameEnv4 = MAPF_Env.MAPFEnv(num_agents4, world0=np.array(world4), goals0=np.array(goals4),DIAGONAL_MOVEMENT=True)
validActions4a = gameEnv4._listNextValidActions(1)
validActions4b = gameEnv4._listNextValidActions(2)
self.assertEqual(validActions4a, [0,2,5,6,7])
self.assertEqual(validActions4b, [0,2,5,6])
def testIdle1(self):
gameEnv1 = MAPF_Env.MAPFEnv(num_agents1, world0=np.array(world1), goals0=np.array(goals1))
s0 = gameEnv1.world.state.copy()
# return state, reward, done, nextActions, on_goal, blocking, valid_action
s1, r, d, _, o_g, _, _ = gameEnv1.step((1,0))
s2 = gameEnv1.world.state.copy()
self.assertEqual(r, MAPF_Env.IDLE_COST)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def testIdle2(self):
gameEnv2 = MAPF_Env.MAPFEnv(num_agents2, world0=np.array(world2), goals0=np.array(goals2))
s0 = gameEnv2.world.state.copy()
s1, r, d, _, o_g, _, _ = gameEnv2.step((1,0))
s2 = gameEnv2.world.state.copy()
self.assertEqual(r, MAPF_Env.GOAL_REWARD)
self.assertTrue(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def testIdle3(self):
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3))
s0 = gameEnv3.world.state.copy()
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv3.step((1,0))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.GOAL_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv3.step((2,0))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.IDLE_COST)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def testIdle4(self):
gameEnv4 = MAPF_Env.MAPFEnv(num_agents4, world0=np.array(world4), goals0=np.array(goals4),DIAGONAL_MOVEMENT=False)
s0 = gameEnv4.world.state.copy()
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv4.step((1,0))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.GOAL_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv4.step((2,0))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.IDLE_COST)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_east1(self):
gameEnv1 = MAPF_Env.MAPFEnv(num_agents1, world0=np.array(world1), goals0=np.array(goals1))
s0 = gameEnv1.world.state.copy()
# return state, reward, done, nextActions, on_goal
s1, r, d, _, o_g, _, _ = gameEnv1.step((1,1))
s2 = gameEnv1.world.state.copy()
self.assertEqual(r, MAPF_Env.GOAL_REWARD)
self.assertTrue(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_east2(self):
gameEnv2 = MAPF_Env.MAPFEnv(num_agents2, world0=np.array(world2), goals0=np.array(goals2))
s0 = gameEnv2.world.state.copy()
s1, r, d, _, o_g, _, _ = gameEnv2.step((1,1))
s2 = gameEnv2.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertTrue(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_east3a(self):
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3))
s0 = gameEnv3.world.state.copy()
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv3.step((1,1))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv3.step((2,1))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_east3b(self):
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3))
s0 = gameEnv3.world.state.copy()
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv3.step((2,1))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv3.step((1,1))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_east4a(self):
gameEnv4 = MAPF_Env.MAPFEnv(num_agents4, world0=np.array(world4), goals0=np.array(goals4))
s0 = gameEnv4.world.state.copy()
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv4.step((1,1))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv4.step((2,1))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_east4b(self):
gameEnv4 = MAPF_Env.MAPFEnv(num_agents4, world0=np.array(world4), goals0=np.array(goals4))
s0 = gameEnv4.world.state.copy()
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv4.step((2,1))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv4.step((1,1))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_north1(self):
gameEnv1 = MAPF_Env.MAPFEnv(num_agents1, world0=np.array(world1), goals0=np.array(goals1))
s0 = gameEnv1.world.state.copy()
# return state, reward, done, nextActions, on_goal
s1, r, d, _, o_g, _, _ = gameEnv1.step((1,2))
s2 = gameEnv1.world.state.copy()
self.assertEqual(r, MAPF_Env.ACTION_COST)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_north2(self):
gameEnv2 = MAPF_Env.MAPFEnv(num_agents2, world0=np.array(world2), goals0=np.array(goals2))
s0 = gameEnv2.world.state.copy()
s1, r, d, _, o_g, _, _ = gameEnv2.step((1,2))
s2 = gameEnv2.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertTrue(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_north3a(self):
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3))
s0 = gameEnv3.world.state.copy()
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv3.step((1,2))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv3.step((2,2))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.GOAL_REWARD)
self.assertTrue(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_north3b(self):
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3))
s0 = gameEnv3.world.state.copy()
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv3.step((2,2))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.GOAL_REWARD)
self.assertTrue(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv3.step((1,2))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertTrue(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_north4a(self):
gameEnv4 = MAPF_Env.MAPFEnv(num_agents4, world0=np.array(world4), goals0=np.array(goals4))
s0 = gameEnv4.world.state.copy()
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv4.step((1,2))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.ACTION_COST)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv4.step((2,2))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.GOAL_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_north4b(self):
gameEnv4 = MAPF_Env.MAPFEnv(num_agents4, world0=np.array(world4), goals0=np.array(goals4))
s0 = gameEnv4.world.state.copy()
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv4.step((2,2))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.GOAL_REWARD)
self.assertTrue(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv4.step((1,2))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.ACTION_COST)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_west1(self):
gameEnv1 = MAPF_Env.MAPFEnv(num_agents1, world0=np.array(world1), goals0=np.array(goals1))
s0 = gameEnv1.world.state.copy()
# return state, reward, done, nextActions, on_goal
s1, r, d, _, o_g, _, _ = gameEnv1.step((1,3))
s2 = gameEnv1.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_west2(self):
gameEnv2 = MAPF_Env.MAPFEnv(num_agents2, world0=np.array(world2), goals0=np.array(goals2))
s0 = gameEnv2.world.state.copy()
s1, r, d, _, o_g, _, _ = gameEnv2.step((1,3))
s2 = gameEnv2.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertTrue(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_west3a(self):
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3))
s0 = gameEnv3.world.state.copy()
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv3.step((1,3))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv3.step((2,3))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_west3b(self):
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3))
s0 = gameEnv3.world.state.copy()
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv3.step((2,3))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv3.step((1,3))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_west4a(self):
gameEnv4 = MAPF_Env.MAPFEnv(num_agents4, world0=np.array(world4), goals0=np.array(goals4))
s0 = gameEnv4.world.state.copy()
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv4.step((1,3))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv4.step((2,3))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_west4b(self):
gameEnv4 = MAPF_Env.MAPFEnv(num_agents4, world0=np.array(world4), goals0=np.array(goals4))
s0 = gameEnv4.world.state.copy()
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv4.step((2,3))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv4.step((1,3))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_south1(self):
gameEnv1 = MAPF_Env.MAPFEnv(num_agents1, world0=np.array(world1), goals0=np.array(goals1))
s0 = gameEnv1.world.state.copy()
# return state, reward, done, nextActions, on_goal
s1, r, d, _, o_g, _, _ = gameEnv1.step((1,4))
s2 = gameEnv1.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_south2(self):
gameEnv2 = MAPF_Env.MAPFEnv(num_agents2, world0=np.array(world2), goals0=np.array(goals2))
s0 = gameEnv2.world.state.copy()
s1, r, d, _, o_g, _, _ = gameEnv2.step((1,4))
s2 = gameEnv2.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertTrue(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_south3a(self):
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3))
s0 = gameEnv3.world.state.copy()
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv3.step((1,4))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv3.step((2,4))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_south3b(self):
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3))
s0 = gameEnv3.world.state.copy()
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv3.step((2,4))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv3.step((1,4))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
        self.assertEqual(np.sum(s0), np.sum(s2))
# -*- coding: utf-8 -*-
"""
Created on Thu May 31 16:02:02 2018
@author: gregz
"""
import matplotlib
matplotlib.use('agg')
import argparse as ap
import matplotlib.pyplot as plt
import numpy as np
import os.path as op
import sys
import cosmics
from astropy.convolution import Gaussian2DKernel, Gaussian1DKernel, convolve
from astropy.io import fits
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.modeling.models import Moffat2D
from astropy.table import Table
from astropy.visualization import AsinhStretch
from astropy.visualization.mpl_normalize import ImageNormalize
from copy import copy
from fiber_utils import bspline_x0
from input_utils import setup_logging
from photutils import detect_sources
from reducelrs2 import ReduceLRS2
from scipy.interpolate import interp1d
from scipy.signal import savgol_filter
from sklearn.gaussian_process.kernels import Matern, WhiteKernel
from sklearn.gaussian_process.kernels import ConstantKernel
from sklearn.gaussian_process import GaussianProcessRegressor
from utils import biweight_location, biweight_midvariance
from wave_utils import get_new_wave, get_red_wave, get_single_shift
def get_script_path():
return op.dirname(op.realpath(sys.argv[0]))
DIRNAME = get_script_path()
parser = ap.ArgumentParser(add_help=True)
parser.add_argument("-f", "--filename",
help='''Filename that contains list of files''',
type=str, default=None)
parser.add_argument("-s", "--side",
help='''blue for LRS2-B and red for LRS2-R''',
type=str, default='blue')
parser.add_argument("-rc", "--recalculate_wavelength",
help='''recalculate_wavelength''',
action="count", default=0)
parser.add_argument("-em", "--emission",
help='''Find emission line object?''',
action="count", default=0)
parser.add_argument("-es", "--extract_side",
help='''blue for LRS2-B and red for LRS2-R''',
type=str, default='orange')
parser.add_argument("-we", "--wave_extract",
help='''blue for LRS2-B and red for LRS2-R''',
type=float, default=None)
args = parser.parse_args(args=None)
args.log = setup_logging('combine_amp_reductions')
attrs = ['filename', 'side']
for attr in attrs:
if getattr(args, attr) is None:
args.log.error('Need a "--%s" argument.' % attr)
sys.exit(1)
args.side = args.side.lower()
def make_avg_spec(wave, spec, binsize=35, knots=None):
if knots is None:
knots = wave.shape[1]
ind = np.argsort(wave.ravel())
N, D = wave.shape
    wchunks = np.array_split(wave.ravel()[ind],
                             N * D // binsize)
    schunks = np.array_split(spec.ravel()[ind],
                             N * D // binsize)
nwave = np.array([np.mean(chunk) for chunk in wchunks])
B, c = bspline_x0(nwave, nknots=knots)
nspec = np.array([biweight_location(chunk) for chunk in schunks])
sol = np.linalg.lstsq(c, nspec)[0]
smooth = np.dot(c, sol)
nwave, nind = np.unique(nwave, return_index=True)
return nwave, smooth[nind]
def safe_division(num, denom, eps=1e-8, fillval=0.0):
good = np.isfinite(denom) * (np.abs(denom) > eps)
div = num * 0.
if num.ndim == denom.ndim:
div[good] = num[good] / denom[good]
div[~good] = fillval
else:
div[:, good] = num[:, good] / denom[good]
div[:, ~good] = fillval
return div
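# Hedged illustration of safe_division (not part of the original pipeline):
# cells whose denominator is non-finite or smaller than eps are set to fillval
# instead of propagating inf/nan through later arithmetic.
def _demo_safe_division():
    num = np.array([1.0, 2.0, 3.0])
    denom = np.array([0.0, 4.0, np.nan])
    return safe_division(num, denom)  # -> array([0. , 0.5, 0. ])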
def rectify(wave, spec, lims, fac=2.5):
N, D = wave.shape
rect_wave = np.linspace(lims[0], lims[1], int(D*fac))
rect_spec = np.zeros((N, len(rect_wave)))
for i in np.arange(N):
dw = np.diff(wave[i])
dw = np.hstack([dw[0], dw])
I = interp1d(wave[i], spec[i] / dw, kind='quadratic',
bounds_error=False, fill_value=-999.)
rect_spec[i, :] = I(rect_wave)
return rect_wave, rect_spec
def gather_sn_fibers(fibconv, noise, cols):
hightolow = np.argsort(np.median(fibconv[:, cols], axis=1))[::-1]
s = 0.
ss = np.zeros((len(cols),))
nn = noise[cols]
inds = []
for ind in hightolow:
news = fibconv[ind, cols] + ss
newn = np.sqrt(nn**2 + noise[cols]**2)
rat = np.median(news / newn)
if rat > (s+0.5):
nn = newn
ss = news
s = rat
inds.append(ind)
else:
continue
return inds, s
def find_centroid(image, x, y, B):
G = Moffat2D()
G.alpha.value = 3.5
G.alpha.fixed = True
fit = LevMarLSQFitter()(G, x, y, image)
signal_to_noise = fit.amplitude.value / biweight_midvariance(image)
d = np.sqrt((x - fit.x_0.value)**2 + (y - fit.y_0.value)**2)
ratio = fit(x, y) / B
ind = np.argsort(ratio)
dthresh = np.interp(.01, ratio[ind], d[ind])
return (fit.x_0.value, fit.y_0.value, fit.alpha.value, fit.gamma.value,
fit.fwhm, signal_to_noise, dthresh)
def build_weight_matrix(x, y, sig=1.5):
d = np.sqrt((x - x[:, np.newaxis])**2 + (y - y[:, np.newaxis])**2)
G = np.exp(-0.5 * (d / sig)**2)
G = G / G.sum(axis=0)[:, np.newaxis]
return G.swapaxes(0,1)
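# Hedged illustration of build_weight_matrix (not called by the pipeline): for
# the returned matrix W each column sums to 1, so np.dot(values, W), as used in
# convolve_spatially below, makes every output fibre a weighted average of its
# spatial neighbours.
def _demo_build_weight_matrix():
    x = np.array([0.0, 1.0, 2.0])
    y = np.zeros(3)
    W = build_weight_matrix(x, y, sig=1.5)
    return W.sum(axis=0)  # ~array([1., 1., 1.])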
def clean_cosmics(rect_spec):
G = np.array([-.25, -.25, 1., -.25, -.25]).reshape(5,1)
S = convolve(rect_spec, G, normalize_kernel=False)
N = biweight_midvariance(S, axis=(0,))
mask = 0. * rect_spec
mask[(S / N) > 5.] = -1.
print('[COSMICS] Number of cosmics found: %i' % int(-1.*mask.sum()))
return mask
def mask_skylines_cosmics(wave, rect_spec, name):
mask1 = rect_spec * 0.
if op.exists(op.join(DIRNAME, 'lrs2_config', '%s_skylines.dat' % name)):
T = Table.read(op.join(DIRNAME, 'lrs2_config', '%s_skylines.dat' % name),
format='ascii.fixed_width_two_line')
for w in T['wavelength']:
mask1[:, np.abs(wave - w) < 6.] = -1.
mask2 = clean_cosmics(rect_spec)
mask = (mask1 + mask2) < 0
return mask
def convolve_spatially(x, y, spec, wave, name, sig_spatial=0.7, sig_wave=1.5):
W = build_weight_matrix(x, y, sig=sig_spatial)
mask = mask_skylines_cosmics(wave, spec, name)
Z = spec * 1.
Z[mask] = np.nan
G = Gaussian1DKernel(sig_wave)
for i in np.arange(spec.shape[0]):
Z[i, :] = convolve(Z[i, :], G, nan_treatment='fill', fill_value=0.0)
for i in np.arange(spec.shape[1]):
Z[:, i] = np.dot(Z[:, i], W)
return Z, mask
def build_big_fiber_array(P):
u = np.unique(P.ifuy)
NX = []
NY = []
for ui in u:
X = np.sort(P.ifux[P.ifuy == ui])
lx = X[-1] - X[0]
dx = X[1] - X[0]
NX.append(np.hstack([X - lx - dx, X, X + lx + dx]))
NY.append(ui * np.ones(NX[-1].shape))
NX.append(NX[-2])
NY.append(NY[-2]+0.59*2.)
NX = np.hstack(NX)
NY = np.hstack(NY)
ly = NY[-1] - NY[0]
dy = u[1] - u[0]
NX = np.hstack([NX, NX, NX])
NY = np.hstack([NY - ly - dy, NY, NY + ly + dy])
return NX, NY
def get_x_y_lambda(det_ind, other_ind, detwv, otherwv,
det_xc, det_yc, sides):
side = sides[det_ind]
dar_table = Table.read(op.join(DIRNAME, 'lrs2_config', 'dar_%s.dat' % side),
format='ascii.fixed_width_two_line')
X = interp1d(dar_table['wave'], dar_table['x_0'], kind='linear',
bounds_error=False, fill_value='extrapolate')
Y = interp1d(dar_table['wave'], dar_table['y_0'], kind='linear',
bounds_error=False, fill_value='extrapolate')
xoff, yoff = (det_xc - X(detwv), det_yc - Y(detwv))
side = sides[other_ind]
dar_table = Table.read(op.join(DIRNAME, 'lrs2_config', 'dar_%s.dat' % side),
format='ascii.fixed_width_two_line')
X = interp1d(dar_table['wave'], dar_table['x_0'], kind='linear',
bounds_error=False, fill_value='extrapolate')
Y = interp1d(dar_table['wave'], dar_table['y_0'], kind='linear',
bounds_error=False, fill_value='extrapolate')
return xoff + X(otherwv), yoff + Y(otherwv)
def make_plot(zimage, xgrid, ygrid, xpos, ypos, good_mask, opath, side):
fig = plt.figure(figsize=(6, 6))
plt.imshow(zimage, origin='lower', interpolation='none',
norm=ImageNormalize(stretch=AsinhStretch()),
cmap=plt.get_cmap('gray_r'),
extent=[xgrid.min(), xgrid.max(), ygrid.min(), ygrid.max()])
plt.scatter(xpos[good_mask], ypos[good_mask], marker='x', color='g', s=90)
plt.scatter(xpos[~good_mask], ypos[~good_mask], marker='x', color='r',
s=90)
plt.axis([xgrid.min(), xgrid.max(), ygrid.min(), ygrid.max()])
fig.savefig(op.join(opath, 'image_%s.png' % side))
def mask_sources(xgrid, ygrid, xpos, ypos, zimage, sncut=2.0):
threshold = (biweight_location(zimage) +
sncut * biweight_midvariance(zimage))
kernel = Gaussian2DKernel(2, x_size=5, y_size=5)
kernel.normalize()
segm = detect_sources(zimage, threshold, npixels=8, filter_kernel=kernel)
dist = np.sqrt((xgrid - xpos[:, np.newaxis, np.newaxis])**2 +
(ygrid - ypos[:, np.newaxis, np.newaxis])**2)
fiberloc = np.argmin(dist, axis=0)
return np.unique(fiberloc[segm.array > 0])
def make_frame(xloc, yloc, data, scale=0.25,
seeing_fac=2.5):
seeing = seeing_fac * scale
a = len(data)
x = np.arange(xloc.min()-scale,
xloc.max()+1*scale, scale)
y = np.arange(yloc.min()-scale,
yloc.max()+1*scale, scale)
xgrid, ygrid = np.meshgrid(x, y)
zimage = xgrid * 0.
d = np.zeros((a,)+xgrid.shape)
w = np.zeros((a,)+xgrid.shape)
for i in np.arange(len(x)):
for j in np.arange(len(y)):
d[:, j, i] = np.sqrt((xloc - xgrid[j, i])**2 +
(yloc - ygrid[j, i])**2)
w[:, j, i] = np.exp(-1./2.*(d[:, j, i]/seeing)**2)
sel = np.where((np.abs(data) > 1e-5) * np.isfinite(data))[0]
ws = w[sel, :, :].sum(axis=0)
zimage = ((data[sel, np.newaxis, np.newaxis] * w[sel]).sum(axis=0) /
ws * 1.9)
return xgrid, ygrid, zimage
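# Hedged illustration of make_frame (not part of the original script body):
# collapse fibre fluxes onto a regular grid using Gaussian, seeing-like weights;
# the grid spacing is set by `scale` and the kernel width by `seeing_fac`.
def _demo_make_frame():
    xloc = np.array([0.0, 1.0, 2.0])
    yloc = np.array([0.0, 0.5, 1.0])
    data = np.array([1.0, 2.0, 3.0])
    xgrid, ygrid, zimage = make_frame(xloc, yloc, data, scale=0.25)
    return zimage.shape == xgrid.shape  # True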
def setup_GP():
kernel = (ConstantKernel() + Matern(length_scale=2, nu=3/2) +
WhiteKernel(noise_level=1.))
G = GaussianProcessRegressor(alpha=1e-10, copy_X_train=True, kernel=kernel,
n_restarts_optimizer=0, normalize_y=False,
optimizer='fmin_l_bfgs_b', random_state=None)
return G
def fit_GP(wave, spec, mask):
G = setup_GP()
G.fit(wave[mask, np.newaxis], spec[mask])
return G.predict(wave[:, np.newaxis]), G
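# Hedged illustration of fit_GP (not called by the pipeline): smooth a noisy
# 1-D vector while ignoring masked pixels (e.g. sky lines or cosmics); the
# prediction is evaluated on every pixel, masked or not.
def _demo_fit_GP():
    wave = np.linspace(0., 10., 200)
    spec = np.sin(wave) + 0.1 * np.random.randn(200)
    mask = np.ones(200, dtype=bool)
    mask[80:90] = False                      # pretend these pixels are contaminated
    smooth, gp = fit_GP(wave, spec, mask)
    return smooth.shape                      # (200,)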
def smooth_fiber(X, mask, nfibs, wave_sel=None):
z = biweight_location(X, axis=(1,))
x = np.arange(len(z))
z[mask] = np.nan
model = z * 0.
for i in np.arange(2):
xl = i * nfibs
xh = (i + 1) * nfibs
sel = np.isfinite(z[xl:xh])
G = setup_GP()
G.fit(x[xl:xh][sel, np.newaxis], z[xl:xh][sel])
model[xl:xh] = G.predict(x[xl:xh, np.newaxis])
return model
def subtract_sky(R, sky_sel, args, niter=2, adjustment=None):
for j in np.arange(niter):
nwave, nspec = make_avg_spec(R.wave[sky_sel],
safe_division(R.spec[sky_sel],
R.ftf[sky_sel]), binsize=35, knots=None)
I = interp1d(nwave, nspec, bounds_error=False, kind='quadratic',
fill_value='extrapolate')
R.skysub = R.wave * 0.
R.sky = R.wave * 0.
model = R.wave * 0.
for i in np.arange(R.wave.shape[0]):
model[i] = I(R.wave[i])
if adjustment is not None:
try:
sel = np.isfinite(adjustment[i+1])
J = interp1d(adjustment[0][sel], adjustment[i+1][sel],
bounds_error=False, kind='quadratic',
fill_value='extrapolate')
add = J(R.wave[i])
except:
args.log.warning('Adjustment failed for %s on fiber %i' %
(R.side, i))
add = 0.0
else:
add = 0.0
R.sky[i] = model[i] * (R.ftf[i] + add)
R.skysub[i] = R.spec[i] - R.sky[i]
residual = safe_division(R.skysub, model)
sky_sel1 = sky_sel * (np.nanmedian(R.ftf, axis=1) > .5)
        cont = smooth_fiber(residual[:, 400:-400], ~sky_sel1, R.wave.shape[0] // 2)
R.ftf = R.ftf + cont[:, np.newaxis]
args.log.info('Fiber to Fiber offsets')
T = Table([R.ifux, R.ifuy, cont], names=['x', 'y', 'offset'])
args.log.info(T)
R.skysub = safe_division(R.skysub, R.ftf)
return R
def extract_source(R, side, lims2, loc, fibinds, args):
R.skynorm = safe_division(R.sky, R.ftf)
T = Table.read(op.join(DIRNAME, 'lrs2_config', 'response_%s.dat' % side),
format='ascii.fixed_width_two_line')
I = interp1d(T['wave'], T['response'], kind='linear',
bounds_error=False, fill_value='extrapolate')
R.flam = R.wave * 0.
R.slam = R.wave * 0.
for i in np.arange(R.wave.shape[0]):
response = I(R.wave[i])
R.flam[i] = R.skysub[i] / R.exptime / R.area * response
R.slam[i] = R.skynorm[i] / R.exptime / R.area * response
rect_wave, rect_spec = rectify(np.array(R.wave, dtype='float64'),
np.array(R.flam, dtype='float64'),
lims2, fac=1.0)
    rect_wave, rect_sky = rectify(np.array(R.wave, dtype='float64'),
                                  np.array(R.slam, dtype='float64'),
                                  lims2, fac=1.0)
#!/usr/bin/python
# -*- coding:utf-8 -*-
"""
Difference between L1 and L2 regularization:
as C decreases, the L1 penalty grows stronger and drives more coefficients to zero.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
def test():
    # Load the data
digits = datasets.load_digits()
X, Y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
    # Binarize the targets into 0/1 (digit > 4 or not)
    Y = (Y > 4).astype(int)
    # Sweep the regularization parameter C
for i, C in enumerate((100, 1, 0.01)):
print("C=%.2f" % C)
plt.text(-8, 3, "C = %.2f" % C)
# l1
# coef_l1_LR contains zeros
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l1_LR.fit(X, Y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("Score with L1 penalty: %.4f" % clf_l1_LR.score(X, Y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
if i == 0:
l1_plot.set_title("L1 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest', cmap='binary', vmax=1, vmin=0)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
# l2
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l2_LR.fit(X, Y)
coef_l2_LR = clf_l2_LR.coef_.ravel()
        sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
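        # Hedged completion: the snippet is truncated at this point; the lines
        # below mirror the L1 block above (and the upstream scikit-learn
        # sparsity example this is adapted from) to finish the L2 branch.
        print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
        print("Score with L2 penalty: %.4f" % clf_l2_LR.score(X, Y))
        l2_plot = plt.subplot(3, 2, 2 * (i + 1))
        if i == 0:
            l2_plot.set_title("L2 penalty")
        l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest', cmap='binary', vmax=1, vmin=0)
        l2_plot.set_xticks(())
        l2_plot.set_yticks(())
    plt.show()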
import numpy as np
import os
import torch
from torch.utils.data import Dataset
import cv2
from data.imgaug_wo_shape import ImgAugWithoutShape
from data.imgaug_w_shape import ImgAugWithShape
from data.resize_uniform import resizeUniform
"""
One anno.txt per image:
imageName.txt
    xmin, ymin, w, h, cls0, cls1
    xmin, ymin, w, h, cls0, cls1
    xmin, ymin, w, h, cls0, cls1
The path arguments below are all root directories.
output
    img scaled to 0-1
"""
class ListDataset(Dataset):
def __init__(self,
trainAnnoPath, # txt files root /
trainImgPath, # images root /
netInputSizehw,
imgChannelNumber,
augFlag=False,
clsname = {0: "person"}
):
self.trainAnnoPath = trainAnnoPath
self.trainImgPath = trainImgPath
self.netInputSizehw = tuple(netInputSizehw)
        self.annNames = os.listdir(self.trainAnnoPath)  # e.g. ["2008_000176.txt"]
self.imgChannelNumber = imgChannelNumber
self.augFlag = augFlag
self.clsname = clsname
self.showFlag = 0
def __getitem__(self, index):
"""bbox img org"""
txtPath = self.trainAnnoPath + self.annNames[index]
"""load infos"""
infos = np.loadtxt(txtPath)
if infos.ndim == 1:
rows = infos.shape[0]
infos = infos.reshape(-1, rows) #one row to 2dim
"""change int to float"""
infos = np.array(infos, dtype=np.float32)
"""判断是不是背景图片"""
if (infos ==np.array([[-1,-1,-1,-1,-1]])).all():
bgFlag = True
else:
bgFlag = False
bboxes = infos[:, :4]
classes = infos[:, 4]
"""input img rgb or gray"""
if self.imgChannelNumber == 3:
img = cv2.imread(self.trainImgPath + self.annNames[index].split('.')[0] + '.jpg')# cv2.COLOR_BGR2RGB)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if self.imgChannelNumber == 1:
img = cv2.imread(self.trainImgPath + self.annNames[index].split('.')[0] + '.jpg', cv2.IMREAD_GRAYSCALE)
img = img.astype(np.float32)
winName = ""#self.annNames[index]
# if self.showFlag:
# self.__show(np.copy(img).astype(np.uint8), bboxes, classes, winName, color = (0, 0, 255))
"""unifor resize 放在最后,输入网络的图片会有很多的0, 经过imgaug这些将会变为非0有利于学习"""
img, infos, bboxes = resizeUniform(img, self.netInputSizehw, bboxes)
if self.showFlag:
self.__show(np.copy(img).astype(np.uint8), bboxes, classes, winName+"_resize", color=(0, 0, 255))
"""data shape augment"""
if self.augFlag:
"""Img Aug With Shape, 放射变换的增强一定要放在前面,主要是0的情况"""
bboxes[:, 2:] = bboxes[:, :2] + bboxes[:, 2:] # (x1,y1, w,h)->(x1,y1, x2,y2)
if bgFlag:
imgauger = ImgAugWithShape(img, None)
else:
imgauger = ImgAugWithShape(img, bboxes)
imgauger.shear(5, prob =0.3)
imgauger.translate(translate=0.1, prob=0.3)
if not bgFlag:
img, bboxes = (imgauger.img, imgauger.boxes)
else:
img = imgauger.img
bboxes[:, 2:] = bboxes[:, 2:] - bboxes[:, :2] # (x1,y1, x2,y2)->(x1,y1, w,h)
if self.showFlag:
self.__show(np.copy(img).astype(np.uint8), bboxes, classes, winName + "_augshape", color=(0, 0, 255))
"""data color augment"""
if self.augFlag:
"""非放射变换,放在最后, 最后的img 不用clip到(0,1)之间"""
imgauger = ImgAugWithoutShape(img)
imgauger.brightness(delta = 0.1, prob = 0.5)
imgauger.constrast(alphaLow=0.9, alphaUp=1.1, prob = 0.5)
imgauger.saturation(alphaLow=0.1, alphaUp=1.1, prob = 0.5)
#imgauger.normalize1(mean = self.normalize[0], std= self.normalize[1])
img = imgauger.img
if self.showFlag:
            self.__show(np.copy(img).astype(np.uint8), bboxes, classes, winName + "_augcolor", color=(0, 0, 255))
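        # ---- Hedged completion: the original __getitem__ is truncated at this
        # ---- point. The lines below are an illustrative sketch only, based on
        # ---- the class docstring ("output img 0-1"); the exact return format
        # ---- of the original code is unknown.
        if img.ndim == 2:                        # grayscale -> add a channel axis
            img = img[None, ...]
        else:                                    # HWC -> CHW
            img = img.transpose(2, 0, 1)
        img = torch.from_numpy(np.ascontiguousarray(img)).float() / 255.0
        return img, bboxes, classes, self.annNames[index]

    def __len__(self):
        return len(self.annNames)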
#!/usr/bin/env python
# file utils.py
# author <NAME> <<EMAIL>>
# version 0.0
# date 18 mars 2020
"""
Utils
=====
Various utilities unrelated to trees or profiles.
"""
import numpy as np
def ndarray_hash(x, l=8, c=1000):
"""
Compute a hash from a numpy array.
Parameters
----------
x : ndarray
The array to hash.
l : int, optional
The length of the hash. Must be an even number.
c : int, optional
A variable to affect the sampling of the hash. It has to be the
same along the matching process. Refer to notes.
Returns
-------
hash : str
The hash of array x.
Notes
-----
Python hash is slow and will offset the random generator in each
kernel. The hash of the same data will not match in different
kernels.
The idea is to sparsely sample the data to speed up the hash
computation. By fixing the number of samples the hash computation
will take a fixed amount of time, no matter the size of the data.
This hash function output a hash of :math:`x` in hexadecimal. The
length of the hash is :math:`l`. The hashes are consistent when
tuning the length :math:`l`: shorter hashes are contained in the
longer ones for the same data :math:`x`. The samples count taken in
:math:`x` is :math:`\\frac{l \\times c}{2}`.
"""
rs = np.random.RandomState(42)
    x = np.require(x, requirements='C')
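    # Hedged completion: the original function body is truncated here. The sketch
    # below follows the behaviour described in the docstring (seeded sparse
    # sampling of l*c/2 values, hex digest of length l, consistent prefixes when
    # l is shortened); it is not necessarily the original implementation.
    data = x.view(np.uint8).ravel()              # raw bytes of the array buffer
    idx = rs.randint(0, data.size, size=l * c // 2)
    samples = data[idx].reshape(l // 2, c)       # c samples per output byte
    digest = samples.astype(np.uint64).sum(axis=1) % 256
    return ''.join('{:02x}'.format(int(b)) for b in digest)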
# -*- coding: utf-8 -*-
#GSASIIdataGUI - Main GUI routines
########### SVN repository information ###################
# $Date: 2021-01-12 04:57:49 +0900 (火, 12 1月 2021) $
# $Author: toby $
# $Revision: 4761 $
# $URL: https://subversion.xray.aps.anl.gov/pyGSAS/trunk/GSASIIdataGUI.py $
# $Id: GSASIIdataGUI.py 4761 2021-01-11 19:57:49Z toby $
########### SVN repository information ###################
'''
*GSASIIdataGUI: Main GSAS-II GUI*
------------------------------------
Module that defines GUI routines and classes for the main GUI Frame (window)
and the main routines that define the GSAS-II tree panel and much of the
data editing panel.
'''
from __future__ import division, print_function
import platform
import time
import math
import random as ran
import copy
import sys
import os
import inspect
if '2' in platform.python_version_tuple()[0]:
import cPickle
else:
try:
import _pickle as cPickle
except:
print('Warning: failed to import the optimized Py3 pickle (_pickle)')
import pickle as cPickle
import re
import numpy as np
import numpy.ma as ma
import matplotlib as mpl
try:
import OpenGL as ogl
try:
import OpenGL.GL # this fails in <=2020 versions of Python on OS X 11.x
except ImportError:
print('Drat, patching for Big Sur')
from ctypes import util
orig_util_find_library = util.find_library
def new_util_find_library( name ):
res = orig_util_find_library( name )
if res: return res
return '/System/Library/Frameworks/'+name+'.framework/'+name
util.find_library = new_util_find_library
except ImportError:
pass
import scipy as sp
import scipy.optimize as so
try:
import wx
import wx.grid as wg
#import wx.wizard as wz
#import wx.aui
import wx.lib.scrolledpanel as wxscroll
except ImportError:
pass
import GSASIIpath
GSASIIpath.SetVersionNumber("$Revision: 4761 $")
import GSASIImath as G2mth
import GSASIIIO as G2IO
import GSASIIfiles as G2fil
import GSASIIstrIO as G2stIO
import GSASIIlattice as G2lat
import GSASIIplot as G2plt
import GSASIIpwdGUI as G2pdG
import GSASIIimgGUI as G2imG
import GSASIIphsGUI as G2phG
import GSASIIspc as G2spc
import GSASIImapvars as G2mv
import GSASIIconstrGUI as G2cnstG
import GSASIIrestrGUI as G2restG
import GSASIIobj as G2obj
import GSASIIexprGUI as G2exG
import GSASIIlog as log
import GSASIIctrlGUI as G2G
import GSASIIElem as G2elem
import GSASIIpwd as G2pwd
import GSASIIstrMain as G2stMn
import defaultIparms as dI
import GSASIIfpaGUI as G2fpa
try:
wx.NewIdRef
wx.NewId = wx.NewIdRef
except AttributeError:
pass
# trig functions in degrees
sind = lambda x: np.sin(x*np.pi/180.)
tand = lambda x: np.tan(x*np.pi/180.)
cosd = lambda x: np.cos(x*np.pi/180.)
# Define short names for convenience
WACV = wx.ALIGN_CENTER_VERTICAL
VERY_LIGHT_GREY = wx.Colour(240,240,240)
DULL_YELLOW = (230,230,190)
# define Ids for wx menu items
commonTrans = {'abc':np.eye(3),'a-cb':np.array([[1.,0.,0.],[0.,0.,-1.],[0.,1.,0.]]),
'ba-c':np.array([[0.,1.,0.],[1.,0.,0.],[0.,0.,-1.]]),'-cba':np.array([[0.,0.,-1.],[0.,1.,0.],[1.,0.,0.]]),
'bca':np.array([[0.,1.,0.],[0.,0.,1.],[1.,0.,0.]]),'cab':np.array([[0.,0.,1.],[1.,0.,0.],[0.,1.,0.]]),
'R->H':np.array([[1.,-1.,0.],[0.,1.,-1.],[1.,1.,1.]]),'H->R':np.array([[2./3,1./3,1./3],[-1./3,1./3,1./3],[-1./3,-2./3,1./3]]),
'P->A':np.array([[-1.,0.,0.],[0.,-1.,1.],[0.,1.,1.]]),'R->O':np.array([[-1.,0.,0.],[0.,-1.,0.],[0.,0.,1.]]),
'P->B':np.array([[-1.,0.,1.],[0.,-1.,0.],[1.,0.,1.]]),'B->P':np.array([[-.5,0.,.5],[0.,-1.,0.],[.5,0.,.5]]),
'P->C':np.array([[1.,1.,0.],[1.,-1.,0.],[0.,0.,-1.]]),'C->P':np.array([[.5,.5,0.],[.5,-.5,0.],[0.,0.,-1.]]),
'P->F':np.array([[-1.,1.,1.],[1.,-1.,1.],[1.,1.,-1.]]),'F->P':np.array([[0.,.5,.5],[.5,0.,.5],[.5,.5,0.]]),
'P->I':np.array([[0.,1.,1.],[1.,0.,1.],[1.,1.,0.]]),'I->P':np.array([[-.5,.5,.5],[.5,-.5,.5],[.5,.5,-.5]]),
'A->P':np.array([[-1.,0.,0.],[0.,-.5,.5],[0.,.5,.5]]),'O->R':np.array([[-1.,0.,0.],[0.,-1.,0.],[0.,0.,1.]]),
'abc*':np.eye(3), }
commonNames = ['abc','bca','cab','a-cb','ba-c','-cba','P->A','A->P','P->B','B->P','P->C','C->P',
'P->I','I->P','P->F','F->P','H->R','R->H','R->O','O->R','abc*','setting 1->2'] #don't put any new ones after the setting one!
def SetDefaultDData(dType,histoName,NShkl=0,NDij=0):
if dType in ['SXC','SNC']:
return {'Histogram':histoName,'Show':False,'Scale':[1.0,True],
'Babinet':{'BabA':[0.0,False],'BabU':[0.0,False]},
'Extinction':['Lorentzian','None', {'Tbar':0.1,'Cos2TM':0.955,
'Eg':[1.e-10,False],'Es':[1.e-10,False],'Ep':[1.e-10,False]}],
'Flack':[0.0,False]}
elif dType == 'SNT':
return {'Histogram':histoName,'Show':False,'Scale':[1.0,True],
'Babinet':{'BabA':[0.0,False],'BabU':[0.0,False]},
'Extinction':['Lorentzian','None', {
'Eg':[1.e-10,False],'Es':[1.e-10,False],'Ep':[1.e-10,False]}]}
elif 'P' in dType:
return {'Histogram':histoName,'Show':False,'Scale':[1.0,False],
'Pref.Ori.':['MD',1.0,False,[0,0,1],0,{},[],0.1],
'Size':['isotropic',[1.,1.,1.],[False,False,False],[0,0,1],
[1.,1.,1.,0.,0.,0.],6*[False,]],
'Mustrain':['isotropic',[1000.0,1000.0,1.0],[False,False,False],[0,0,1],
NShkl*[0.01,],NShkl*[False,]],
'HStrain':[NDij*[0.0,],NDij*[False,]],
'Extinction':[0.0,False],'Babinet':{'BabA':[0.0,False],'BabU':[0.0,False]}}
def GetDisplay(pos):
'''Gets display number (0=main display) for window position (pos). If pos outside all displays
returns None
'''
displays = np.array([list(wx.Display(i).GetGeometry()) for i in range(wx.Display.GetCount())])
for ip,display in enumerate(displays):
display[2:3] += display[0:1]
if (display[0] < pos[0] < display[2]) and (display[1] < pos[1] < display[3]):
return ip
return None
################################################################################
#### class definitions used for main GUI
################################################################################
class MergeDialog(wx.Dialog):
''' HKL transformation & merge dialog
:param wx.Frame parent: reference to parent frame (or None)
:param data: HKLF data
'''
def __init__(self,parent,data):
wx.Dialog.__init__(self,parent,wx.ID_ANY,'Setup HKLF merge',
pos=wx.DefaultPosition,style=wx.DEFAULT_DIALOG_STYLE)
self.panel = wx.Panel(self) #just a dummy - gets destroyed in Draw!
self.data = data
self.Super = data[1]['Super']
if self.Super:
self.Trans = np.eye(4)
else:
self.Trans = np.eye(3)
self.Cent = 'noncentrosymmetric'
self.Laue = '1'
self.Class = 'triclinic'
self.Common = 'abc'
self.Draw()
def Draw(self):
def OnCent(event):
Obj = event.GetEventObject()
self.Cent = Obj.GetValue()
self.Laue = ''
wx.CallAfter(self.Draw)
def OnLaue(event):
Obj = event.GetEventObject()
self.Laue = Obj.GetValue()
wx.CallAfter(self.Draw)
def OnClass(event):
Obj = event.GetEventObject()
self.Class = Obj.GetValue()
self.Laue = ''
wx.CallAfter(self.Draw)
def OnCommon(event):
Obj = event.GetEventObject()
self.Common = Obj.GetValue()
self.Trans = commonTrans[self.Common]
wx.CallAfter(self.Draw)
self.panel.Destroy()
self.panel = wx.Panel(self)
mainSizer = wx.BoxSizer(wx.VERTICAL)
MatSizer = wx.BoxSizer(wx.HORIZONTAL)
transSizer = wx.BoxSizer(wx.VERTICAL)
transSizer.Add(wx.StaticText(self.panel,label=" HKL Transformation matrix: M*H = H'"))
if self.Super:
Trmat = wx.FlexGridSizer(4,4,0,0)
else:
commonSizer = wx.BoxSizer(wx.HORIZONTAL)
commonSizer.Add(wx.StaticText(self.panel,label=' Common transformations: '),0,WACV)
common = wx.ComboBox(self.panel,value=self.Common,choices=commonNames[:-2], #not the last two!
style=wx.CB_READONLY|wx.CB_DROPDOWN)
common.Bind(wx.EVT_COMBOBOX,OnCommon)
commonSizer.Add(common,0,WACV)
transSizer.Add(commonSizer)
Trmat = wx.FlexGridSizer(3,3,0,0)
for iy,line in enumerate(self.Trans):
for ix,val in enumerate(line):
item = G2G.ValidatedTxtCtrl(self.panel,self.Trans[iy],ix,nDig=(10,3),size=(65,25))
Trmat.Add(item)
transSizer.Add(Trmat)
MatSizer.Add((10,0),0)
MatSizer.Add(transSizer)
mainSizer.Add(MatSizer)
laueClass = ['triclinic','monoclinic','orthorhombic','trigonal(H)','tetragonal','hexagonal','cubic']
centroLaue = {'triclinic':['-1',],'monoclinic':['2/m','1 1 2/m','2/m 1 1',],
'orthorhombic':['m m m',],'trigonal(H)':['-3','-3 m 1','-3 1 m',], \
'tetragonal':['4/m','4/m m m',],'hexagonal':['6/m','6/m m m',],'cubic':['m 3','m 3 m']}
noncentroLaue = {'triclinic':['1',],'monoclinic':['2','2 1 1','1 1 2','m','m 1 1','1 1 m',],
'orthorhombic':['2 2 2','m m 2','m 2 m','2 m m',],
'trigonal(H)':['3','3 1 2','3 2 1','3 m 1','3 1 m',],
'tetragonal':['4','-4','4 2 2','4 m m','-4 2 m','-4 m 2',], \
'hexagonal':['6','-6','6 2 2','6 m m','-6 m 2','-6 2 m',],'cubic':['2 3','4 3 2','-4 3 m']}
centChoice = ['noncentrosymmetric','centrosymmetric']
mainSizer.Add(wx.StaticText(self.panel,label=' Select Laue class for new lattice:'),0)
Class = wx.ComboBox(self.panel,value=self.Class,choices=laueClass,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
Class.Bind(wx.EVT_COMBOBOX,OnClass)
mainSizer.Add(Class,0)
mainSizer.Add(wx.StaticText(self.panel,label=' Target Laue symmetry:'),0)
Cent = wx.ComboBox(self.panel,value=self.Cent,choices=centChoice,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
Cent.Bind(wx.EVT_COMBOBOX,OnCent)
mergeSizer = wx.BoxSizer(wx.HORIZONTAL)
mergeSizer.Add(Cent,0,WACV)
mergeSizer.Add((10,0),0)
Choice = centroLaue[self.Class]
if 'non' in self.Cent:
Choice = noncentroLaue[self.Class]
Laue = wx.ComboBox(self.panel,value=self.Laue,choices=Choice,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
Laue.Bind(wx.EVT_COMBOBOX,OnLaue)
mergeSizer.Add(Laue,0,WACV)
mainSizer.Add(mergeSizer)
OkBtn = wx.Button(self.panel,-1,"Ok")
OkBtn.Bind(wx.EVT_BUTTON, self.OnOk)
cancelBtn = wx.Button(self.panel,-1,"Cancel")
cancelBtn.Bind(wx.EVT_BUTTON, self.OnCancel)
btnSizer = wx.BoxSizer(wx.HORIZONTAL)
btnSizer.Add((20,20),1)
if self.Laue:
btnSizer.Add(OkBtn)
btnSizer.Add((20,20),1)
btnSizer.Add(cancelBtn)
btnSizer.Add((20,20),1)
mainSizer.Add(btnSizer,0,wx.EXPAND|wx.BOTTOM|wx.TOP, 10)
self.panel.SetSizer(mainSizer)
self.panel.Fit()
self.Fit()
def GetSelection(self):
return self.Trans,self.Cent,self.Laue
def OnOk(self,event):
parent = self.GetParent()
parent.Raise()
self.EndModal(wx.ID_OK)
def OnCancel(self,event):
parent = self.GetParent()
parent.Raise()
self.EndModal(wx.ID_CANCEL)
def GUIpatches():
'Misc fixes that only needs to be done when running a GUI'
try: # patch for LANG environment var problem on occasional OSX machines
import locale
locale.getdefaultlocale()
except ValueError:
print('Fixing location (see https://github.com/matplotlib/matplotlib/issues/5420.)')
os.environ['LC_ALL'] = 'en_US.UTF-8'
locale.getdefaultlocale()
try:
import OpenGL
OpenGL # avoids unused package error
except ImportError:
print('*******************************************************')
print('PyOpenGL is missing from your python installation')
print(' - we will try to install it')
print('*******************************************************')
def install_with_easyinstall(package):
try:
print ("trying a system-wide PyOpenGl install")
easy_install.main(['-f',os.path.split(__file__)[0],package])
return
except:
pass
try:
print ("trying a user level PyOpenGl install")
easy_install.main(['-f',os.path.split(__file__)[0],'--user',package])
return
except:
print (u"Install of '+package+u' failed. Please report this information:")
import traceback
print (traceback.format_exc())
sys.exit()
from setuptools.command import easy_install
install_with_easyinstall('PyOpenGl')
print('*******************************************************')
print('OpenGL has been installed. Restarting GSAS-II')
print('*******************************************************')
loc = os.path.dirname(__file__)
import subprocess
subprocess.Popen([sys.executable,os.path.join(loc,'GSASII.py')])
sys.exit()
# PATCH: for Mavericks (OS X 10.9.x), wx produces an annoying warning about LucidaGrandeUI.
# In case stderr has been suppressed there, redirect python error output to stdout. Nobody
# else should care much about this.
sys.stderr = sys.stdout
def convVersion(version):
'''Convert a version string ("x", "x.y", "x.y.z") into a series of
ints.
:returns: [i0, i1, i2] where None is used if a value is not specified
and 0 is used if a field cannot be parsed.
'''
vIntList = [None,None,None]
for i,v in enumerate(version.split('.')):
if i >= 3: break
if len(v) == 0: break
v = list(filter(None,re.split('(\\d+)',v)))[0] # conv '1b2' to '1'
try:
vIntList[i] = int(v)
except:
vIntList[i] = 0
return vIntList
def compareVersions(version1,version2):
'''Compare two version strings ("x", "x.y", "x.y.z")
Note that '3.' matches '3.1', and '3.0' matches '3.0.1'
but '3.0.0' does not match '3.0.1'
:returns: 0 if the versions match, -1 if version1 < version2,
or 1 if version1 > version2
'''
for v1,v2 in zip(convVersion(version1),convVersion(version2)):
if v1 is None or v2 is None:
return 0
if v1 < v2: return -1
if v1 > v2: return 1
return 0
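# Hedged illustration (not part of GSAS-II itself): how the matching rules in
# the compareVersions docstring play out for a few representative inputs.
def _demoCompareVersions():
    assert compareVersions('3.', '3.1') == 0      # unspecified fields match anything
    assert compareVersions('3.0', '3.0.1') == 0
    assert compareVersions('3.0.0', '3.0.1') == -1
    assert compareVersions('3.2', '3.1.9') == 1
    return True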
# tabulate package versions that users should be warned about
versionDict = {}
'''Variable versionDict is used to designate versions of packages that
should generate warnings or error messages.
* ``versionDict['tooOld']`` is a dict with module versions that are too old and are
known to cause serious errors
* ``versionDict['tooOldWarn']`` is a dict with module versions that are
significantly out of date and should be updated, but will probably function OK.
* ``versionDict['badVersionWarn']`` is a dict of with lists of package
versions that are known to have bugs. One should select an older or
newer version of the package.
* ``versionDict['tooNewWarn']`` is a dict with module versions that have not
been tested but have changes that lead us to believe that errors are
likely to happen.
**Packages/versions to be avoided**
* wxPython:
* <=2.x.x: while most of GSAS-II has been written to be
compatible with older versions of wxpython, we are now testing with
version 4.0 only. Version 3.0 is pretty similar to 4.0 and should not
have problems. wxpython 4.1 seems to create a lot of errors for
conflicting options that will need to be checked up upon.
* Matplotlib:
* 1.x: there have been significant API changes since these versions and
significant graphics errors will occur.
* 3.1.x and 3.2.x: these versions have a known bug for plotting
3-D surfaces, such as microstrain vs crystal axes. The plots may appear
distorted as the lengths of x, y & z will not be constrained as equal.
Preferably use 3.0.x as 3.3.x is not fully tested.
* numpy:
* 1.16.0: produces .gpx files that are not compatible with older
version numpy versions. This is a pretty outmoded version; upgrade.
'''
# add comments above when changing anything below
versionDict['tooOld'] = {'matplotlib': '1.'}
'modules that will certainly fail'
versionDict['tooOldWarn'] = {'wx': '2.'}
'modules that may fail but should be updated'
versionDict['badVersionWarn'] = {'numpy':['1.16.0'],
'matplotlib': ['3.1','3.2']}
'versions of modules that are known to have bugs'
versionDict['tooNewWarn'] = {'wx':'4.1'}
#'matplotlib': '3.4',
'module versions newer than what we have tested where problems are suspected'
def ShowVersions():
'''Show the versions all of required Python packages, etc.
'''
import numpy as np
import scipy as sp
import wx
import matplotlib as mpl
import OpenGL as ogl
import GSASIIpath
# print (versions)
print ("Python module versions loaded:")
print (" Python: %s from %s"%(sys.version.split()[0],sys.executable))
Image = None
version = '?'
versionDict['errors'] = ''
warn = False
for s,m in [('wx',wx), ('matplotlib', mpl), ('numpy',np),
('scipy',sp), ('OpenGL',ogl)]:
msg = ''
if s in versionDict['tooOld']:
match = compareVersions(m.__version__,versionDict['tooOld'][s])
if match <= 0:
msg = "version will cause problems"
warn = True
if versionDict['errors']: versionDict['errors'] += '\n'
versionDict['errors'] += 'Package {} version {} is too old for GSAS-II. An update is required'.format(s,m.__version__)
if s in versionDict['tooOldWarn']:
match = compareVersions(m.__version__,versionDict['tooOldWarn'][s])
if match <= 0:
msg = "version can cause problems"
warn = True
if s in versionDict['tooNewWarn']:
match = compareVersions(m.__version__,versionDict['tooNewWarn'][s])
if match >= 0:
msg = "version is too new and could cause problems"
warn = True
if s in versionDict['badVersionWarn']:
for v in versionDict['badVersionWarn'][s]:
if compareVersions(m.__version__,v) == 0:
msg = "version is known to be buggy"
warn = True
break
print(" {:12s}{} {}".format(s+':',m.__version__,msg))
try:
from PIL import Image
except ImportError:
try:
import Image
except ImportError:
pass
if Image is None:
print ("Image module not present; Note that PIL (Python Imaging Library) or pillow is needed for some image operations")
else:
# version # can be in various places, try standard dunderscore first
for ver in '__version__','VERSION','PILLOW_VERSION':
if hasattr(Image,ver):
try:
version = eval('Image.'+ver)
break
except:
pass
print (" Image: %s (PIL or Pillow)"%version)
print (" Platform: %s %s %s"%(sys.platform,platform.architecture()[0],platform.machine()))
try:
import mkl
print (" Max threads:%s"%mkl.get_max_threads())
except:
pass
rev = GSASIIpath.svnGetRev()
if rev is None:
"no SVN"
else:
rev = "SVN version {}".format(rev)
print ("Latest GSAS-II revision (from .py files): {} ({})\n".format(
GSASIIpath.GetVersionNumber(),rev))
# patch 11/2020: warn if GSASII path has not been updated past v4576.
# For unknown reasons on Mac with gsas2full, there have been checksum
# errors in the .so files that prevented svn from completing updates.
# If GSASIIpath.svnChecksumPatch is not present, then the fix for that
# has not been retrieved, so warn. Keep for a year or so.
try:
GSASIIpath.svnChecksumPatch
except:
print('Warning GSAS-II incompletely updated. Please contact <EMAIL>')
# end patch
if warn:
print('You are suggested to install a new version of GSAS-II.\nSee https://bit.ly/G2install',
'\n\nFor information on packages see\nhttps://gsas-ii.readthedocs.io/en/latest/packages.html and',
'\nhttps://gsas-ii.readthedocs.io/en/latest/GSASIIGUI.html#GSASIIdataGUI.versionDict')
###############################################################################
#### GUI creation
###############################################################################
def GSASIImain(application):
'''Start up the GSAS-II GUI'''
ShowVersions()
GUIpatches()
if platform.python_version()[:3] == '2.7':
msg = '''The end-of-life for python 2.7 was January 1, 2020.
We strongly recommend reinstalling GSAS-II from a new installation kit as we may not be able to offer support for operation of GSAS-II in python 2.7. See instructions for details.
'''
download = ''
cmds = []
instructions = 'https://subversion.xray.aps.anl.gov/trac/pyGSAS'
if sys.platform == "win32":
download = 'https://subversion.xray.aps.anl.gov/admin_pyGSAS/downloads/gsas2full-Latest-Windows-x86_64.exe'
instructions = 'https://subversion.xray.aps.anl.gov/trac/pyGSAS/wiki/SingleStepWindowsIllustrated'
elif sys.platform == "darwin":
cmds = ['echo starting download, please wait...',
'''echo 'curl "https://subversion.xray.aps.anl.gov/admin_pyGSAS/downloads/gsas2full-Latest-MacOSX-x86_64.sh" > /tmp/g2.sh; bash /tmp/g2.sh' ''',
'curl "https://subversion.xray.aps.anl.gov/admin_pyGSAS/downloads/gsas2full-Latest-MacOSX-x86_64.sh" > /tmp/g2.sh; bash /tmp/g2.sh'
]
instructions = 'https://subversion.xray.aps.anl.gov/trac/pyGSAS/wiki/MacSingleStepInstallerFigs'
elif sys.platform.startswith("linux"):
download = 'https://subversion.xray.aps.anl.gov/admin_pyGSAS/downloads/gsas2full-Latest-Linux-x86_64.sh'
instructions = 'https://subversion.xray.aps.anl.gov/trac/pyGSAS/wiki/LinuxSingleStepInstaller'
else:
print(u'Unknown platform: '+sys.platform)
if platform.architecture()[0] != '64bit' and sys.platform == "win32":
msg += '''\nYou are currently using 32-bit Python. Please check if you are running 32-bit windows or 64-bit windows (use Start/Settings/System/About & look for "System type".
We recommend using the 64-bit installer if you have 64-bit windows.'''
download = ''
elif platform.architecture()[0] != '64bit' and sys.platform.startswith("linux"):
msg += '''\nYou are using 32-bit Python. We now only package for 64-bit linux.
If you are running 32-bit linux you will need to install Python yourself.
See instructions at https://subversion.xray.aps.anl.gov/trac/pyGSAS/wiki/InstallLinux'''
instructions = 'https://subversion.xray.aps.anl.gov/trac/pyGSAS/wiki/InstallLinux'
dlg = wx.Dialog(None,wx.ID_ANY,'End-Of-Life warning for Python 2.7',
style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)
mainSizer = wx.BoxSizer(wx.VERTICAL)
txt = wx.StaticText(dlg,wx.ID_ANY,G2G.StripIndents(msg))
mainSizer.Add(txt)
txt.Wrap(400)
dlg.SetSizer(mainSizer)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
btnsizer.Add((1,1),1,wx.EXPAND,1)
OKbtn = wx.Button(dlg, wx.ID_OK,'Continue')
OKbtn.SetDefault()
OKbtn.Bind(wx.EVT_BUTTON,lambda event: dlg.EndModal(wx.ID_OK))
btnsizer.Add(OKbtn)
btn = wx.Button(dlg, wx.ID_ANY,'Show Instructions')
def openInstructions(event):
G2G.ShowWebPage(instructions,None)
btn.Bind(wx.EVT_BUTTON, openInstructions)
btnsizer.Add(btn)
if download:
btn = wx.Button(dlg, wx.ID_ANY,'Start Download')
btn.Bind(wx.EVT_BUTTON,lambda event: dlg.EndModal(wx.ID_YES))
btnsizer.Add(btn)
elif cmds:
btn = wx.Button(dlg, wx.ID_ANY,'Start Install')
btn.Bind(wx.EVT_BUTTON,lambda event: dlg.EndModal(wx.ID_CANCEL))
btnsizer.Add(btn)
#btn = wx.Button(dlg, wx.ID_CANCEL)
#btnsizer.AddButton(btn)
btnsizer.Add((1,1),1,wx.EXPAND,1)
#btnsizer.Realize()
mainSizer.Add((-1,5),1,wx.EXPAND,1)
mainSizer.Add(btnsizer,0,wx.ALIGN_CENTER,0)
mainSizer.Add((-1,10))
res = 0
try:
res = dlg.ShowModal()
finally:
dlg.Destroy()
if res == wx.ID_YES:
G2G.ShowWebPage(download,None)
G2G.ShowWebPage(instructions,None)
wx.Sleep(1)
dlg = wx.MessageDialog(None,G2G.StripIndents(
'''Download has been started in your browser; installation instructions will also be shown in a web page\n\nPress OK to exit GSAS-II, Cancel to continue.'''),
'start install',wx.OK|wx.CANCEL)
if dlg.ShowModal() == wx.ID_OK:
sys.exit()
elif res == wx.ID_CANCEL:
dlg = wx.MessageDialog(None,G2G.StripIndents(
'''Press OK to continue. Instructions will be shown in a web page.
Download and installation will start in the terminal window after you press OK. Respond to questions there.'''),
'start install',wx.OK|wx.CANCEL)
if dlg.ShowModal() == wx.ID_OK:
G2G.ShowWebPage(instructions,None)
GSASIIpath.runScript(cmds, wait=True)
sys.exit()
if versionDict['errors']:
dlg = wx.MessageDialog(None, versionDict['errors']+
'\n\nThe simplest solution is to install a new version of GSAS-II. '+
'See https://bit.ly/G2install',
'Python package problem', wx.OK)
try:
dlg.ShowModal()
finally:
dlg.Destroy()
sys.exit()
elif platform.python_version()[:3] not in ['2.7','3.6','3.7','3.8','3.9']:
dlg = wx.MessageDialog(None,
'GSAS-II requires Python 2.7.x or 3.6+\n Yours is '+sys.version.split()[0],
'Python version error', wx.OK)
try:
dlg.ShowModal()
finally:
dlg.Destroy()
sys.exit()
application.main = GSASII(None) # application.main is the main wx.Frame (G2frame in most places)
application.SetTopWindow(application.main)
# save the current package versions
application.main.PackageVersions = G2fil.get_python_versions([wx, mpl, np, sp, ogl])
if GSASIIpath.GetConfigValue('wxInspector'):
import wx.lib.inspection as wxeye
wxeye.InspectionTool().Show()
try:
application.SetAppDisplayName('GSAS-II')
except:
pass
#application.GetTopWindow().SendSizeEvent()
application.GetTopWindow().Show(True)
################################################################################
#### Create main frame (window) for GUI
################################################################################
class GSASII(wx.Frame):
'''Define the main GSAS-II frame and its associated menu items.
:param parent: reference to parent application
'''
def MenuBinding(self,event):
'''Called when a menu is clicked upon; looks up the binding in table
'''
log.InvokeMenuCommand(event.GetId(),self,event)
# def Bind(self,eventtype,handler,*args,**kwargs):
# '''Override the Bind function so that we can wrap calls that will be logged.
#
# N.B. This is a bit kludgy. Menu bindings with an id are wrapped and
# menu bindings with an object and no id are not.
# '''
# if eventtype == wx.EVT_MENU and 'id' in kwargs:
# menulabels = log.SaveMenuCommand(kwargs['id'],self,handler)
# if menulabels:
# wx.Frame.Bind(self,eventtype,self.MenuBinding,*args,**kwargs)
# return
# wx.Frame.Bind(self,eventtype,handler,*args,**kwargs)
def _Add_FileMenuItems(self, parent):
'''Add items to File menu
'''
item = parent.Append(wx.ID_ANY,'&Open project...\tCtrl+O','Open a GSAS-II project file (*.gpx)')
self.Bind(wx.EVT_MENU, self.OnFileOpen, id=item.GetId())
if sys.platform == "darwin":
item = parent.Append(wx.ID_ANY,'&Open in new window...','Open a GSAS-II project file (*.gpx) in a separate process')
self.Bind(wx.EVT_MENU, self.OnNewGSASII, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Reopen recent...\tCtrl+E','Reopen a previously used GSAS-II project file (*.gpx)')
self.Bind(wx.EVT_MENU, self.OnFileReopen, id=item.GetId())
item = parent.Append(wx.ID_ANY,'&Save project\tCtrl+S','Save project under current name')
self.Bind(wx.EVT_MENU, self.OnFileSave, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Save project as...','Save current project to new file')
self.Bind(wx.EVT_MENU, self.OnFileSaveas, id=item.GetId())
item = parent.Append(wx.ID_ANY,'&New project','Create an empty new project; saving the current one is optional')
self.Bind(wx.EVT_MENU, self.OnFileClose, id=item.GetId())
item = parent.Append(wx.ID_PREFERENCES,"&Preferences",'')
self.Bind(wx.EVT_MENU, self.OnPreferences, item)
if GSASIIpath.whichsvn():
item = parent.Append(wx.ID_ANY,'Edit proxy...','Edit proxy internet information (used for updates)')
self.Bind(wx.EVT_MENU, self.EditProxyInfo, id=item.GetId())
if GSASIIpath.GetConfigValue('debug'):
def OnIPython(event):
GSASIIpath.IPyBreak()
item = parent.Append(wx.ID_ANY,"IPython Console",'')
self.Bind(wx.EVT_MENU, OnIPython, item)
def OnwxInspect(event):
import wx.lib.inspection as wxeye
wxeye.InspectionTool().Show()
item = parent.Append(wx.ID_ANY,"wx inspection tool",'')
self.Bind(wx.EVT_MENU, OnwxInspect, item)
item = parent.Append(wx.ID_EXIT,'Exit\tALT+F4','Exit from GSAS-II')
self.Bind(wx.EVT_MENU, self.ExitMain, id=item.GetId())
def _Add_DataMenuItems(self,parent):
'''Add items to Data menu
'''
# item = parent.Append(
# help='',id=wx.ID_ANY,
# kind=wx.ITEM_NORMAL,
# text='Read image data...')
# self.Bind(wx.EVT_MENU, self.OnImageRead, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Read Powder Pattern Peaks...','')
self.Bind(wx.EVT_MENU, self.OnReadPowderPeaks, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Sum or Average powder data','')
self.Bind(wx.EVT_MENU, self.OnPwdrSum, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Sum image data','')
self.Bind(wx.EVT_MENU, self.OnImageSum, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Add new phase','')
self.Bind(wx.EVT_MENU, self.OnAddPhase, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Delete phase entries','')
self.Bind(wx.EVT_MENU, self.OnDeletePhase, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Rename data entry',
'Rename the selected data tree item (PWDR, HKLF or IMG)')
self.Bind(wx.EVT_MENU, self.OnRenameData, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Delete data entries',
'Delete selected data items from data tree')
self.Bind(wx.EVT_MENU, self.OnDataDelete, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Delete plots','Delete selected plots')
self.Bind(wx.EVT_MENU, self.OnPlotDelete, id=item.GetId())
expandmenu = wx.Menu()
item = parent.AppendSubMenu(expandmenu,'Expand tree items',
'Expand items of type in GSAS-II data tree')
for s in 'all','IMG','PWDR','PDF','HKLF','SASD','REFD':
if s == 'all':
help = 'Expand all items in GSAS-II data tree'
else:
help = 'Expand '+s+' type items in GSAS-II data tree'
item = expandmenu.Append(wx.ID_ANY,s,help)
self.Bind(wx.EVT_MENU,self.ExpandAll,id=item.GetId())
movemenu = wx.Menu()
item = parent.AppendSubMenu(movemenu,'Move tree items',
'Move items of the selected type to the end of the GSAS-II data tree')
for s in 'IMG','PWDR','PDF','HKLF','SASD','REFD','Phase':
help = 'Move '+s+' type items to end of GSAS-II data tree'
item = movemenu.Append(wx.ID_ANY,s,help)
self.Bind(wx.EVT_MENU,self.MoveTreeItems,id=item.GetId())
def _Add_CalculateMenuItems(self,parent):
item = parent.Append(wx.ID_ANY,'Setup PDFs','Create PDF tree entries for selected powder patterns')
self.MakePDF.append(item)
self.Bind(wx.EVT_MENU, self.OnMakePDFs, id=item.GetId())
item = parent.Append(wx.ID_ANY,'&View LS parms\tCTRL+L','View least squares parameters')
self.Bind(wx.EVT_MENU, self.OnShowLSParms, id=item.GetId())
item = parent.Append(wx.ID_ANY,'&Refine\tCTRL+R','Perform a refinement')
if len(self.Refine): # extend state for new menus to match main (on mac)
state = self.Refine[0].IsEnabled()
else:
state = False
item.Enable(state)
self.Refine.append(item)
self.Bind(wx.EVT_MENU, self.OnRefine, id=item.GetId())
item = parent.Append(wx.ID_ANY,'&Run Fprime','X-ray resonant scattering')
self.Bind(wx.EVT_MENU, self.OnRunFprime, id=item.GetId())
item = parent.Append(wx.ID_ANY,'&Run Absorb','X-ray absorption')
self.Bind(wx.EVT_MENU, self.OnRunAbsorb, id=item.GetId())
# if GSASIIpath.GetConfigValue('debug'): # allow exceptions for debugging
# item = parent.Append(help='', id=wx.ID_ANY, kind=wx.ITEM_NORMAL,
# text='tree test')
# self.Bind(wx.EVT_MENU, self.TreeTest, id=item.GetId())
def _init_Imports(self):
'''import all the G2phase*.py & G2sfact*.py & G2pwd*.py files that
are found in the path
'''
self.ImportPhaseReaderlist = G2fil.LoadImportRoutines('phase','Phase')
self.ImportSfactReaderlist = G2fil.LoadImportRoutines('sfact','Struct_Factor')
self.ImportPowderReaderlist = G2fil.LoadImportRoutines('pwd','Powder_Data')
self.ImportSmallAngleReaderlist = G2fil.LoadImportRoutines('sad','SmallAngle_Data')
self.ImportReflectometryReaderlist = G2fil.LoadImportRoutines('rfd','Reflectometry_Data')
self.ImportPDFReaderlist = G2fil.LoadImportRoutines('pdf','PDF_Data')
self.ImportImageReaderlist = G2fil.LoadImportRoutines('img','Images')
self.ImportMenuId = {}
def testSeqRefineMode(self):
'''Returns the list of histograms included in a sequential refinement or
an empty list for a standard (non-sequential) refinement.
Also sets menu item status depending on the mode
'''
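# Minimal usage sketch (hypothetical caller code, not part of this routine): most
# callers only need the truth value of the returned list, e.g.
#   if G2frame.testSeqRefineMode():
#       pass   # sequential mode: Controls['Seq Data'] lists the histograms to refine
#   else:
#       pass   # standard (non-sequential) refinement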
cId = GetGPXtreeItemId(self,self.root, 'Controls')
if cId:
controls = self.GPXtree.GetItemPyData(cId)
seqSetting = controls.get('Seq Data',[])
else:
seqSetting = None
if seqSetting:
for item in self.Refine:
item.SetItemLabel('Se&quential refine\tCtrl+R') #might fail on old wx
seqMode = True
else:
for item in self.Refine:
item.SetItemLabel('&Refine\tCtrl+R') #might fail on old wx
seqMode = False
for menu,Id in self.ExportSeq:
menu.Enable(Id,seqMode)
for menu,Id in self.ExportNonSeq:
menu.Enable(Id,not seqMode)
return seqSetting
def PreviewFile(self,filename):
'utility to confirm we have the right file'
fp = open(filename,'r')
rdmsg = u'File '+ filename +u' begins:\n\n'
try:
rdmsg += fp.read(80)
rdmsg += '\n\nDo you want to read this file?'
except UnicodeDecodeError:
rdmsg = None
fp.close()
if rdmsg is None or not all([ord(c) < 128 and ord(c) != 0 for c in rdmsg]): # show only if ASCII
rdmsg = u'File '+ filename +u' is a binary file. Do you want to read this file?'
# it would be better to use something that
# would resize better, but this will do for now
dlg = wx.MessageDialog(
self, rdmsg,
'Is this the file you want?',
wx.YES_NO | wx.ICON_QUESTION,
)
dlg.SetSize((700,300)) # does not resize on Mac
result = wx.ID_NO
try:
result = dlg.ShowModal()
finally:
dlg.Destroy()
if result == wx.ID_NO: return True
return False
def OnImportGeneric(self,reader,readerlist,label,multiple=False,
usedRanIdList=[],Preview=True,load2Tree=False):
'''Used for all imports, including Phases, datasets, images...
Called from :meth:`GSASII.OnImportPhase`, :meth:`GSASII.OnImportImage`,
:meth:`GSASII.OnImportSfact`, :meth:`GSASII.OnImportPowder`,
:meth:`GSASII.OnImportSmallAngle` and :meth:`GSASII.OnImportReflectometry`
Uses reader_objects subclassed from :class:`GSASIIobj.ImportPhase`,
:class:`GSASIIobj.ImportStructFactor`,
:class:`GSASIIobj.ImportPowderData`,
:class:`GSASIIobj.ImportSmallAngleData`
:class:`GSASIIobj.ImportReflectometryData` or
:class:`GSASIIobj.ImportImage`.
If a specific reader is specified, only that method will be called,
but if no reader is specified, every one that is potentially
compatible (by file extension) will be tried on the file(s)
selected in the Open File dialog.
:param reader_object reader: This will be a reference to
a particular object to be used to read a file, or None
if every appropriate reader should be used.
:param list readerlist: a list of reader objects appropriate for
the current read attempt. At present, this will be either
self.ImportPhaseReaderlist, self.ImportSfactReaderlist,
self.ImportPowderReaderlist or self.ImportImageReaderlist
(defined in _init_Imports from the files found in the path),
but in theory this list could be tailored.
Used only when reader is None.
:param str label: string to place on the open file dialog:
Open `label` input file
:param bool multiple: True if multiple files can be selected
in the file dialog. False is default. At present True is used
only for reading of powder data.
:param list usedRanIdList: an optional list of random Ids that
have been used and should not be reused
:param bool Preview: indicates if a preview of the file should
be shown. Default is True, but set to False for image files
which are all binary.
:param bool load2Tree: indicates if the file should be loaded
into the data tree immediately (used for images only). True
only when called from :meth:`OnImportImage`; causes return
value to change to a list of True values rather than
reader objects.
:returns: a list of reader objects (rd_list) that were able
to read the specified file(s). This list may be empty.
'''
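# Illustrative sketch (hypothetical code, mirroring the call made in OnImportPowder below):
#   reqrdr = self.ImportMenuId.get(event.GetId())   # a specific reader, or None
#   rdlist = self.OnImportGeneric(reqrdr, self.ImportPowderReaderlist,
#       'Powder Data', multiple=True)
#   for rd in rdlist:   # each rd is a (deep-copied) reader object that read successfully
#       pass            # process rd.powderdata, rd.comments, etc.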
self.lastimport = ''
self.zipfile = None
singlereader = True
if reader is None:
singlereader = False
multiple = False
#print "use all formats"
choices = "any file (*.*)|*.*"
choices += "|zip archive (.zip)|*.zip"
extdict = {}
# compile a list of allowed extensions
for rd in readerlist:
fmt = rd.formatName
for extn in rd.extensionlist:
if not extdict.get(extn): extdict[extn] = []
extdict[extn] += [fmt,]
for extn in sorted(extdict.keys(),key=lambda k: k.lower()):
fmt = ''
for f in extdict[extn]:
if fmt != "": fmt += ', '
fmt += f
choices += "|" + fmt + " file (*" + extn + ")|*" + extn
else:
readerlist = [reader,]
# compile a list of allowed extensions
choices = reader.formatName + " file ("
w = ""
for extn in reader.extensionlist:
if w != "": w += ";"
w += "*" + extn
choices += w + ")|" + w
choices += "|zip archive (.zip)|*.zip"
if not reader.strictExtension:
choices += "|any file (*.*)|*.*"
# get the file(s)
if multiple:
mode = wx.FD_OPEN|wx.FD_MULTIPLE
else:
mode = wx.FD_OPEN
if len(readerlist) > 1:
typ = ' (type to be guessed)'
else:
typ = ' (type '+readerlist[0].formatName+')'
filelist = G2G.GetImportFile(self,
message="Choose "+label+" input file"+typ,
defaultFile="",wildcard=choices,style=mode)
rd_list = []
filelist1 = []
for filename in filelist:
# is this a zip file?
if os.path.splitext(filename)[1].lower() == '.zip':
extractedfiles = G2IO.ExtractFileFromZip(
filename,parent=self,
multipleselect=True)
if extractedfiles is None: continue # error or Cancel
if extractedfiles != filename:
self.zipfile = filename # save zip name
filelist1 += extractedfiles
continue
filelist1.append(filename)
filelist = filelist1
Start = True #1st time read - clear selections below
for filename in filelist:
# is this a zip file?
if os.path.splitext(filename)[1].lower() == '.zip':
extractedfile = G2IO.ExtractFileFromZip(filename,parent=self)
if extractedfile is None: continue # error or Cancel
if extractedfile != filename:
filename,self.zipfile = extractedfile,filename # now use the file that was created
# determine which formats are compatible with this file
primaryReaders = []
secondaryReaders = []
for rd in readerlist:
flag = rd.ExtensionValidator(filename)
if flag is None:
secondaryReaders.append(rd)
elif flag:
primaryReaders.append(rd)
if len(secondaryReaders) + len(primaryReaders) == 0 and reader:
self.ErrorDialog('Not supported','The selected reader cannot read file '+filename)
return []
elif len(secondaryReaders) + len(primaryReaders) == 0:
self.ErrorDialog('No Format','No matching format for file '+filename)
return []
fp = None
msg = ''
if len(filelist) == 1 and Preview:
if self.PreviewFile(filename): return []
self.lastimport = filename # this is probably not what I want to do -- it saves only the
# last name in a series. See rd.readfilename for a better name.
# try the file first with Readers that specify the
# file's extension and later with ones that merely allow it
errorReport = ''
for rd in primaryReaders+secondaryReaders:
if Start: #clear old bank selections to allow new ones to be selected by user
rd.selections = []
rd.dnames = []
rd.ReInitialize() # purge anything from a previous read
rd.errors = "" # clear out any old errors
if not rd.ContentsValidator(filename): # rejected on cursory check
errorReport += "\n "+rd.formatName + ' validator error'
if rd.errors:
errorReport += ': '+rd.errors
continue
if len(rd.selections)>1 and Start:
dlg = G2G.G2MultiChoiceDialog(self,'Dataset Selector','Select data to read from the list below',rd.dnames)
if dlg.ShowModal() == wx.ID_OK:
rd.selections = dlg.GetSelections()
Start = False
dlg.Destroy()
repeat = True
rdbuffer = {} # create temporary storage for file reader
block = 0
while repeat: # loop if the reader asks for another pass on the file
block += 1
repeat = False
rd.objname = os.path.basename(filename)
flag = False
if GSASIIpath.GetConfigValue('debug'): # allow exceptions for debugging
flag = rd.Reader(filename,self,buffer=rdbuffer,blocknum=block,
usedRanIdList=usedRanIdList,)
else:
try:
flag = rd.Reader(filename,self,buffer=rdbuffer,
blocknum=block,usedRanIdList=usedRanIdList,)
except rd.ImportException as detail:
rd.errors += "\n Read exception: "+str(detail)
except Exception as detail:
import traceback
rd.errors += "\n Unhandled read exception: "+str(detail)
rd.errors += "\n Traceback info:\n"+str(traceback.format_exc())
if flag: # this read succeeded
if rd.SciPy: #was read by the default scipy reader; needs one-time fixes
G2IO.EditImageParms(self,rd.Data,rd.Comments,rd.Image,filename)
rd.SciPy = False
rd.readfilename = filename
if load2Tree: #images only
if rd.repeatcount == 1 and not rd.repeat: # skip image number if only one in set
rd.Data['ImageTag'] = None
else:
rd.Data['ImageTag'] = rd.repeatcount
rd.Data['formatName'] = rd.formatName
if rd.sumfile:
rd.readfilename = rd.sumfile
# Load generic metadata, as configured
G2fil.GetColumnMetadata(rd)
G2IO.LoadImage2Tree(rd.readfilename,self,rd.Comments,rd.Data,rd.Npix,rd.Image)
rd_list.append(True) # save a stub in place of the result before it is written over
del rd.Image
else:
rd_list.append(copy.deepcopy(rd)) # save the result before it is written over
if rd.repeat:
repeat = True
continue
errorReport += '\n'+rd.formatName + ' read error'
if rd.errors:
errorReport += ': '+rd.errors
if rd_list: # read succeeded, was there a warning or any errors?
if rd.warnings:
self.ErrorDialog('Read Warning','The '+ rd.formatName+
' reader reported a warning message:\n\n'+rd.warnings)
break # success in reading, try no further
else:
if singlereader:
msg += '\n'+rd.warnings
print(u'The '+ rd.formatName+u' reader was not able to read file '+filename+msg)
try:
print(u'\n\nError message(s):\n\t'+errorReport)
except:
pass
self.ErrorDialog('Read Error','The '+ rd.formatName+
' reader was not able to read file '+filename+msg)
else:
print('No reader was able to read file '+filename+msg)
try:
print('\n\nError message(s):\n\t'+errorReport)
except:
pass
self.ErrorDialog('Read Error','No reader was able to read file '+filename+msg)
if fp: fp.close()
return rd_list
def _Add_ImportMenu_Phase(self,parent):
'''configure the Import Phase menus according to the readers found in _init_Imports
'''
submenu = wx.Menu()
item = parent.AppendSubMenu(submenu,'Phase','Import phase data')
for reader in self.ImportPhaseReaderlist:
item = submenu.Append(wx.ID_ANY,u'from '+reader.formatName+u' file',reader.longFormatName)
self.ImportMenuId[item.GetId()] = reader
self.Bind(wx.EVT_MENU, self.OnImportPhase, id=item.GetId())
item = submenu.Append(wx.ID_ANY,'guess format from file','Import phase data, use file to try to determine format')
self.Bind(wx.EVT_MENU, self.OnImportPhase, id=item.GetId())
def OnImportPhase(self,event):
'''Called in response to an Import/Phase/... menu item
to read phase information.
dict self.ImportMenuId is used to look up the specific
reader item associated with the menu item, which will be
None for the last menu item, which is the "guess" option
where all appropriate formats will be tried.
'''
# look up which format was requested
reqrdr = self.ImportMenuId.get(event.GetId())
# make a list of phase names, ranId's and the histograms used in those phases
phaseRIdList,usedHistograms = self.GetPhaseInfofromTree()
phaseNameList = list(usedHistograms.keys()) # phase names in use
usedHKLFhists = [] # used single-crystal histograms
for p in usedHistograms:
for h in usedHistograms[p]:
if h.startswith('HKLF ') and h not in usedHKLFhists:
usedHKLFhists.append(h)
rdlist = self.OnImportGeneric(reqrdr,self.ImportPhaseReaderlist,
'phase',usedRanIdList=phaseRIdList)
if len(rdlist) == 0: return
# for now rdlist is only expected to have one element
# but below will allow multiple phases to be imported
# if the import routines ever implement multiple phase reads.
self.CheckNotebook()
newPhaseList = []
for rd in rdlist:
PhaseName = ''
dlg = wx.TextEntryDialog(self, 'Enter the name for the new phase',
'Edit phase name', rd.Phase['General']['Name'],style=wx.OK)
while PhaseName == '':
dlg.CenterOnParent()
if dlg.ShowModal() == wx.ID_OK:
PhaseName = dlg.GetValue().strip()
else:
dlg.Destroy()
return
dlg.Destroy()
# make new phase names unique
rd.Phase['General']['Name'] = G2obj.MakeUniqueLabel(PhaseName,phaseNameList)
if rd.Phase['General']['SGData']['SpGrp'] in G2spc.spg2origins:
choice = G2G.ChooseOrigin(self,rd)
if choice is None: return # dialog cancelled
rd.Phase = choice
PhaseName = rd.Phase['General']['Name'][:]
newPhaseList.append(PhaseName)
print(u'Read phase {} from file {}'.format(PhaseName,self.lastimport))
if not GetGPXtreeItemId(self,self.root,'Phases'):
sub = self.GPXtree.AppendItem(parent=self.root,text='Phases')
else:
sub = GetGPXtreeItemId(self,self.root,'Phases')
psub = self.GPXtree.AppendItem(parent=sub,text=PhaseName)
self.GPXtree.SetItemPyData(psub,rd.Phase)
wx.CallAfter(self.GPXtree.SelectItem,psub) # should call SelectDataTreeItem
try:
rd.MPhase['General']['Name'] = G2obj.MakeUniqueLabel(PhaseName+' mag',phaseNameList)
PhaseName = rd.MPhase['General']['Name'][:]
newPhaseList.append(PhaseName)
psub = self.GPXtree.AppendItem(parent=sub,text=PhaseName)
self.GPXtree.SetItemPyData(psub,rd.MPhase)
wx.CallAfter(self.GPXtree.SelectItem,psub) # should call SelectDataTreeItem
except (AttributeError,TypeError):
pass
self.GPXtree.Expand(self.root) # make sure phases are seen
self.GPXtree.Expand(sub)
self.GPXtree.Expand(psub)
self.PickIdText = None
# add constraints imported with phase to tree
# at present, constraints are generated only in ISODISTORT_proc in the
# CIF import
if rd.Constraints:
sub = GetGPXtreeItemId(self,self.root,'Constraints') # was created in CheckNotebook if needed
Constraints = self.GPXtree.GetItemPyData(sub)
for i in rd.Constraints:
if type(i) is dict:
if '_Explain' not in Constraints: Constraints['_Explain'] = {}
Constraints['_Explain'].update(i)
else:
Constraints['Phase'].append(i)
if not newPhaseList: return # somehow, no new phases
# get a list of existing histograms
PWDRlist = []
HKLFlist = []
if self.GPXtree.GetCount():
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if name.startswith('PWDR ') and name not in PWDRlist:
PWDRlist.append(name)
if name.startswith('HKLF ') and name not in HKLFlist:
HKLFlist.append(name)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
TextList = PWDRlist + HKLFlist
if not TextList:
return #no histograms
header = 'Select histogram(s) to add to new phase(s):'
for phaseName in newPhaseList:
header += '\n '+phaseName
notOK = True
while notOK:
result = G2G.ItemSelector(TextList,self,header,header='Add histogram(s)',multiple=True)
if not result: return
# check that selected single crystal histograms are not already in use!
used = [TextList[i] for i in result if TextList[i] in usedHKLFhists]
#for i in result:
# if TextList[i] in usedHKLFhists: used.append(TextList[i])
if used:
msg = 'The following single crystal histogram(s) are already in use'
for i in used:
msg += '\n '+str(i)
msg += '\nAre you sure you want to add them to this phase? '
msg += 'Associating a single crystal dataset with more than one phase is usually an error, '
msg += 'so No is suggested here.'
if self.ErrorDialog('Likely error',msg,self,wtype=wx.YES_NO) == wx.ID_YES: notOK = False
else:
notOK = False
# connect new phases to histograms
sub = GetGPXtreeItemId(self,self.root,'Phases')
if not sub:
raise Exception('ERROR -- why are there no phases here?')
wx.BeginBusyCursor()
item, cookie = self.GPXtree.GetFirstChild(sub)
while item: # loop over (new) phases
phaseName = self.GPXtree.GetItemText(item)
data = self.GPXtree.GetItemPyData(item)
item, cookie = self.GPXtree.GetNextChild(sub, cookie)
if phaseName not in newPhaseList: continue
generalData = data['General']
SGData = generalData['SGData']
Super = generalData.get('Super',0)
SuperVec = []
if Super:
SuperVec = np.array(generalData['SuperVec'][0])
UseList = data['Histograms']
NShkl = len(G2spc.MustrainNames(SGData))
NDij = len(G2spc.HStrainNames(SGData))
for i in result:
histoName = TextList[i]
if histoName in HKLFlist:
#redo UpdateHKLFdata(histoName) here:
Id = GetGPXtreeItemId(self,self.root,histoName)
refDict,reflData = self.GPXtree.GetItemPyData(Id)
G,g = G2lat.cell2Gmat(generalData['Cell'][1:7])
Super = reflData.get('Super',0)
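# recompute d-spacings for the newly added phase: G,g from G2lat.cell2Gmat are the
# reciprocal and real metric tensors for this cell, and G2lat.calc_rDsq2(H,G) gives
# 1/d**2 (including any superspace dimension), so ref[4+Super] is set to d below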
for iref,ref in enumerate(reflData['RefList']):
hkl = ref[:3]
if Super:
H = list(hkl+SuperVec*ref[3])
else:
H = hkl
ref[4+Super] = np.sqrt(1./G2lat.calc_rDsq2(H,G))
iabsnt = G2spc.GenHKLf(H,SGData)[0]
if iabsnt: #flag space gp. absences
if Super:
if not ref[2+Super]:
ref[3+Super] = 0
else:
ref[3+Super] = 1 #twin id
else:
ref[3] = 0
UseList[histoName] = SetDefaultDData(reflData['Type'],histoName)
elif histoName in PWDRlist:
Id = GetGPXtreeItemId(self,self.root,histoName)
refList = self.GPXtree.GetItemPyData(
GetGPXtreeItemId(self,Id,'Reflection Lists'))
refList[generalData['Name']] = {}
UseList[histoName] = SetDefaultDData('PWDR',histoName,NShkl=NShkl,NDij=NDij)
else:
raise Exception('Unexpected histogram '+histoName)
wx.EndBusyCursor()
self.EnableRefineCommand()
return # success
def _Add_ImportMenu_Image(self,parent):
'''configure the Import Image menus according to the readers found in _init_Imports
'''
submenu = wx.Menu()
item = parent.AppendSubMenu(submenu, 'Image','Import image file')
for reader in self.ImportImageReaderlist:
item = submenu.Append(wx.ID_ANY,u'from '+reader.formatName+u' file',reader.longFormatName)
self.ImportMenuId[item.GetId()] = reader
self.Bind(wx.EVT_MENU, self.OnImportImage, id=item.GetId())
item = submenu.Append(wx.ID_ANY,'guess format from file','Import image data, use file to try to determine format')
self.Bind(wx.EVT_MENU, self.OnImportImage, id=item.GetId())
def OnImportImage(self,event):
'''Called in response to an Import/Image/... menu item
to read an image from a file. Like all the other imports,
dict self.ImportMenuId is used to look up the specific
reader item associated with the menu item, which will be
None for the last menu item, which is the "guess" option
where all appropriate formats will be tried.
A reader object is filled each time an image is read.
'''
self.CheckNotebook()
# look up which format was requested
reqrdr = self.ImportMenuId.get(event.GetId())
rdlist = self.OnImportGeneric(reqrdr,self.ImportImageReaderlist,
'image',multiple=True,Preview=False,load2Tree=True)
if rdlist:
self.GPXtree.SelectItem(GetGPXtreeItemId(self,self.Image,'Image Controls')) #show last image to have been read
def _Add_ImportMenu_Sfact(self,parent):
'''configure the Import Structure Factor menus according to the readers found in _init_Imports
'''
submenu = wx.Menu()
item = parent.AppendSubMenu(submenu,'Structure Factor','Import Structure Factor data')
for reader in self.ImportSfactReaderlist:
item = submenu.Append(wx.ID_ANY,u'from '+reader.formatName+u' file',reader.longFormatName)
self.ImportMenuId[item.GetId()] = reader
self.Bind(wx.EVT_MENU, self.OnImportSfact, id=item.GetId())
item = submenu.Append(wx.ID_ANY,'guess format from file','Import Structure Factor, use file to try to determine format')
self.Bind(wx.EVT_MENU, self.OnImportSfact, id=item.GetId())
def OnImportSfact(self,event):
'''Called in response to an Import/Structure Factor/... menu item
to read single crystal datasets.
dict self.ImportMenuId is used to look up the specific
reader item associated with the menu item, which will be
None for the last menu item, which is the "guess" option
where all appropriate formats will be tried.
'''
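# For reference (summary of the code below): each structure factor set read here is stored
# in the tree as  HKLF <name> -> [valuesdict, RefDict]  with child items 'Instrument
# Parameters' and a dummy 'Reflection List' entry; the new histograms can then be linked
# to one or more phases.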
# get a list of existing histograms
HKLFlist = []
if self.GPXtree.GetCount():
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if name.startswith('HKLF ') and name not in HKLFlist:
HKLFlist.append(name)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
# look up which format was requested
reqrdr = self.ImportMenuId.get(event.GetId())
rdlist = self.OnImportGeneric(reqrdr,self.ImportSfactReaderlist,
'Structure Factor',multiple=True)
if len(rdlist) == 0: return
self.CheckNotebook()
newHistList = []
for rd in rdlist:
HistName = rd.objname
if len(rdlist) <= 2:
dlg = wx.TextEntryDialog( # allow editing of Structure Factor name
self, 'Enter the name for the new Structure Factor',
'Edit Structure Factor name', HistName,
style=wx.OK)
dlg.CenterOnParent()
if dlg.ShowModal() == wx.ID_OK:
HistName = dlg.GetValue()
dlg.Destroy()
HistName = 'HKLF '+G2obj.StripUnicode(HistName,'_')
# make new histogram names unique
if len(rd.Banks):
for Bank in rd.Banks:
valuesdict = {'wtFactor':1.0,'Dummy':False,'ranId':ran.randint(0,sys.maxsize),}
HistName = G2obj.MakeUniqueLabel(HistName,HKLFlist)
print (u'Read structure factor table '+HistName+u' from file '+self.lastimport)
Id = self.GPXtree.AppendItem(parent=self.root,text=HistName)
if not Bank['RefDict'].get('FF'):
Bank['RefDict']['FF'] = {}
self.GPXtree.SetItemPyData(Id,[valuesdict,Bank['RefDict']])
Sub = self.GPXtree.AppendItem(Id,text='Instrument Parameters')
self.GPXtree.SetItemPyData(Sub,copy.copy(rd.Parameters))
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Reflection List'),{}) #dummy entry for GUI use
newHistList.append(HistName)
else:
valuesdict = {'wtFactor':1.0,'Dummy':False,'ranId':ran.randint(0,sys.maxsize),}
HistName = G2obj.MakeUniqueLabel(HistName,HKLFlist)
print (u'Read structure factor table '+HistName+u' from file '+self.lastimport)
if not rd.RefDict.get('FF'):
rd.RefDict['FF'] = {}
Id = self.GPXtree.AppendItem(parent=self.root,text=HistName)
self.GPXtree.SetItemPyData(Id,[valuesdict,rd.RefDict])
Sub = self.GPXtree.AppendItem(Id,text='Instrument Parameters')
self.GPXtree.SetItemPyData(Sub,rd.Parameters)
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Reflection List'),{}) #dummy entry for GUI use
newHistList.append(HistName)
self.GPXtree.SelectItem(Id)
self.GPXtree.Expand(Id)
self.Sngl = True
if not newHistList: return # somehow, no new histograms
# make a list of phase names
phaseRIdList,usedHistograms = self.GetPhaseInfofromTree()
phaseNameList = list(usedHistograms.keys()) # phase names in use
if not phaseNameList: return # no phases yet, nothing to do
header = 'Select phase(s) to add the new\nsingle crystal dataset(s) to:'
for Name in newHistList:
header += '\n '+str(Name)
result = G2G.ItemSelector(phaseNameList,self,header,header='Add to phase(s)',multiple=True)
if not result: return
# connect new phases to histograms
sub = GetGPXtreeItemId(self,self.root,'Phases')
if not sub:
raise Exception('ERROR -- why are there no phases here?')
wx.BeginBusyCursor()
item, cookie = self.GPXtree.GetFirstChild(sub)
iph = -1
while item: # loop over (new) phases
iph += 1
data = self.GPXtree.GetItemPyData(item)
item, cookie = self.GPXtree.GetNextChild(sub, cookie)
if iph not in result: continue
generalData = data['General']
SGData = generalData['SGData']
Super = generalData.get('Super',0)
SuperVec = []
if Super:
SuperVec = np.array(generalData['SuperVec'][0])
UseList = data['Histograms']
for histoName in newHistList:
#redo UpdateHKLFdata(histoName) here:
Id = GetGPXtreeItemId(self,self.root,histoName)
refDict,reflData = self.GPXtree.GetItemPyData(Id)
UseList[histoName] = SetDefaultDData(reflData['Type'],histoName)
G,g = G2lat.cell2Gmat(generalData['Cell'][1:7])
if 'TwMax' in reflData: #nonmerohedral twins present
UseList[histoName]['Twins'] = []
for iT in range(reflData['TwMax'][0]+1):
if iT in reflData['TwMax'][1]:
UseList[histoName]['Twins'].append([False,0.0])
else:
UseList[histoName]['Twins'].append([np.array([[1,0,0],[0,1,0],[0,0,1]]),[1.0,False,reflData['TwMax'][0]]])
else: #no nonmerohedral twins
UseList[histoName]['Twins'] = [[np.array([[1,0,0],[0,1,0],[0,0,1]]),[1.0,False,0]],]
for iref,ref in enumerate(reflData['RefList']):
hkl = ref[:3]
if Super:
H = list(hkl+SuperVec*ref[3])
else:
H = hkl
ref[4+Super] = np.sqrt(1./G2lat.calc_rDsq2(H,G))
iabsnt,mul,Uniq,phi = G2spc.GenHKLf(H,SGData)
if iabsnt: #flag space gp. absences
if Super:
if not ref[2+Super]:
ref[3+Super] = 0
else:
ref[3+Super] = 1 #twin id?
else:
ref[3] = 0
wx.EndBusyCursor()
self.EnableRefineCommand()
return # success
def _Add_ImportMenu_powder(self,parent):
'''configure the Powder Data menus according to the readers found in _init_Imports
'''
submenu = wx.Menu()
item = parent.AppendSubMenu(submenu,'Powder Data','Import Powder data')
for reader in self.ImportPowderReaderlist:
item = submenu.Append(wx.ID_ANY,u'from '+reader.formatName+u' file',reader.longFormatName)
self.ImportMenuId[item.GetId()] = reader
self.Bind(wx.EVT_MENU, self.OnImportPowder, id=item.GetId())
item = submenu.Append(wx.ID_ANY,'guess format from file','Import powder data, use file to try to determine format')
self.Bind(wx.EVT_MENU, self.OnImportPowder, id=item.GetId())
submenu.AppendSeparator()
item = submenu.Append(wx.ID_ANY,'Simulate a dataset','Create a powder data set entry that will be simulated')
self.Bind(wx.EVT_MENU, self.OnDummyPowder, id=item.GetId())
item = submenu.Append(wx.ID_ANY,'Auto Import','Import data files as found')
def OnAutoImport(event):
G2G.AutoLoadFiles(self,FileTyp='pwd')
self.Bind(wx.EVT_MENU, OnAutoImport, id=item.GetId())
item = submenu.Append(wx.ID_ANY,'Fit instr. profile from fundamental parms...','')
self.Bind(wx.EVT_MENU, self.OnPowderFPA, id=item.GetId())
def OpenPowderInstprm(self,instfile):
'''Read a GSAS-II (new) instrument parameter file
:param str instfile: name of instrument parameter file
'''
File = open(instfile,'r')
lines = File.readlines()
File.close()
return lines
def ReadPowderInstprm(self,instLines,bank,databanks,rd):
'''Read lines from a GSAS-II (new) instrument parameter file
similar to G2pwdGUI.OnLoad
If the instprm file has multiple banks, each with a header '#Bank n: ...', this
finds the matching bank number to load; if no match is found an error string is returned.
Note that this routine performs a similar role to :func:`GSASIIfiles.ReadPowderInstprm`,
but this will call a GUI routine for selection when needed. TODO: refactor to combine
:param list instLines: strings from GSAS-II parameter file; can be concatenated with ';'
:param int bank: bank number to check when the instprm file has '#Bank n:...' headers;
when bank == n the parameters in that set are used, otherwise that set is skipped. Ignored if
no '#Bank n:' headers are present. NB: this kind of instprm file is made by the 'Save all profile' command in Instrument Parameters
:returns: a list of two dicts [Inst1,Inst2] (instrument parameters and lookup tables) if OK, or
a str error message if the read failed
'''
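# Illustrative sketch of the new-style .instprm text this parser expects (the values
# below are made-up placeholders, not real calibration numbers):
#   #GSAS-II instrument parameter file
#   #Bank 1: ...                       <- bank headers only in multi-bank files
#   Type:PXC;Lam:0.7093;Zero:0.0;Polariz.:0.99
# Each non-comment line holds 'item:value' pairs separated by ';'; values that fail
# float() are kept as strings, and long values (e.g. CorrectionCode) may span several
# lines delimited by ''' or """.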
if 'GSAS-II' not in instLines[0]: # not a valid file
return 'Not a valid GSAS-II instprm file'
newItems = []
newVals = []
Found = False
il = 0
if bank is None: # no bank was specified in the input file, is more than one present in file?
banklist = set([])
for S in instLines:
if S[0] == '#' and 'Bank' in S:
banklist.add(int(S.split(':')[0].split()[1]))
if len(banklist) > 1: # yes, the user must make a selection
choices = [str(i) for i in banklist]
bank = int(G2G.ItemSelector(choices,self,multiple=False))
else:
bank = 1
rd.powderentry[2] = bank
while il < len(instLines):
S = instLines[il]
if S[0] == '#':
if Found:
break
if 'Bank' in S:
if bank == int(S.split(':')[0].split()[1]):
il += 1
S = instLines[il]
else:
il += 1
S = instLines[il]
while il < len(instLines) and '#Bank' not in S:
il += 1
if il == len(instLines):
return 'Bank %d not found in instprm file'%(bank)
S = instLines[il]
continue
else: #a non #Bank file
il += 1
S = instLines[il]
Found = True
if '"""' in S:
delim = '"""'
elif "'''" in S:
delim = "'''"
else:
S = S.replace(' ','')
SS = S.strip().split(';')
for s in SS:
[item,val] = s.split(':',1)
newItems.append(item)
try:
newVals.append(float(val))
except ValueError:
newVals.append(val)
il += 1
continue
# read multiline values, delimited by ''' or """
item,val = S.strip().split(':',1)
val = val.replace(delim,'').rstrip()
val += '\n'
while True:
il += 1
if il >= len(instLines): break
S = instLines[il]
if delim in S:
val += S.replace(delim,'').rstrip()
val += '\n'
break
else:
val += S.rstrip()
val += '\n'
newItems.append(item)
newVals.append(val)
il += 1
if 'Lam1' in newItems:
rd.Sample.update({'Type':'Bragg-Brentano','Shift':[0.,False],'Transparency':[0.,False],
'SurfRoughA':[0.,False],'SurfRoughB':[0.,False]})
else:
rd.Sample.update({'Type':'Debye-Scherrer','Absorption':[0.,False],'DisplaceX':[0.,False],'DisplaceY':[0.,False]})
return [G2fil.makeInstDict(newItems,newVals,len(newVals)*[False,]),{}]
def ReadPowderIparm(self,instfile,bank,databanks,rd):
'''Read a GSAS (old) instrument parameter file
:param str instfile: name of instrument parameter file
:param int bank: the bank number read in the raw data file
:param int databanks: the number of banks in the raw data file.
If the number of banks in the data and instrument parameter files
agree, then the sets of banks are assumed to match up and bank
is used to select the instrument parameter file. If not and not TOF,
the user is asked to make a selection.
:param obj rd: the raw data (histogram) data object. This
sets rd.instbank.
'''
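# Note on the (old) GSAS iparm layout assumed below: each non-comment line is a
# fixed-width record whose first 12 characters are the key and the remainder the value,
#   Iparm[S[:12]] = S[12:-1]
# Bank-specific keys carry the bank number in key[4:6], which is what the renumbering
# to bank 1 at the end of this routine relies on.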
if not os.path.exists(instfile): # no such file
return {}
fp = 0
Iparm = {}   # define here so a failed open() below cannot leave Iparm undefined
try:
fp = open(instfile,'r')
for S in fp:
if '#' in S[0]:
continue
Iparm[S[:12]] = S[12:-1]
except IOError:
print(u'Error reading file: {}'.format(instfile))
if fp:
fp.close()
ibanks = int(Iparm.get('INS BANK ','1').strip())
if ibanks == 1: # there is only one bank here, return it
rd.instbank = 1
rd.powderentry[2] = 1
return Iparm
if 'PNT' in Iparm['INS HTYPE ']: #allow mismatch between banks in data iparm file for TOF
rd.instbank = bank
elif ibanks != databanks or bank is None:
choices = []
for i in range(1,1+ibanks):
choices.append('Bank '+str(i))
bank = 1 + G2G.BlockSelector(
choices, self,
title=u'Select an instrument parameter bank for '+
os.path.split(rd.powderentry[0])[1]+u' BANK '+str(bank)+
u'\nOr use Cancel to select from the default parameter sets',
header='Block Selector')
if bank is None: return {}
# pull out the requested bank # from the data, and change the bank number to 1
IparmS = {}
for key in Iparm:
if 'INS' in key[:3]: #skip around rubbish lines in some old iparm files
if key[4:6] == " ":
IparmS[key] = Iparm[key]
elif int(key[4:6].strip()) == bank:
IparmS[key[:4]+' 1'+key[6:]] = Iparm[key]
rd.instbank = bank
return IparmS
def GetPowderIparm(self,rd, prevIparm, lastIparmfile, lastdatafile):
'''Open and read an instrument parameter file for a data file
Returns the list of parameters used in the data tree
:param obj rd: the raw data (histogram) data object.
:param str prevIparm: not used
:param str lastIparmfile: Name of last instrument parameter
file that was read, or an empty string.
:param str lastdatafile: Name of last data file that was read.
:returns: a list of two dicts, the first containing instrument parameters
and the second used for TOF lookup tables for profile coeff.
'''
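# Search order implemented below (see the numbered comments in the body):
#   1) a parameter file named like the data file with extension .prm/.inst/.ins/.instprm
#   2) the file named by rd.instparm, or the last instrument parameter file used
#   3) a file chosen by the user in a dialog, falling back to GetDefaultParms()
# Minimal usage sketch (as in OnDummyPowder below):
#   Iparm = self.GetPowderIparm(rd, None, '', '')
#   if Iparm is not None: Iparm1, Iparm2 = Iparm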
def GetDefaultParms(self,rd):
'''Solicits from the user a default set of parameters and returns an Inst parm dict
:param self: refers to the GSASII main class
:param rd: importer data structure
:returns: dict: Instrument parameter dictionary
'''
sind = lambda x: math.sin(x*math.pi/180.)
tand = lambda x: math.tan(x*math.pi/180.)
while True: # loop until we get a choice
choices = []
head = 'Select from default instrument parameters for '+rd.idstring
for l in dI.defaultIparm_lbl:
choices.append('Defaults for '+l)
res = G2G.BlockSelector(choices,ParentFrame=self,title=head,
header='Select default inst parms',useCancel=True)
if res is None: return None
rd.instfile = ''
if 'lab data' in choices[res]:
rd.Sample.update({'Type':'Bragg-Brentano','Shift':[0.,False],'Transparency':[0.,False],
'SurfRoughA':[0.,False],'SurfRoughB':[0.,False]})
else:
rd.Sample.update({'Type':'Debye-Scherrer','Absorption':[0.,False],'DisplaceX':[0.,False],
'DisplaceY':[0.,False]})
if 'Generic' in choices[res]:
dlg = G2G.MultiDataDialog(self,title='Generic TOF detector bank',
prompts=['Total FP','2-theta',],values=[25.0,150.,],
limits=[[6.,200.],[5.,175.],],formats=['%6.2f','%6.1f',])
if dlg.ShowModal() == wx.ID_OK: #strictly empirical approx.
FP,tth = dlg.GetValues()
difC = 505.632*FP*sind(tth/2.)
sig1 = 50.+2.5e-6*(difC/tand(tth/2.))**2
bet1 = .00226+7.76e+11/difC**4
rd.instmsg = 'default: '+dI.defaultIparm_lbl[res]
Inst = self.ReadPowderInstprm(dI.defaultIparms[res],bank,numbanks,rd)
Inst[0]['difC'] = [difC,difC,0]
Inst[0]['sig-1'] = [sig1,sig1,0]
Inst[0]['beta-1'] = [bet1,bet1,0]
return Inst #this is [Inst1,Inst2] a pair of dicts
dlg.Destroy()
else:
rd.instmsg = 'default: '+dI.defaultIparm_lbl[res]
inst1,inst2 = self.ReadPowderInstprm(dI.defaultIparms[res],bank,numbanks,rd)
if rd.instdict.get('wave'):
inst1['Lam'][0] = rd.instdict.get('wave')
inst1['Lam'][1] = rd.instdict.get('wave')
return [inst1,inst2]
# stuff we might need from the reader
filename = rd.powderentry[0]
bank = rd.powderentry[2]
numbanks = rd.numbanks
#1st priority: is there an instrument parameter file matching the current file
# with extension .instprm, .prm, .inst, or .ins? If so read it
basename = os.path.splitext(filename)[0]
for ext in '.prm','.inst','.ins','.instprm':
if self.zipfile:
instfile = G2IO.ExtractFileFromZip(self.zipfile,
selection=os.path.split(basename + ext)[1],parent=self)
if instfile == None:
continue
else:
instfile = basename + ext
if not os.path.exists(instfile):
continue
if 'instprm' in instfile:
Lines = self.OpenPowderInstprm(instfile)
instParmList = self.ReadPowderInstprm(Lines,bank,numbanks,rd) #this is [Inst1,Inst2] a pair of dicts
if 'list' in str(type(instParmList)):
rd.instfile = instfile
rd.instmsg = 'GSAS-II file '+instfile
return instParmList
else:
#print 'debug: open/read failed',instfile
pass # fail silently
else:
Iparm = self.ReadPowderIparm(instfile,bank,numbanks,rd)
if Iparm:
#print 'debug: success'
rd.instfile = instfile
rd.instmsg = instfile + ' bank ' + str(rd.instbank)
return G2fil.SetPowderInstParms(Iparm,rd)
else:
#print 'debug: open/read failed',instfile
pass # fail silently
#2nd priority: is there an instrument parameter file defined for the current data set?
# or if this is a read on a set of files, use the last one again
#if rd.instparm as found in data file header or (lastdatafile == filename and lastIparmfile):
if rd.instparm or lastIparmfile:
if rd.instparm:
instfile = os.path.join(os.path.split(filename)[0],rd.instparm)
else:
# for multiple reads of one data file, reuse the inst parm file
instfile = lastIparmfile
# if self.zipfile:
# instfile = G2IO.ExtractFileFromZip(self.zipfile,
# selection=os.path.split(instfile)[1],parent=self)
if instfile != None and os.path.exists(instfile):
#print 'debug: try read',instfile
if 'instprm' in instfile: #GSAS-II file must have .instprm as extension
Lines = self.OpenPowderInstprm(instfile)
if Lines is not None:
instParmList = self.ReadPowderInstprm(Lines,bank,numbanks,rd) #this is [Inst1,Inst2] a pair of dicts
else: #old GSAS style iparm file - could be named anything!
Iparm = self.ReadPowderIparm(instfile,bank,numbanks,rd)
if Iparm:
#print 'debug: success'
rd.instfile = instfile
rd.instmsg = instfile + ' bank ' + str(rd.instbank)
instParmList = G2fil.SetPowderInstParms(Iparm,rd) #this is [Inst1,Inst2] a pair of dicts
if 'list' in str(type(instParmList)): #record stuff & return stuff
rd.instfile = instfile
rd.instmsg = 'GSAS-II file '+instfile
return instParmList
else: #bad iparms - try default
rd.instmsg = instParmList #an error message
return GetDefaultParms(self,rd)
else:
self.ErrorDialog('Open Error',u'Error opening instrument parameter file '
+u'{} requested by file {}'.format(instfile,filename))
#Finally - ask user for Instrument parameters file - seems it can't be in a zip file
while True: # loop until we get a file that works or we get a cancel
instfile = ''
pth = G2G.GetImportPath(self)
if not pth: pth = '.'
extOrd = [0,1]
if GSASIIpath.GetConfigValue('Instprm_default',False):
extOrd = [1,0]
extList = ['GSAS iparm file (*.prm,*.inst,*.ins)|*.prm;*.inst;*.ins|','GSAS-II iparm file (*.instprm)|*.instprm|']
dlg = wx.FileDialog(self,
u'Choose inst. param file for "'+rd.idstring+u'" (or Cancel for default)',
pth, '',extList[extOrd[0]]+extList[extOrd[1]]+'All files (*.*)|*.*', wx.FD_OPEN)
if os.path.exists(lastIparmfile):
dlg.SetFilename(lastIparmfile)
if dlg.ShowModal() == wx.ID_OK:
instfile = dlg.GetPath()
dlg.Destroy()
if not instfile:
return GetDefaultParms(self,rd) #on Cancel/break
if 'instprm' in instfile:
Lines = self.OpenPowderInstprm(instfile)
if Lines is not None:
instParmList = self.ReadPowderInstprm(Lines,bank,numbanks,rd) #this is [Inst1,Inst2] a pair of dicts
if 'list' in str(type(instParmList)):
rd.instfile = instfile
rd.instmsg = 'GSAS-II file '+instfile
return instParmList
else:
rd.instmsg = instParmList #an error message
return GetDefaultParms(self,rd)
else:
Iparm = self.ReadPowderIparm(instfile,bank,numbanks,rd)
if Iparm:
#print 'debug: success with',instfile
rd.instfile = instfile
rd.instmsg = instfile + ' bank ' + str(rd.instbank)
return G2fil.SetPowderInstParms(Iparm,rd)
else:
self.ErrorDialog('Read Error',
u'Error opening/reading file {}'.format(instfile))
def EnableRefineCommand(self):
haveData = False
# check for phases connected to histograms
sub = GetGPXtreeItemId(self,self.root,'Phases')
if sub:
item, cookie = self.GPXtree.GetFirstChild(sub)
while item: # loop over phases
data = self.GPXtree.GetItemPyData(item)
item, cookie = self.GPXtree.GetNextChild(sub, cookie)
UseList = data['Histograms']
if UseList: haveData = True
if haveData:
self.dataWindow.DataMenu.Enable(G2G.wxID_DATADELETE,True)
for item in self.Refine: item.Enable(True)
else:
self.dataWindow.DataMenu.Enable(G2G.wxID_DATADELETE,False)
for item in self.Refine: item.Enable(False)
def OnImportPowder(self,event):
'''Called in response to an Import/Powder Data/... menu item
to read a powder diffraction data set.
dict self.ImportMenuId is used to look up the specific
reader item associated with the menu item, which will be
None for the last menu item, which is the "guess" option
where all appropriate formats will be tried.
Also reads an instrument parameter file for each dataset.
'''
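# For reference (summary of the code below): each dataset read here ends up in the tree as
#   PWDR <name> -> [valuesdict, rd.powderdata]
# where rd.powderdata holds six arrays [x, yobs, weights, ycalc, ybkg, yobs-ycalc], with
# child items Comments, Limits, Background, Instrument Parameters, Sample Parameters,
# Peak List, Index Peak List, Unit Cells List and Reflection Lists.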
# get a list of existing histograms
PWDRlist = []
if self.GPXtree.GetCount():
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if name.startswith('PWDR ') and name not in PWDRlist:
PWDRlist.append(name)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
# look up which format was requested
reqrdr = self.ImportMenuId.get(event.GetId())
rdlist = self.OnImportGeneric(
reqrdr,self.ImportPowderReaderlist,'Powder Data',multiple=True)
if len(rdlist) == 0: return
self.CheckNotebook()
Iparm = None
lastIparmfile = ''
lastdatafile = ''
newHistList = []
# lastVals = []
self.EnablePlot = False
Iparms = {}
for rd in rdlist:
if 'Instrument Parameters' in rd.pwdparms:
Iparm1,Iparm2 = rd.pwdparms['Instrument Parameters']
elif Iparms and not lastIparmfile:
Iparm1,Iparm2 = Iparms
else:
# get instrument parameters for each dataset, unless already set
# if lastIparmfile: # is this histogram like previous?
# if lastVals != (rd.powderdata[0].min(),rd.powderdata[0].max(),len(rd.powderdata[0])):
# lastIparmfile = ''
Iparms = self.GetPowderIparm(rd, Iparm, lastIparmfile, lastdatafile)
if not Iparms: #may have bailed out
Id = 0
continue
Iparm1,Iparm2 = Iparms
if rd.repeat_instparm:
lastIparmfile = rd.instfile
else:
Iparms = {}
# lastVals = (rd.powderdata[0].min(),rd.powderdata[0].max(),len(rd.powderdata[0]))
# override any keys in read instrument parameters with ones set in import
for key in Iparm1:
if key in rd.instdict:
Iparm1[key] = rd.instdict[key]
lastdatafile = rd.powderentry[0]
if 'phoenix' in wx.version():
HistName = 'PWDR '+rd.idstring
else:
HistName = 'PWDR '+G2obj.StripUnicode(rd.idstring,'_')
# make new histogram names unique
if HistName in PWDRlist:
dlg = wx.MessageDialog(self,'Skip %s?'%(HistName),'Duplicate data name',wx.YES_NO)
try:
if dlg.ShowModal() == wx.ID_YES:
Id = 0
continue
finally:
dlg.Destroy()
HistName = G2obj.MakeUniqueLabel(HistName,PWDRlist)
try:
print('Read powder data '+HistName+
' from file '+G2obj.StripUnicode(rd.readfilename) +
' (format: '+ rd.formatName +
'). Inst parameters from '+G2obj.StripUnicode(rd.instmsg))
except:
print('Read powder data')
# data are read, now store them in the tree
Id = self.GPXtree.AppendItem(parent=self.root,text=HistName)
if 'T' in Iparm1['Type'][0]:
if not rd.clockWd and rd.GSAS:
rd.powderdata[0] *= 100. #put back the CW centideg correction
cw = np.diff(rd.powderdata[0])
rd.powderdata[0] = rd.powderdata[0][:-1]+cw/2.
if rd.GSAS: #NB: old GSAS wanted intensities*CW even if normalized!
npts = min(len(rd.powderdata[0]),len(rd.powderdata[1]),len(cw))
rd.powderdata[1] = rd.powderdata[1][:npts]/cw[:npts]
rd.powderdata[2] = rd.powderdata[2][:npts]*cw[:npts]**2 #1/var=w at this point
else: #NB: from topas/fullprof type files
rd.powderdata[1] = rd.powderdata[1][:-1]
rd.powderdata[2] = rd.powderdata[2][:-1]
if 'Itype' in Iparm2:
Ibeg = np.searchsorted(rd.powderdata[0],Iparm2['Tminmax'][0])
Ifin = np.searchsorted(rd.powderdata[0],Iparm2['Tminmax'][1])
rd.powderdata[0] = rd.powderdata[0][Ibeg:Ifin]
YI,WYI = G2pwd.calcIncident(Iparm2,rd.powderdata[0])
rd.powderdata[1] = rd.powderdata[1][Ibeg:Ifin]/YI
var = 1./rd.powderdata[2][Ibeg:Ifin]
var += WYI*rd.powderdata[1]**2
var /= YI**2
rd.powderdata[2] = 1./var
rd.powderdata[1] = np.where(np.isinf(rd.powderdata[1]),0.,rd.powderdata[1])
rd.powderdata[3] = np.zeros_like(rd.powderdata[0])
rd.powderdata[4] = np.zeros_like(rd.powderdata[0])
rd.powderdata[5] = np.zeros_like(rd.powderdata[0])
Ymin = np.min(rd.powderdata[1])
Ymax = np.max(rd.powderdata[1])
valuesdict = {
'wtFactor':1.0,
'Dummy':False,
'ranId':ran.randint(0,sys.maxsize),
'Offset':[0.0,0.0],'delOffset':0.02*Ymax,'refOffset':-.1*Ymax,'refDelt':0.1*Ymax,
'Yminmax':[Ymin,Ymax]
}
# apply user-supplied corrections to powder data
if 'CorrectionCode' in Iparm1:
print('Applying corrections from instprm file')
corr = Iparm1['CorrectionCode'][0]
try:
exec(corr)
print('done')
except Exception as err:
print(u'error: {}'.format(err))
print('with commands -------------------')
print(corr)
print('---------------------------------')
finally:
del Iparm1['CorrectionCode']
rd.Sample['ranId'] = valuesdict['ranId'] # this should be removed someday
self.GPXtree.SetItemPyData(Id,[valuesdict,rd.powderdata])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Comments'),
rd.comments)
Tmin = min(rd.powderdata[0])
Tmax = max(rd.powderdata[0])
Tmin1 = Tmin
if 'NT' in Iparm1['Type'][0] and G2lat.Pos2dsp(Iparm1,Tmin) < 0.4:
Tmin1 = G2lat.Dsp2pos(Iparm1,0.4)
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Limits'),
rd.pwdparms.get('Limits',[(Tmin,Tmax),[Tmin1,Tmax]])
)
self.PatternId = GetGPXtreeItemId(self,Id,'Limits')
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Background'),
rd.pwdparms.get('Background',
[['chebyschev-1',True,3,1.0,0.0,0.0],{'nDebye':0,'debyeTerms':[],'nPeaks':0,'peaksList':[],
'background PWDR':['',1.0,False]}]))
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Instrument Parameters'),
[Iparm1,Iparm2])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Sample Parameters'),
rd.Sample)
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Peak List')
,{'peaks':[],'sigDict':{}})
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Index Peak List'),
[[],[]])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Unit Cells List'),
[])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Reflection Lists'),
{})
# if any Control values have been set, move them into tree
Controls = self.GPXtree.GetItemPyData(GetGPXtreeItemId(self,self.root, 'Controls'))
Controls.update(rd.Controls)
newHistList.append(HistName)
rd.repeat_instparm = False #clear the iparm reuse flag
else:
self.EnablePlot = True
if Id:
self.GPXtree.Expand(Id)
self.GPXtree.SelectItem(Id)
if not newHistList: return # somehow, no new histograms
# make a list of phase names
phaseRIdList,usedHistograms = self.GetPhaseInfofromTree()
phaseNameList = list(usedHistograms.keys()) # phase names in use
if not phaseNameList: return # no phases yet, nothing to do
header = 'Select phase(s) to link\nto the newly-read data:'
for Name in newHistList:
header += '\n '+str(Name)
result = G2G.ItemSelector(phaseNameList,self,header,header='Add to phase(s)',multiple=True)
if not result: return
# connect new phases to histograms
sub = GetGPXtreeItemId(self,self.root,'Phases')
if not sub:
raise Exception('ERROR -- why are there no phases here?')
item, cookie = self.GPXtree.GetFirstChild(sub)
iph = -1
while item: # loop over (new) phases
iph += 1
data = self.GPXtree.GetItemPyData(item)
item, cookie = self.GPXtree.GetNextChild(sub, cookie)
if iph not in result: continue
generalData = data['General']
SGData = generalData['SGData']
UseList = data['Histograms']
NShkl = len(G2spc.MustrainNames(SGData))
NDij = len(G2spc.HStrainNames(SGData))
for histoName in newHistList:
UseList[histoName] = SetDefaultDData('PWDR',histoName,NShkl=NShkl,NDij=NDij)
Id = GetGPXtreeItemId(self,self.root,histoName)
refList = self.GPXtree.GetItemPyData(
GetGPXtreeItemId(self,Id,'Reflection Lists'))
refList[generalData['Name']] = []
self.EnableRefineCommand()
return # success
def OnDummyPowder(self,event):
'''Called in response to Import/Powder Data/Simulate menu item
to create a Dummy powder diffraction data set.
Reads an instrument parameter file and then gets input from the user
'''
# get a list of existing histograms
PWDRlist = []
if self.GPXtree.GetCount():
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if name.startswith('PWDR ') and name not in PWDRlist:
PWDRlist.append(name)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
# Initialize a base class reader
rd = G2obj.ImportPowderData(
extensionlist=tuple(),
strictExtension=False,
formatName = 'Simulate dataset',
longFormatName = 'Compute a simulated pattern')
rd.powderentry[0] = '' # no filename
# #self.powderentry[1] = pos # bank offset (N/A here)
rd.powderentry[2] = 1 # only one bank
rd.comments.append('This is a dummy dataset for powder pattern simulation')
self.CheckNotebook()
Iparm = None
lastdatafile = ''
self.zipfile = None
# get instrument parameters for it
Iparm = self.GetPowderIparm(rd, Iparm, '', lastdatafile)
if Iparm is None:
return
Iparm1, Iparm2 = Iparm
if 'T' in Iparm1['Type'][0]:
rd.idstring = ' TOF neutron simulation'
simType = 'TOF'
else:
# need to get name, 2theta start, end, step
rd.idstring = ' CW'
simType = 'CW'
if 'X' in Iparm1['Type'][0]:
rd.idstring = 'CW x-ray simulation'
else:
rd.idstring = 'CW neutron simulation'
# base initial range on wavelength
wave = Iparm1.get('Lam')
if wave:
wave = wave[0]
else:
wave = Iparm1.get('Lam1')
if wave:
wave = wave[0]
N = 0
while (N < 3): # insist on a dataset with a few points
if 'TOF' in rd.idstring:
names = ('dataset name', 'start TOF(ms)', 'end TOF(ms)', 'DT/T')
inp = [rd.idstring, 10.,80.,0.0005] # see names for what's what
dlg = G2G.ScrolledMultiEditor(
self,[inp] * len(inp),range(len(inp)),names,
header='Enter simulation name and range',
minvals=(None,.5,1.0,0.0001),
maxvals=(None,200.,200.,.001),
sizevals=((225,-1),)
)
else:
names = ('dataset name', 'start angle', 'end angle', 'step size')
if not wave or wave < 1.0:
inp = [rd.idstring, 10.,40.,0.005] # see names for what's what
else:
inp = [rd.idstring, 10.,80.,0.01] # see names for what's what
dlg = G2G.ScrolledMultiEditor(
self,[inp] * len(inp),range(len(inp)),names,
header='Enter simulation name and range',
minvals=(None,0.001,0.001,0.0001),
maxvals=(None,180.,180.,.1),
sizevals=((225,-1),)
)
dlg.CenterOnParent()
if dlg.ShowModal() == wx.ID_OK:
if inp[1] > inp[2]:
end,start,step = inp[1:]
else:
start,end,step = inp[1:]
step = abs(step)
else:
return False
if 'TOF' in rd.idstring:
N = (np.log(end)-np.log(start))/step
x = np.exp((np.arange(0,N))*step+np.log(start*1000.))
N = len(x)
else:
N = int((end-start)/step)+1
x = np.linspace(start,end,N,True)
N = len(x)
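# note: the TOF grid above is logarithmic (constant dT/T ~ step; start/end entered in ms
# and scaled by 1000, presumably to the internal microsecond unit), while the CW grid is
# a simple linear scan from start to end in N steps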
rd.powderdata = [
np.array(x), # x-axis values
np.zeros_like(x), # powder pattern intensities
np.ones_like(x), # 1/sig(intensity)^2 values (weights)
np.zeros_like(x), # calc. intensities (zero)
np.zeros_like(x), # calc. background (zero)
np.zeros_like(x), # obs-calc profiles
]
Tmin = rd.powderdata[0][0]
Tmax = rd.powderdata[0][-1]
# data are read, now store them in the tree
HistName = inp[0]
HistName = 'PWDR '+HistName
HistName = G2obj.MakeUniqueLabel(HistName,PWDRlist) # make new histogram names unique
Id = self.GPXtree.AppendItem(parent=self.root,text=HistName)
Ymin = np.min(rd.powderdata[1])
Ymax = np.max(rd.powderdata[1])
valuesdict = {
'wtFactor':1.0,
'Dummy':True,'simType':simType,
'ranId':ran.randint(0,sys.maxsize),
'Offset':[0.0,0.0],'delOffset':0.02*Ymax,'refOffset':-.1*Ymax,'refDelt':0.1*Ymax,
'Yminmax':[Ymin,Ymax]
}
self.GPXtree.SetItemPyData(Id,[valuesdict,rd.powderdata])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Comments'),
rd.comments)
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Limits'),
[(Tmin,Tmax),[Tmin,Tmax]])
self.PatternId = GetGPXtreeItemId(self,Id,'Limits')
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Background'),
[['chebyschev-1',True,3,1.0,0.0,0.0],
{'nDebye':0,'debyeTerms':[],'nPeaks':0,'peaksList':[],'background PWDR':['',1.0,False]}])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Instrument Parameters'),
[Iparm1,Iparm2])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Sample Parameters'),
rd.Sample)
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Peak List')
,{'peaks':[],'sigDict':{}})
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Index Peak List'),
[[],[]])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Unit Cells List'),
[])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Reflection Lists'),
{})
self.GPXtree.Expand(Id)
self.GPXtree.SelectItem(Id)
print(u'Added simulation powder data {}'.format(HistName)+
' with parameters from {}'.format(rd.instmsg))
# make a list of phase names
phaseRIdList,usedHistograms = self.GetPhaseInfofromTree()
phaseNameList = list(usedHistograms.keys()) # phase names in use
if not phaseNameList: return # no phases yet, nothing to do
header = 'Select phase(s) to add the new\npowder simulation (dummy) dataset to:'
result = G2G.ItemSelector(phaseNameList,self,header,header='Add to phase(s)',multiple=True)
if not result: return
# connect new phases to histograms
sub = GetGPXtreeItemId(self,self.root,'Phases')
if not sub:
raise Exception('ERROR -- why are there no phases here?')
item, cookie = self.GPXtree.GetFirstChild(sub)
iph = -1
while item: # loop over (new) phases
iph += 1
data = self.GPXtree.GetItemPyData(item)
item, cookie = self.GPXtree.GetNextChild(sub, cookie)
if iph not in result: continue
generalData = data['General']
SGData = generalData['SGData']
UseList = data['Histograms']
NShkl = len(G2spc.MustrainNames(SGData))
NDij = len(G2spc.HStrainNames(SGData))
UseList[HistName] = SetDefaultDData('PWDR',HistName,NShkl=NShkl,NDij=NDij)
Id = GetGPXtreeItemId(self,self.root,HistName)
refList = self.GPXtree.GetItemPyData(
GetGPXtreeItemId(self,Id,'Reflection Lists'))
refList[generalData['Name']] = []
cId = GetGPXtreeItemId(self,self.root, 'Controls')
Controls = self.GPXtree.GetItemPyData(cId)
Controls['max cyc'] = 0
self.EnableRefineCommand()
return # success
def AddSimulatedPowder(self,ttArr,intArr,HistName,Lam1,Lam2):
'''Create a PWDR entry for a computed powder pattern
'''
# get a list of existing histograms
PWDRlist = []
if self.GPXtree.GetCount():
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if name.startswith('PWDR ') and name not in PWDRlist:
PWDRlist.append(name)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
# Initialize a base class reader
rd = G2obj.ImportPowderData(
extensionlist=tuple(),
strictExtension=False,
formatName = 'FPA Simulated dataset',
longFormatName = 'Fundamental Parameters simulated pattern')
rd.powderentry[0] = '' # no filename
# #self.powderentry[1] = pos # bank offset (N/A here)
rd.powderentry[2] = 1 # only one bank
rd.comments.append('This is a powder pattern simulated with Fundamental Parameters')
self.CheckNotebook()
#self.zipfile = None
# get instrument parameters for it
rd.Sample.update({'Type':'Bragg-Brentano','Shift':[0.,False],'Transparency':[0.,False],
'SurfRoughA':[0.,False],'SurfRoughB':[0.,False]})
Iparm1, Iparm2 = G2fil.ReadPowderInstprm(dI.defaultIparms[0],1,1,rd)
rd.idstring = ' CW'
simType = 'CW'
# set wavelength
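        # If a second (Kalpha2) wavelength is supplied, both Lam1 and Lam2 are kept in
        # the instrument parameters; otherwise they are collapsed into a single 'Lam'
        # entry so the pattern is treated as monochromatic.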
if Lam2:
Iparm1['Lam1'][0] = Lam1
Iparm1['Lam2'][0] = Lam2
Iparm1['Lam1'][1] = Lam1
Iparm1['Lam2'][1] = Lam2
else:
Iparm1['Lam'] = Iparm1['Lam1']
del Iparm1['Lam1'],Iparm1['Lam2']
Iparm1['Lam'][0] = Lam1
Iparm1['Lam'][1] = Lam1
rd.powderdata = [
np.array(ttArr), # x-axis values
np.array(intArr), # powder pattern intensities
np.ones_like(ttArr), # 1/sig(intensity)^2 values (weights)
np.zeros_like(intArr), # calc. intensities (zero)
np.zeros_like(ttArr), # calc. background (zero)
np.zeros_like(ttArr), # obs-calc profiles
]
Tmin = rd.powderdata[0][0]
Tmax = rd.powderdata[0][-1]
# data are read, now store them in the tree
HistName = 'PWDR '+HistName
HistName = G2obj.MakeUniqueLabel(HistName,PWDRlist) # make new histogram names unique
Id = self.GPXtree.AppendItem(parent=self.root,text=HistName)
Ymin = np.min(rd.powderdata[1])
Ymax = np.max(rd.powderdata[1])
valuesdict = {
'wtFactor':1.0,
'Dummy':True,'simType':simType,
'ranId':ran.randint(0,sys.maxsize),
'Offset':[0.0,0.0],'delOffset':0.02*Ymax,'refOffset':-.1*Ymax,'refDelt':0.1*Ymax,
'Yminmax':[Ymin,Ymax]
}
self.GPXtree.SetItemPyData(Id,[valuesdict,rd.powderdata])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Comments'),
rd.comments)
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Limits'),
[(Tmin,Tmax),[Tmin,Tmax]])
self.PatternId = GetGPXtreeItemId(self,Id,'Limits')
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Background'),
[['chebyschev-1',True,3,1.0,0.0,0.0],
{'nDebye':0,'debyeTerms':[],'nPeaks':0,'peaksList':[],'background PWDR':['',1.0,False]}])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Instrument Parameters'),
[Iparm1,Iparm2])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Sample Parameters'),
rd.Sample)
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Peak List')
,{'peaks':[],'sigDict':{}})
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Index Peak List'),
[[],[]])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Unit Cells List'),
[])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Reflection Lists'),
{})
self.GPXtree.Expand(Id)
self.GPXtree.SelectItem(Id)
print(u'Added simulation powder data {}'.format(HistName))
return Id
def OnPreferences(self,event):
'Edit the GSAS-II configuration variables'
dlg = G2G.SelectConfigSetting(self)
        dlg.ShowModal()     # the modal return value is not used here
dlg.Destroy()
def EditProxyInfo(self,event):
'''Edit the proxy information used by subversion
'''
h,p,e = host,port,etc = GSASIIpath.getsvnProxy()
labels = ['Proxy address','proxy port']
values = [host,port]
i = 1
for item in etc:
i += 1
labels.append('extra svn arg #'+str(i))
values.append(item)
msg = '''This dialog allows customization of the subversion (svn)
command. If a proxy server is needed, the address/host and port
can be supplied here. This will generate the command-line options
--config-option servers:global:http-proxy-host=*host*
--config-option servers:global:http-proxy-port=*port*
Additional subversion command line options can be supplied here
by pressing the '+' button. As examples of options that might be of
value, use two extra lines to add:
--config-dir
DIR
to specify an alternate configuration location.
Or, use four extra lines to add
--config-option
servers:global:http-proxy-username=*account*
--config-option
servers:global:http-proxy-password=*password*
to specify a proxy user name and password.
Note that strings marked *value* are items that will be configured
by the user. See http://svnbook.red-bean.com for more information on
subversion.
'''
dlg = G2G.MultiStringDialog(self,'Enter proxy values',
labels,values,size=300,addRows=True,hlp=msg)
if dlg.Show():
values = dlg.GetValues()
h,p = values[:2]
e = values[2:]
dlg.Destroy()
if h != host or p != port or etc != e:
localproxy = proxyinfo = os.path.join(
os.path.expanduser('~/.G2local/'),
"proxyinfo.txt")
if not os.path.exists(proxyinfo):
proxyinfo = os.path.join(GSASIIpath.path2GSAS2,"proxyinfo.txt")
GSASIIpath.setsvnProxy(h,p,e)
if not h.strip() and not e:
if os.path.exists(localproxy): os.remove(localproxy)
if os.path.exists(proxyinfo): os.remove(proxyinfo)
return
try:
fp = open(proxyinfo,'w')
except:
fp = open(localproxy,'w')
proxyinfo = localproxy
try:
fp.write(h.strip()+'\n')
fp.write(p.strip()+'\n')
for i in e:
if i.strip():
fp.write(i.strip()+'\n')
fp.close()
except Exception as err:
print('Error writing file {}:\n{}'.format(proxyinfo,err))
print('File {} written'.format(proxyinfo))
def _Add_ImportMenu_smallangle(self,parent):
        '''configure the Small Angle Data menus according to the readers found in _init_Imports
'''
submenu = wx.Menu()
item = parent.AppendSubMenu(submenu,'Small Angle Data','Import small angle data')
for reader in self.ImportSmallAngleReaderlist:
item = submenu.Append(wx.ID_ANY,u'from '+reader.formatName+u' file',reader.longFormatName)
self.ImportMenuId[item.GetId()] = reader
self.Bind(wx.EVT_MENU, self.OnImportSmallAngle, id=item.GetId())
# item = submenu.Append(wx.ID_ANY,
# help='Import small angle data, use file to try to determine format',
# kind=wx.ITEM_NORMAL,text='guess format from file')
# self.Bind(wx.EVT_MENU, self.OnImportSmallAngle, id=item.GetId())
def OnImportSmallAngle(self,event):
'''Called in response to an Import/Small Angle Data/... menu item
to read a small angle diffraction data set.
dict self.ImportMenuId is used to look up the specific
reader item associated with the menu item, which will be
None for the last menu item, which is the "guess" option
where all appropriate formats will be tried.
'''
def GetSASDIparm(reader):
parm = reader.instdict
Iparm = {'Type':[parm['type'],parm['type'],0],'Lam':[parm['wave'],
parm['wave'],0],'Azimuth':[0.,0.,0]}
return Iparm,{}
# get a list of existing histograms
SASDlist = []
if self.GPXtree.GetCount():
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if name.startswith('SASD ') and name not in SASDlist:
SASDlist.append(name)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
# look up which format was requested
reqrdr = self.ImportMenuId.get(event.GetId())
rdlist = self.OnImportGeneric(
reqrdr,self.ImportSmallAngleReaderlist,'Small Angle Data',multiple=True)
if len(rdlist) == 0: return
self.CheckNotebook()
newHistList = []
self.EnablePlot = False
for rd in rdlist:
HistName = rd.idstring
HistName = 'SASD '+HistName
# make new histogram names unique
HistName = G2obj.MakeUniqueLabel(HistName,SASDlist)
print ('Read small angle data '+HistName+ \
' from file '+self.lastimport)
# data are read, now store them in the tree
Id = self.GPXtree.AppendItem(parent=self.root,text=HistName)
Iparm1,Iparm2 = GetSASDIparm(rd)
# if 'T' in Iparm1['Type'][0]:
# if not rd.clockWd and rd.GSAS:
# rd.powderdata[0] *= 100. #put back the CW centideg correction
# cw = np.diff(rd.powderdata[0])
# rd.powderdata[0] = rd.powderdata[0][:-1]+cw/2.
# rd.powderdata[1] = rd.powderdata[1][:-1]/cw
# rd.powderdata[2] = rd.powderdata[2][:-1]*cw**2 #1/var=w at this point
# if 'Itype' in Iparm2:
# Ibeg = np.searchsorted(rd.powderdata[0],Iparm2['Tminmax'][0])
# Ifin = np.searchsorted(rd.powderdata[0],Iparm2['Tminmax'][1])
# rd.powderdata[0] = rd.powderdata[0][Ibeg:Ifin]
# YI,WYI = G2pwd.calcIncident(Iparm2,rd.powderdata[0])
# rd.powderdata[1] = rd.powderdata[1][Ibeg:Ifin]/YI
# var = 1./rd.powderdata[2][Ibeg:Ifin]
# var += WYI*rd.powderdata[1]**2
# var /= YI**2
# rd.powderdata[2] = 1./var
# rd.powderdata[3] = np.zeros_like(rd.powderdata[0])
# rd.powderdata[4] = np.zeros_like(rd.powderdata[0])
# rd.powderdata[5] = np.zeros_like(rd.powderdata[0])
Tmin = min(rd.smallangledata[0])
Tmax = max(rd.smallangledata[0])
valuesdict = {
'wtFactor':1.0,
'Dummy':False,
'ranId':ran.randint(0,sys.maxsize),
'Offset':[0.0,0.0],
}
rd.Sample['ranId'] = valuesdict['ranId'] # this should be removed someday
self.GPXtree.SetItemPyData(Id,[valuesdict,rd.smallangledata])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Comments'),
rd.comments)
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Limits'),
[(Tmin,Tmax),[Tmin,Tmax]])
self.PatternId = GetGPXtreeItemId(self,Id,'Limits')
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Instrument Parameters'),
[Iparm1,Iparm2])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Substances'),G2pdG.SetDefaultSubstances())
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Sample Parameters'),
rd.Sample)
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Models'),G2pdG.SetDefaultSASDModel())
newHistList.append(HistName)
else:
self.EnablePlot = True
self.GPXtree.Expand(Id)
self.GPXtree.SelectItem(Id)
if not newHistList: return # somehow, no new histograms
return # success
def _Add_ImportMenu_reflectometry(self,parent):
        '''configure the Reflectometry Data menus according to the readers found in _init_Imports
'''
submenu = wx.Menu()
item = parent.AppendSubMenu(submenu,'Reflectometry Data','Import reflectometry data')
for reader in self.ImportReflectometryReaderlist:
item = submenu.Append(wx.ID_ANY,u'from '+reader.formatName+u' file',reader.longFormatName)
self.ImportMenuId[item.GetId()] = reader
self.Bind(wx.EVT_MENU, self.OnImportReflectometry, id=item.GetId())
# item = submenu.Append(wx.ID_ANY,
# help='Import reflectometry data, use file to try to determine format',
# kind=wx.ITEM_NORMAL,text='guess format from file')
# self.Bind(wx.EVT_MENU, self.OnImportReflectometry, id=item.GetId())
def OnImportReflectometry(self,event):
'''Called in response to an Import/Reflectometry Data/... menu item
to read a reflectometry data set.
dict self.ImportMenuId is used to look up the specific
reader item associated with the menu item, which will be
None for the last menu item, which is the "guess" option
where all appropriate formats will be tried.
'''
def GetREFDIparm(reader):
parm = reader.instdict
Iparm = {'Type':[parm['type'],parm['type'],0],'Lam':[parm['wave'],
parm['wave'],0],'Azimuth':[0.,0.,0]}
return Iparm,{}
# get a list of existing histograms
REFDlist = []
if self.GPXtree.GetCount():
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if name.startswith('REFD ') and name not in REFDlist:
REFDlist.append(name)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
# look up which format was requested
reqrdr = self.ImportMenuId.get(event.GetId())
rdlist = self.OnImportGeneric(
reqrdr,self.ImportReflectometryReaderlist,'Reflectometry Data',multiple=True)
if len(rdlist) == 0: return
self.CheckNotebook()
newHistList = []
self.EnablePlot = False
for rd in rdlist:
HistName = rd.idstring
HistName = 'REFD '+HistName
# make new histogram names unique
HistName = G2obj.MakeUniqueLabel(HistName,REFDlist)
print ('Read reflectometry data '+HistName+ \
' from file '+self.lastimport)
# data are read, now store them in the tree
Id = self.GPXtree.AppendItem(parent=self.root,text=HistName)
Iparm1,Iparm2 = GetREFDIparm(rd)
# if 'T' in Iparm1['Type'][0]:
# if not rd.clockWd and rd.GSAS:
# rd.powderdata[0] *= 100. #put back the CW centideg correction
# cw = np.diff(rd.powderdata[0])
# rd.powderdata[0] = rd.powderdata[0][:-1]+cw/2.
# rd.powderdata[1] = rd.powderdata[1][:-1]/cw
# rd.powderdata[2] = rd.powderdata[2][:-1]*cw**2 #1/var=w at this point
# if 'Itype' in Iparm2:
# Ibeg = np.searchsorted(rd.powderdata[0],Iparm2['Tminmax'][0])
# Ifin = np.searchsorted(rd.powderdata[0],Iparm2['Tminmax'][1])
# rd.powderdata[0] = rd.powderdata[0][Ibeg:Ifin]
# YI,WYI = G2pwd.calcIncident(Iparm2,rd.powderdata[0])
# rd.powderdata[1] = rd.powderdata[1][Ibeg:Ifin]/YI
# var = 1./rd.powderdata[2][Ibeg:Ifin]
# var += WYI*rd.powderdata[1]**2
# var /= YI**2
# rd.powderdata[2] = 1./var
# rd.powderdata[3] = np.zeros_like(rd.powderdata[0])
# rd.powderdata[4] = np.zeros_like(rd.powderdata[0])
# rd.powderdata[5] = np.zeros_like(rd.powderdata[0])
Tmin = min(rd.reflectometrydata[0])
Tmax = max(rd.reflectometrydata[0])
ifDQ = np.any(rd.reflectometrydata[5])
valuesdict = {
'wtFactor':1.0,
'Dummy':False,
'ranId':ran.randint(0,sys.maxsize),
'Offset':[0.0,0.0],
'ifDQ':ifDQ
}
rd.Sample['ranId'] = valuesdict['ranId'] # this should be removed someday
self.GPXtree.SetItemPyData(Id,[valuesdict,rd.reflectometrydata])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Comments'),
rd.comments)
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Limits'),
[(Tmin,Tmax),[Tmin,Tmax]])
self.PatternId = GetGPXtreeItemId(self,Id,'Limits')
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Instrument Parameters'),
[Iparm1,Iparm2])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Substances'),G2pdG.SetDefaultSubstances())
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Sample Parameters'),
rd.Sample)
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Models'),G2pdG.SetDefaultREFDModel())
newHistList.append(HistName)
else:
self.EnablePlot = True
self.GPXtree.Expand(Id)
self.GPXtree.SelectItem(Id)
if not newHistList: return # somehow, no new histograms
return # success
def _Add_ImportMenu_PDF(self,parent):
        '''configure the PDF Data menus according to the readers found in _init_Imports
'''
submenu = wx.Menu()
item = parent.AppendSubMenu(submenu,'PDF G(R) Data','Import PDF G(R) data')
for reader in self.ImportPDFReaderlist:
item = submenu.Append(wx.ID_ANY,u'from '+reader.formatName+u' file',reader.longFormatName)
self.ImportMenuId[item.GetId()] = reader
self.Bind(wx.EVT_MENU, self.OnImportPDF, id=item.GetId())
submenu.AppendSeparator()
item = submenu.Append(wx.ID_ANY,'Auto Import','Import PDF files as found')
def OnAutoImport(event):
G2G.AutoLoadFiles(self,FileTyp='gr')
self.Bind(wx.EVT_MENU, OnAutoImport, id=item.GetId())
def OnImportPDF(self,event):
'''Called in response to an Import/PDF G(R) Data/... menu item
to read a PDF G(R) data set.
dict self.ImportMenuId is used to look up the specific
reader item associated with the menu item, which will be
None for the last menu item, which is the "guess" option
where all appropriate formats will be tried.
'''
# get a list of existing histograms
PDFlist = []
if self.GPXtree.GetCount():
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if name.startswith('PDF ') and name not in PDFlist:
PDFlist.append(name)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
# look up which format was requested
reqrdr = self.ImportMenuId.get(event.GetId())
rdlist = self.OnImportGeneric(
reqrdr,self.ImportPDFReaderlist,'PDF G(R) Data',multiple=True)
if len(rdlist) == 0: return
self.CheckNotebook()
newHistList = []
self.EnablePlot = False
for rd in rdlist:
HistName = rd.idstring
HistName = 'PDF '+HistName
# make new histogram names unique
HistName = G2obj.MakeUniqueLabel(HistName,PDFlist)
print ('Read PDF G(R) data '+HistName+ \
' from file '+self.lastimport)
# data are read, now store them in the tree
Id = self.GPXtree.AppendItem(self.root,text=HistName)
Ymin = np.min(rd.pdfdata[1])
Ymax = np.max(rd.pdfdata[1])
valuesdict = {
'wtFactor':1.0,'Dummy':False,'ranId':ran.randint(0,sys.maxsize),
'Offset':[0.0,0.0],'delOffset':0.02*Ymax,
'Yminmax':[Ymin,Ymax],
}
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='PDF Controls'),
{'G(R)':[valuesdict,rd.pdfdata,HistName],
'diffGRname':'','diffMult':1.0,'Rmax':Ymax,})
            self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='PDF Peaks'),
                {'Limits':[1.,5.],'Background':[2,[0.,-0.2*np.pi],False],'Peaks':[]})
            newHistList.append(HistName)    # record the new entry; otherwise the check below always aborts
else:
self.EnablePlot = True
self.GPXtree.Expand(Id)
self.GPXtree.SelectItem(Id)
if not newHistList: return # somehow, no new histograms
return # success
def AddToNotebook(self,text):
Id = GetGPXtreeItemId(self,self.root,'Notebook')
data = self.GPXtree.GetItemPyData(Id)
data.append('Notebook entry @ %s: %s'%(time.ctime(),text))
###############################################################################
#Command logging
###############################################################################
def OnMacroRecordStatus(self,event,setvalue=None):
        '''Called when the "Record actions" macro menu item is used; this toggles the
        logging state. Alternately, a value to be set can be provided. Note that this
routine is made more complex because on the Mac there are lots of menu
items (listed in self.MacroStatusList) and this loops over all of them.
'''
nextvalue = log.ShowLogStatus() != True
if setvalue is not None:
nextvalue = setvalue
if nextvalue:
log.LogOn()
set2 = True
else:
log.LogOff()
set2 = False
for menuitem in self.MacroStatusList:
menuitem.Check(set2)
def _init_Macro(self):
'''Define the items in the macro menu.
'''
menu = self.MacroMenu
        item = menu.Append(wx.ID_ANY,'Record actions',
            'Start or stop recording of menu actions, etc.',wx.ITEM_CHECK)
self.MacroStatusList.append(item)
item.Check(log.ShowLogStatus())
self.Bind(wx.EVT_MENU, self.OnMacroRecordStatus, item)
# this may only be of value for development work
        item = menu.Append(wx.ID_ANY,'Show log','Show logged commands',wx.ITEM_NORMAL)
def OnShowLog(event):
print (70*'=')
print ('List of logged actions')
for i,line in enumerate(log.G2logList):
if line: print ('%d %s'%(i,line))
print (70*'=')
self.Bind(wx.EVT_MENU, OnShowLog, item)
        item = menu.Append(wx.ID_ANY,'Clear log','Clear logged commands',wx.ITEM_NORMAL)
def OnClearLog(event): log.G2logList=[None]
self.Bind(wx.EVT_MENU, OnClearLog, item)
        item = menu.Append(wx.ID_ANY,'Save log','Save logged commands to file',wx.ITEM_NORMAL)
def OnSaveLog(event):
defnam = os.path.splitext(os.path.split(self.GSASprojectfile)[1])[0]+'.gcmd'
dlg = wx.FileDialog(self,
'Choose an file to save past actions', '.', defnam,
'GSAS-II cmd output (*.gcmd)|*.gcmd',
wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
dlg.CenterOnParent()
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
# make sure extension is correct
filename = os.path.splitext(filename)[0]+'.gcmd'
else:
filename = None
finally:
dlg.Destroy()
if filename:
fp = open(filename,'wb')
                fp.write((str(len(log.G2logList)-1)+'\n').encode())    # file is opened in binary mode for pickle
for item in log.G2logList:
if item: cPickle.dump(item,fp)
fp.close()
self.Bind(wx.EVT_MENU, OnSaveLog, item)
        item = menu.Append(wx.ID_ANY,'Load log','Load logged commands from file',wx.ITEM_NORMAL)
def OnLoadLog(event):
# this appends. Perhaps we should ask to clear?
defnam = os.path.splitext(
os.path.split(self.GSASprojectfile)[1])[0]+'.gcmd'
dlg = wx.FileDialog(self,
'Choose an file to read saved actions', '.', defnam,
'GSAS-II cmd output (*.gcmd)|*.gcmd',
wx.FD_OPEN)
dlg.CenterOnParent()
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
# make sure extension is correct
filename = os.path.splitext(filename)[0]+'.gcmd'
else:
filename = None
finally:
dlg.Destroy()
if filename and os.path.exists(filename):
fp = open(filename,'rb')
lines = fp.readline()
for i in range(int(lines)):
log.G2logList.append(cPickle.load(fp))
fp.close()
self.Bind(wx.EVT_MENU, OnLoadLog, item)
        item = menu.Append(wx.ID_ANY,'Replay log','Replay saved commands',wx.ITEM_NORMAL)
self.Bind(wx.EVT_MENU, log.ReplayLog, item)
# End of logging ##############################################################
def _init_Exports(self,menu):
'''Find exporter routines and add them into menus
'''
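        # The discovery loop below only assumes that each exporter object provides:
        #   obj.formatName      - short name used as the menu item label
        #   obj.longFormatName  - longer text used as the menu help string
        #   obj.exporttype      - list of types ('project','phase','powder',...) that
        #                         selects the submenu(s) the item is placed in
        #   obj.Exporter(event) - callback bound to the menu item
        #   obj.Writer          - optional; when present (and accepting a 'mode' arg)
        #                         the exporter is also offered for sequential exports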
# set up the top-level menus
projectmenu = wx.Menu()
item = menu.AppendSubMenu(projectmenu,'Entire project as','Export entire project')
self.ExportNonSeq.append([menu,item.Id])
phasemenu = wx.Menu()
item = menu.AppendSubMenu(phasemenu,'Phase as','Export phase or sometimes phases')
powdermenu = wx.Menu()
item = menu.AppendSubMenu(powdermenu,'Powder data as','Export powder diffraction histogram(s)')
sasdmenu = wx.Menu()
item = menu.AppendSubMenu(sasdmenu,'Small angle data as','Export small angle histogram(s)')
refdmenu = wx.Menu()
item = menu.AppendSubMenu(refdmenu,'Reflectometry data as','Export reflectometry histogram(s)')
singlemenu = wx.Menu()
item = menu.AppendSubMenu(singlemenu,'Single crystal data as','Export single crystal histogram(s)')
imagemenu = wx.Menu()
item = menu.AppendSubMenu(imagemenu,'Image data as','Export powder image(s) data')
mapmenu = wx.Menu()
item = menu.AppendSubMenu(mapmenu,'Maps as','Export density map(s)')
# sequential exports are handled differently; N.B. enabled in testSeqRefineMode
seqPhasemenu = wx.Menu()
item = menu.AppendSubMenu(seqPhasemenu,'Sequential phases','Export phases from sequential fit')
self.ExportSeq.append([menu,item.Id])
seqHistmenu = wx.Menu()
item = menu.AppendSubMenu(seqHistmenu,'Sequential histograms','Export histograms from sequential fit')
self.ExportSeq.append([menu,item.Id])
# find all the exporter files
if not self.exporterlist: # this only needs to be done once
self.exporterlist = G2fil.LoadExportRoutines(self)
# Add submenu item(s) for each Exporter by its self-declared type (can be more than one)
for obj in self.exporterlist:
#print 'exporter',obj
for typ in obj.exporttype:
if typ == "project":
submenu = projectmenu
elif typ == "phase":
submenu = phasemenu
elif typ == "powder":
submenu = powdermenu
elif typ == "single":
submenu = singlemenu
elif typ == "image":
submenu = imagemenu
elif typ == "map":
submenu = mapmenu
elif typ == "sasd":
submenu = sasdmenu
elif typ == "refd":
submenu = refdmenu
# elif typ == "pdf":
# submenu = pdfmenu
else:
print("Error, unknown type in "+str(obj))
break
item = submenu.Append(wx.ID_ANY,obj.formatName,obj.longFormatName)
self.Bind(wx.EVT_MENU, obj.Exporter, id=item.GetId())
self.ExportLookup[item.GetId()] = typ # lookup table for submenu item
for lbl,submenu in (('Phase',seqPhasemenu),
('Powder',seqHistmenu),
):
if lbl.lower() in obj.exporttype:
try:
obj.Writer
except AttributeError:
continue
# define a unique event handler for this menu item
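                        # (obj and lbl are bound as default arguments so each handler
                        # keeps its own exporter/type; a plain closure would otherwise
                        # see only the final values of the loop variables)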
def seqMenuItemEventHandler(event,obj=obj,typ=lbl):
'This handler has the needed exporter/type embedded'
# lookup sequential table
Id = GetGPXtreeItemId(self,self.root,'Sequential results')
if not Id:
print('Error in Seq seqMenuItemEventHandler for ',typ,'without Seq Res table')
return
data = self.GPXtree.GetItemPyData(Id)
G2IO.ExportSequential(self,data,obj,typ)
if '2' in platform.python_version_tuple()[0]:
if 'mode' in inspect.getargspec(obj.Writer)[0]:
item = submenu.Append(wx.ID_ANY,obj.formatName,obj.longFormatName)
self.Bind(wx.EVT_MENU, seqMenuItemEventHandler, item)
else:
if 'mode' in inspect.getfullargspec(obj.Writer)[0]:
item = submenu.Append(wx.ID_ANY,obj.formatName,obj.longFormatName)
self.Bind(wx.EVT_MENU, seqMenuItemEventHandler, item)
# self.SeqExportLookup[item.GetId()] = (obj,lbl) # lookup table for submenu item
# Bind is in UpdateSeqResults
item = imagemenu.Append(wx.ID_ANY,'Multiple image controls and masks',
'Export image controls and masks for multiple images')
self.Bind(wx.EVT_MENU, self.OnSaveMultipleImg, id=item.GetId())
#code to debug an Exporter. hard-code the routine below, to allow a reload before use
# def DebugExport(event):
# print 'start reload'
# reload(G2IO)
# import G2export_pwdr as dev
# reload(dev)
# dev.ExportPowderFXYE(self).Exporter(event)
# item = menu.Append(
# wx.ID_ANY,kind=wx.ITEM_NORMAL,
# help="debug exporter",text="test Export FXYE")
# self.Bind(wx.EVT_MENU, DebugExport, id=item.GetId())
# # #self.ExportLookup[item.GetId()] = 'image'
# self.ExportLookup[item.GetId()] = 'powder'
###############################################################################
# Exporters
###############################################################################
def _Add_ExportMenuItems(self,parent):
# item = parent.Append(
# help='Select PWDR item to enable',id=wx.ID_ANY,
# kind=wx.ITEM_NORMAL,
# text='Export Powder Patterns...')
# self.ExportPattern.append(item)
# item.Enable(False)
# self.Bind(wx.EVT_MENU, self.OnExportPatterns, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Export All Peak Lists...','')
self.ExportPeakList.append(item)
item.Enable(True)
self.Bind(wx.EVT_MENU, self.OnExportPeakList, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Export HKLs...','')
self.ExportHKL.append(item)
self.Bind(wx.EVT_MENU, self.OnExportHKL, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Export PDF...','Select PDF item to enable')
self.ExportPDF.append(item)
item.Enable(False)
self.Bind(wx.EVT_MENU, self.OnExportPDF, id=item.GetId())
def FillMainMenu(self,menubar,addhelp=True):
'''Define contents of the main GSAS-II menu for the (main) data tree window.
        On the Mac this is also called for the data item windows, so that the main
        menu items appear in the data menus as well.
'''
File = wx.Menu(title='')
menubar.Append(menu=File, title='&File')
self._Add_FileMenuItems(File)
Data = wx.Menu(title='')
menubar.Append(menu=Data, title='Data')
self._Add_DataMenuItems(Data)
Calculate = wx.Menu(title='')
menubar.Append(menu=Calculate, title='&Calculate')
self._Add_CalculateMenuItems(Calculate)
Import = wx.Menu(title='')
menubar.Append(menu=Import, title='Import')
self._Add_ImportMenu_Image(Import)
self._Add_ImportMenu_Phase(Import)
self._Add_ImportMenu_powder(Import)
self._Add_ImportMenu_Sfact(Import)
self._Add_ImportMenu_smallangle(Import)
self._Add_ImportMenu_reflectometry(Import)
self._Add_ImportMenu_PDF(Import)
item = Import.Append(wx.ID_ANY,'Column metadata test','Test Column (.par) metadata import')
self.Bind(wx.EVT_MENU, self.OnColMetaTest, id=item.GetId())
#======================================================================
# Code to help develop/debug an importer, much is hard-coded below
# but module is reloaded before each use, allowing faster testing
# def DebugImport(event):
# print 'start reload'
# import G2phase_ISO as dev
# reload(dev)
# rd = dev.ISODISTORTPhaseReader()
# self.ImportMenuId[event.GetId()] = rd
# self.OnImportPhase(event)
# or ----------------------------------------------------------------------
#self.OnImportGeneric(rd,[],'test of ISODISTORTPhaseReader')
# special debug code
# or ----------------------------------------------------------------------
# filename = '/Users/toby/projects/branton/subgroup_cif.txt'
# if not rd.ContentsValidator(filename):
# print 'not validated'
# # make a list of used phase ranId's
# phaseRIdList = []
# sub = GetGPXtreeItemId(self,self.root,'Phases')
# if sub:
# item, cookie = self.GPXtree.GetFirstChild(sub)
# while item:
# phaseName = self.GPXtree.GetItemText(item)
# ranId = self.GPXtree.GetItemPyData(item).get('ranId')
# if ranId: phaseRIdList.append(ranId)
# item, cookie = self.GPXtree.GetNextChild(sub, cookie)
# if rd.Reader(filename,usedRanIdList=phaseRIdList):
# print 'read OK'
# item = Import.Append(
# wx.ID_ANY,kind=wx.ITEM_NORMAL,
# help="debug importer",text="test importer")
# self.Bind(wx.EVT_MENU, DebugImport, id=item.GetId())
#======================================================================
self.ExportMenu = wx.Menu(title='')
menubar.Append(menu=self.ExportMenu, title='Export')
self._init_Exports(self.ExportMenu)
self._Add_ExportMenuItems(self.ExportMenu)
if GSASIIpath.GetConfigValue('Enable_logging'):
self.MacroMenu = wx.Menu(title='')
menubar.Append(menu=self.MacroMenu, title='Macro')
self._init_Macro()
if addhelp:
HelpMenu=G2G.MyHelp(self,includeTree=True,
morehelpitems=[('&Tutorials\tCtrl+T','Tutorials'),])
menubar.Append(menu=HelpMenu,title='&Help')
def _init_ctrls(self, parent):
try:
size = GSASIIpath.GetConfigValue('Main_Size')
if type(size) is tuple:
pass
elif type(size) is str:
size = eval(size)
else:
raise Exception
except:
size = wx.Size(700,450)
wx.Frame.__init__(self, name='GSASII', parent=parent,
size=size,style=wx.DEFAULT_FRAME_STYLE, title='GSAS-II main window')
self._init_Imports()
#initialize Menu item objects (these contain lists of menu items that are enabled or disabled)
self.MakePDF = []
self.Refine = []
self.ExportSeq = []
self.ExportNonSeq = []
#self.ExportPattern = []
self.ExportPeakList = []
self.ExportHKL = []
self.ExportPDF = []
self.ExportPhase = []
self.ExportCIF = []
#
self.MacroStatusList = [] # logging
self.Status = self.CreateStatusBar()
self.Status.SetFieldsCount(2)
# Bob: note different ways to display the SplitterWindow. I like the 3d effect on the Mac
# as it makes the splitter bar a bit easier to "grab" -- this might need to be platform selected.
#self.mainPanel = wx.SplitterWindow(self, wx.ID_ANY, style=wx.SP_BORDER|wx.SP_LIVE_UPDATE)
#self.mainPanel = wx.SplitterWindow(self, wx.ID_ANY, style=wx.SP_BORDER|wx.SP_LIVE_UPDATE|wx.SP_3DSASH)
self.mainPanel = wx.SplitterWindow(self, wx.ID_ANY, style=wx.SP_LIVE_UPDATE|wx.SP_3D)
self.mainPanel.SetMinimumPaneSize(100)
self.treePanel = wx.Panel(self.mainPanel, wx.ID_ANY,
style = wx.TAB_TRAVERSAL|wx.SUNKEN_BORDER)
self.dataWindow = G2DataWindow(self.mainPanel)
dataSizer = wx.BoxSizer(wx.VERTICAL)
self.dataWindow.SetSizer(dataSizer)
self.mainPanel.SplitVertically(self.treePanel, self.dataWindow, 200)
self.Status.SetStatusWidths([200,-1]) # make these match?
G2G.wxID_GPXTREE = wx.NewId()
treeSizer = wx.BoxSizer(wx.VERTICAL)
self.treePanel.SetSizer(treeSizer)
self.GPXtree = G2G.G2TreeCtrl(id=G2G.wxID_GPXTREE,
parent=self.treePanel, size=self.treePanel.GetClientSize(),style=wx.TR_DEFAULT_STYLE )
treeSizer.Add(self.GPXtree,1,wx.EXPAND|wx.ALL,0)
self.GPXtree.Bind(wx.EVT_TREE_SEL_CHANGED,self.OnDataTreeSelChanged)
self.GPXtree.Bind(wx.EVT_TREE_ITEM_RIGHT_CLICK,self.OnDataTreeSelChanged)
self.GPXtree.Bind(wx.EVT_TREE_ITEM_COLLAPSED,
self.OnGPXtreeItemCollapsed, id=G2G.wxID_GPXTREE)
self.GPXtree.Bind(wx.EVT_TREE_ITEM_EXPANDED,
self.OnGPXtreeItemExpanded, id=G2G.wxID_GPXTREE)
self.GPXtree.Bind(wx.EVT_TREE_DELETE_ITEM,
self.OnGPXtreeItemDelete, id=G2G.wxID_GPXTREE)
self.GPXtree.Bind(wx.EVT_TREE_KEY_DOWN,
self.OnGPXtreeKeyDown, id=G2G.wxID_GPXTREE)
self.GPXtree.Bind(wx.EVT_TREE_BEGIN_RDRAG,
self.OnGPXtreeBeginRDrag, id=G2G.wxID_GPXTREE)
self.GPXtree.Bind(wx.EVT_TREE_END_DRAG,
self.OnGPXtreeEndDrag, id=G2G.wxID_GPXTREE)
self.root = self.GPXtree.root
try:
size = GSASIIpath.GetConfigValue('Plot_Size')
if type(size) is tuple:
pass
elif type(size) is str:
size = eval(size)
else:
raise Exception
except:
size = wx.Size(700,600)
self.plotFrame = wx.Frame(None,-1,'GSASII Plots',size=size,
style=wx.DEFAULT_FRAME_STYLE ^ wx.CLOSE_BOX)
self.G2plotNB = G2plt.G2PlotNoteBook(self.plotFrame,G2frame=self)
self.plotFrame.Show()
for win,var in ((self,'Main_Pos'),(self.plotFrame,'Plot_Pos')):
try:
pos = GSASIIpath.GetConfigValue(var)
if type(pos) is str: pos = eval(pos)
win.SetPosition(pos)
if GetDisplay(pos) is None: win.Center()
except:
if GSASIIpath.GetConfigValue(var):
print('Value for config {} {} is invalid'.format(var,GSASIIpath.GetConfigValue(var)))
win.Center()
################################################################################
#### init_vars
################################################################################
def init_vars(self):
# initialize default values for GSAS-II "global" variables (saved in main Frame)
self.oldFocus = None
self.undofile = ''
self.TreeItemDelete = False
self.Weight = False
self.IfPlot = False
self.DDShowAll = False
self.atmSel = ''
self.PatternId = 0
self.PickId = 0
self.PickIdText = None
self.PeakTable = []
self.LimitsTable = []
self.ifX20 = True #use M20 /= (1+X20) in powder indexing, etc.
self.HKL = []
self.Lines = [] # lines used for data limits & excluded regions
self.MagLines = [] # lines used for plot magnification
self.itemPicked = None
self.Interpolate = 'nearest'
self.ContourColor = GSASIIpath.GetConfigValue('Contour_color','Paired')
self.VcovColor = 'RdYlGn'
self.RamaColor = 'Blues'
self.Projection = 'equal area'
self.logPlot = False
self.plusPlot = True
self.ErrorBars = False
self.Contour = False
self.TforYaxis = False
self.Legend = False
self.SinglePlot = True
self.Waterfall = False
self.selections= None
self.PDFselections = None
self.SubBack = False
self.seqReverse = False
self.seqLines = True #draw lines between points
self.plotView = 0
self.Image = 0
self.oldImagefile = '' # the name of the last image file read
self.oldImageTag = None # the name of the tag for multi-image files
self.PauseIntegration = False
self.ImageZ = []
self.Integrate = 0
self.imageDefault = {}
self.IntgOutList = [] # list of integration tree item Ids created in G2IO.SaveIntegration
self.AutointPWDRnames = [] # list of autoint created PWDR tree item names (to be deleted on a reset)
self.autoIntFrame = None
self.IntegratedList = [] # list of already integrated IMG tree items
self.Sngl = False
self.ifGetRing = False
self.MaskKey = '' #trigger for making image masks
self.MskDelete = False #trigger for mask delete
self.StrainKey = '' #ditto for new strain d-zeros
self.EnablePlot = True
self.hist = '' # selected histogram in Phase/Data tab
self.dataDisplayPhaseText = ''
self.lastTreeSetting = [] # used to track the selected Tree item before a refinement
self.ExpandingAll = False
self.SeqTblHideList = None
self.newGPXfile = ''
self.lastSelectedPhaseTab = None # track the last tab pressed on a phase window
self.testRBObjSizers = {} #rigid body sizer datafile contents
self.RMCchoice = 'RMCProfile'
def __init__(self, parent):
self.ExportLookup = {}
self.exporterlist = []
self._init_ctrls(parent)
self.Image = wx.Image(
os.path.join(GSASIIpath.path2GSAS2,'gsas2.ico'),
wx.BITMAP_TYPE_ICO)
if "wxMSW" in wx.PlatformInfo:
img = self.Image.Scale(16, 16).ConvertToBitmap()
elif "wxGTK" in wx.PlatformInfo:
img = self.Image.Scale(22, 22).ConvertToBitmap()
else:
img = self.Image.ConvertToBitmap()
if 'phoenix' in wx.version():
self.SetIcon(wx.Icon(img))
else:
self.SetIcon(wx.IconFromBitmap(img))
self.Bind(wx.EVT_CLOSE, self.ExitMain)
self.GSASprojectfile = ''
self.dirname = os.path.abspath(os.path.expanduser('~')) #start in the users home directory by default; may be meaningless
self.TutorialImportDir = None # location to read tutorial files, set when a tutorial is viewed
self.LastImportDir = None # last-used directory where an import was done
self.LastGPXdir = None # directory where a GPX file was last read
self.LastExportDir = None # the last directory used for exports, if any.
self.dataDisplay = None
self.init_vars()
arg = sys.argv
if len(arg) > 1 and arg[1]:
try:
self.GSASprojectfile = os.path.splitext(arg[1])[0]+u'.gpx'
except:
self.GSASprojectfile = os.path.splitext(arg[1])[0]+'.gpx'
self.dirname = os.path.abspath(os.path.dirname(arg[1]))
if self.dirname:
self.GSASprojectfile = os.path.split(self.GSASprojectfile)[1]
os.chdir(self.dirname)
self.LastGPXdir = self.dirname
try:
#open the file if possible
if sys.platform == "darwin": # on Mac delay a bit so GUI can open
wx.CallAfter(self.StartProject)
else:
self.StartProject()
return
except Exception:
                print ('Error opening or reading file '+arg[1])
import traceback
print (traceback.format_exc())
if GSASIIpath.GetConfigValue('Starting_directory'):
try:
pth = GSASIIpath.GetConfigValue('Starting_directory')
pth = os.path.expanduser(pth)
os.chdir(pth)
self.LastGPXdir = pth
except:
print('Ignoring Config Starting_directory value: '+
GSASIIpath.GetConfigValue('Starting_directory'))
def GetTreeItemsList(self,item):
return self.GPXtree._getTreeItemsList(item)
# def OnSize(self,event):
# 'Called to make GPXtree fill mainPanel'
# print 'OnSize'
# event.Skip()
# w,h = self.GetClientSizeTuple()
# self.dataWindow.SetupScrolling()
# self.mainPanel.SetSize(wx.Size(w,h))
# self.GPXtree.SetSize(wx.Size(w,h))
# self.dataWindow.SetSize(self.dataPanel.GetClientSize())
def SetDataSize(self):
'''this routine is a placeholder until all G2frame.SetDataSize calls are replaced
by G2frame.dataWindow.SetDataSize
'''
        # TODO: diagnostic patch
print ('G2frame.SetDataSize called rather than dataWindow.SetDataSize')
G2obj.HowDidIgetHere(True)
self.dataWindow.SetDataSize()
def OnDataTreeSelChanged(self, event):
'''Called when a data tree item is selected. May be called on item deletion as well.
'''
if self.TreeItemDelete:
self.TreeItemDelete = False
else:
if self.ExpandingAll:
if GSASIIpath.GetConfigValue('debug'): print('Skipping Tree selection due to ExpandAll')
return
pltNum = self.G2plotNB.nb.GetSelection()
if pltNum >= 0: #to avoid the startup with no plot!
self.G2plotNB.nb.GetPage(pltNum)
item = event.GetItem()
wx.CallAfter(SelectDataTreeItem,self,item,self.oldFocus)
#if self.oldFocus: # now done via last parameter on SelectDataTreeItem
# wx.CallAfter(self.oldFocus.SetFocus)
def OnGPXtreeItemCollapsed(self, event):
'Called when a tree item is collapsed - all children will be collapsed'
self.GPXtree.CollapseAllChildren(event.GetItem())
def OnGPXtreeItemExpanded(self, event):
'Called when a tree item is expanded'
event.Skip()
def OnGPXtreeItemDelete(self, event):
'Called when a tree item is deleted, inhibit the next tree item selection action'
self.TreeItemDelete = True
def OnGPXtreeItemActivated(self, event):
'Called when a tree item is activated'
event.Skip()
def OnGPXtreeBeginRDrag(self,event):
event.Allow()
self.BeginDragId = event.GetItem()
self.ParentId = self.GPXtree.GetItemParent(self.BeginDragId)
DragText = self.GPXtree.GetItemText(self.BeginDragId)
self.DragData = [[DragText,self.GPXtree.GetItemPyData(self.BeginDragId)],]
item, cookie = self.GPXtree.GetFirstChild(self.BeginDragId)
while item: #G2 data tree has no sub children under a child of a tree item
name = self.GPXtree.GetItemText(item)
self.DragData.append([name,self.GPXtree.GetItemPyData(item)])
item, cookie = self.GPXtree.GetNextChild(self.BeginDragId, cookie)
def OnGPXtreeEndDrag(self,event):
event.Allow()
self.EndDragId = event.GetItem()
try:
NewParent = self.GPXtree.GetItemParent(self.EndDragId)
except:
self.EndDragId = self.GPXtree.GetLastChild(self.root)
NewParent = self.root
if self.ParentId != NewParent:
self.ErrorDialog('Drag not allowed','Wrong parent for item dragged')
else:
Name,Item = self.DragData[0]
NewId = self.GPXtree.InsertItem(self.ParentId,self.EndDragId,Name,data=None)
self.GPXtree.SetItemPyData(NewId,Item)
for name,item in self.DragData[1:]: #loop over children
Id = self.GPXtree.AppendItem(parent=NewId,text=name)
self.GPXtree.SetItemPyData(Id,item)
self.GPXtree.Delete(self.BeginDragId)
SelectDataTreeItem(self,NewId)
def OnGPXtreeKeyDown(self,event): #doesn't exactly work right with Shift key down
'Allows stepping through the tree with the up/down arrow keys'
self.oldFocus = wx.Window.FindFocus()
keyevt = event.GetKeyEvent()
key = event.GetKeyCode()
item = self.GPXtree.GetSelection()
if type(item) is int: return # is this the toplevel in tree?
name = self.GPXtree.GetItemText(item)
parent = self.GPXtree.GetItemParent(item)
if key == wx.WXK_UP:
if keyevt.GetModifiers() == wx.MOD_SHIFT and parent != self.root:
if type(parent) is int: return # is this the toplevel in tree?
prev = self.GPXtree.GetPrevSibling(parent)
NewId = GetGPXtreeItemId(self,prev,name)
if NewId:
self.GPXtree.Collapse(parent)
self.GPXtree.Expand(prev)
self.oldFocus = wx.Window.FindFocus()
wx.CallAfter(self.GPXtree.SelectItem,NewId)
else:
wx.CallAfter(self.GPXtree.SelectItem,item)
elif sys.platform == "win32":
self.GPXtree.GetPrevSibling(item)
self.GPXtree.SelectItem(item)
else:
item = self.GPXtree.GetPrevSibling(item)
if item.IsOk(): self.GPXtree.SelectItem(item)
elif key == wx.WXK_DOWN:
if keyevt.GetModifiers() == wx.MOD_SHIFT and parent != self.root:
prev = self.GPXtree.GetNextSibling(parent)
NewId = GetGPXtreeItemId(self,prev,name)
if NewId:
self.GPXtree.Collapse(parent)
self.GPXtree.Expand(prev)
self.oldFocus = wx.Window.FindFocus()
wx.CallAfter(self.GPXtree.SelectItem,NewId)
else:
wx.CallAfter(self.GPXtree.SelectItem,item)
elif sys.platform == "win32":
self.GPXtree.GetNextSibling(item)
self.GPXtree.SelectItem(item)
else:
item = self.GPXtree.GetNextSibling(item)
if item.IsOk(): self.GPXtree.SelectItem(item)
def OnColMetaTest(self,event):
'Test the .par/.*lbls pair for contents'
G2imG.testColumnMetadata(self)
def OnPowderFPA(self,event):
'Perform FPA simulation/peak fitting'
G2fpa.GetFPAInput(self)
def OnReadPowderPeaks(self,event):
'Bound to menu Data/Read Powder Peaks'
self.CheckNotebook()
pth = G2G.GetImportPath(self)
if not pth: pth = '.'
dlg = wx.FileDialog(self, 'Choose file with peak list', pth, '',
'peak files (*.txt)|*.txt|All files (*.*)|*.*',wx.FD_MULTIPLE)
try:
if dlg.ShowModal() == wx.ID_OK:
for file_ajk in dlg.GetPaths():
self.HKL = []
self.powderfile = file_ajk
comments,peaks,limits,wave = G2IO.GetPowderPeaks(self.powderfile)
Id = self.GPXtree.AppendItem(parent=self.root,text='PKS '+os.path.basename(self.powderfile))
data = ['PKS',wave,0.0]
names = ['Type','Lam','Zero']
codes = [0,0,0]
inst = [G2fil.makeInstDict(names,data,codes),{}]
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Instrument Parameters'),inst)
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Comments'),comments)
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Limits'),[tuple(limits),limits])
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Index Peak List'),[peaks,[]])
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Unit Cells List'),[])
self.GPXtree.Expand(Id)
self.GPXtree.SelectItem(Id)
os.chdir(dlg.GetDirectory()) # to get Mac/Linux to change directory!
finally:
dlg.Destroy()
def CheckNotebook(self):
'''Make sure the data tree has the minimally expected controls.
'''
new = False
if not GetGPXtreeItemId(self,self.root,'Notebook'):
new = True
sub = self.GPXtree.AppendItem(parent=self.root,text='Notebook')
self.GPXtree.SetItemPyData(sub,[''])
if not GetGPXtreeItemId(self,self.root,'Controls'):
new = True
sub = self.GPXtree.AppendItem(parent=self.root,text='Controls')
self.GPXtree.SetItemPyData(sub,copy.copy(G2obj.DefaultControls))
if not GetGPXtreeItemId(self,self.root,'Covariance'):
new = True
sub = self.GPXtree.AppendItem(parent=self.root,text='Covariance')
self.GPXtree.SetItemPyData(sub,{})
if not GetGPXtreeItemId(self,self.root,'Constraints'):
new = True
sub = self.GPXtree.AppendItem(parent=self.root,text='Constraints')
self.GPXtree.SetItemPyData(sub,{'Hist':[],'HAP':[],'Phase':[]})
if not GetGPXtreeItemId(self,self.root,'Restraints'):
new = True
sub = self.GPXtree.AppendItem(parent=self.root,text='Restraints')
self.GPXtree.SetItemPyData(sub,{})
if not GetGPXtreeItemId(self,self.root,'Rigid bodies'):
new = True
sub = self.GPXtree.AppendItem(parent=self.root,text='Rigid bodies')
self.GPXtree.SetItemPyData(sub,{'Vector':{'AtInfo':{}},
'Residue':{'AtInfo':{}},'RBIds':{'Vector':[],'Residue':[]}})
if new:
self.GPXtree.Expand(self.GPXtree.root)
class CopyDialog(wx.Dialog):
'''Creates a dialog for copying control settings between
data tree items'''
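        # Each entry of `data` is expected to be a mutable [use-flag, label] pair: the
        # label (item[1]) is shown next to a checkbox and the flag (item[0]) is updated
        # from that checkbox in OnCopyChange, so the caller reads the selections back
        # with GetData(). Illustrative values only:
        #   data = [[False,'copy limits'],[True,'copy background']]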
def __init__(self,parent,title,text,data):
wx.Dialog.__init__(self,parent,-1,title,
pos=wx.DefaultPosition,style=wx.DEFAULT_DIALOG_STYLE)
self.data = data
panel = wx.Panel(self)
mainSizer = wx.BoxSizer(wx.VERTICAL)
topLabl = wx.StaticText(panel,-1,text)
mainSizer.Add((10,10),1)
mainSizer.Add(topLabl,0,wx.ALIGN_CENTER_VERTICAL|wx.LEFT,10)
mainSizer.Add((10,10),1)
            ncols = len(data)//40+1     # integer division: FlexGridSizer needs an int
dataGridSizer = wx.FlexGridSizer(cols=ncols,hgap=2,vgap=2)
for Id,item in enumerate(self.data):
ckbox = wx.CheckBox(panel,Id,item[1])
ckbox.Bind(wx.EVT_CHECKBOX,self.OnCopyChange)
dataGridSizer.Add(ckbox,0,wx.LEFT,10)
mainSizer.Add(dataGridSizer,0,wx.EXPAND)
OkBtn = wx.Button(panel,-1,"Ok")
OkBtn.Bind(wx.EVT_BUTTON, self.OnOk)
cancelBtn = wx.Button(panel,-1,"Cancel")
cancelBtn.Bind(wx.EVT_BUTTON, self.OnCancel)
btnSizer = wx.BoxSizer(wx.HORIZONTAL)
btnSizer.Add((20,20),1)
btnSizer.Add(OkBtn)
btnSizer.Add((20,20),1)
btnSizer.Add(cancelBtn)
btnSizer.Add((20,20),1)
mainSizer.Add(btnSizer,0,wx.EXPAND|wx.BOTTOM|wx.TOP, 10)
panel.SetSizer(mainSizer)
panel.Fit()
self.Fit()
def OnCopyChange(self,event):
Id = event.GetId()
self.data[Id][0] = self.FindWindowById(Id).GetValue()
def OnOk(self,event):
parent = self.GetParent()
parent.Raise()
self.EndModal(wx.ID_OK)
def OnCancel(self,event):
parent = self.GetParent()
parent.Raise()
self.EndModal(wx.ID_CANCEL)
def GetData(self):
return self.data
class SumDialog(wx.Dialog):
        '''Allows the user to supply scale factor(s) when summing data.
        dataType is 'PWDR' or 'IMG'; data is a list of histogram/image names with the
        proposed output name as its last entry; dataList holds the corresponding data;
        Limits (optional) restricts the range used when summing powder patterns.
        '''
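        # Typical use (see OnPwdrSum/OnImageSum below): the caller shows the dialog
        # modally and, on wx.ID_OK, retrieves (names + [output name], scale factors or
        # summed result) from GetData().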
def __init__(self,parent,title,text,dataType,data,dataList,Limits=None):
wx.Dialog.__init__(self,parent,-1,title,size=(400,250),
pos=wx.DefaultPosition,style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)
self.plotFrame = wx.Frame(self,-1,'Sum Plots',size=wx.Size(700,600), \
style=wx.DEFAULT_FRAME_STYLE ^ wx.CLOSE_BOX)
self.G2plotNB = G2plt.G2PlotNoteBook(self.plotFrame,G2frame=self)
self.text = text
self.data = data
self.average = False
self.selectData = copy.copy(data[:-1])
self.selectVals = len(data)*[0.0,]
self.dataList = dataList
self.Limits = Limits
self.filterlist = range(len(self.dataList)) # list of the choice numbers that have been filtered (list of int indices)
self.dataType = dataType
self.filterVal = ''
self.panel = None
self.Draw()
def Draw(self):
if self.panel:
self.panel.DestroyChildren() #safe: wx.Panel
self.panel.Destroy()
size = (480,350)
self.panel = wxscroll.ScrolledPanel(self, wx.ID_ANY,size=size,
style = wx.TAB_TRAVERSAL|wx.SUNKEN_BORDER)
mainSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.Add(wx.StaticText(self.panel,label=self.text),0)
mainSizer.Add((10,10))
self.dataGridSizer = wx.FlexGridSizer(cols=2,hgap=2,vgap=2)
self.dataGridSizer.Add((-1,-1))
topSizer = wx.BoxSizer(wx.HORIZONTAL)
topSizer.Add((-1,-1),1,wx.EXPAND,1)
topSizer.Add(wx.StaticText(self.panel,label='Filter: '),0,WACV)
self.timer = wx.Timer()
self.timer.Bind(wx.EVT_TIMER,self.OnFilter)
self.filterBox = wx.TextCtrl(self.panel, wx.ID_ANY, self.filterVal,
size=(80,-1),style=wx.TE_PROCESS_ENTER)
self.filterBox.Bind(wx.EVT_TEXT,self.onChar)
self.filterBox.Bind(wx.EVT_TEXT_ENTER,self.OnFilter)
topSizer.Add(self.filterBox,0,WACV)
self.dataGridSizer.Add(topSizer,1,wx.RIGHT|wx.BOTTOM|wx.EXPAND,1)
self.dataGridSizer.Add((-1,10))
self.dataGridSizer.Add((-1,10))
for Id,item in enumerate(self.selectData):
name = wx.TextCtrl(self.panel,-1,item,size=wx.Size(300,20))
name.SetEditable(False)
scale = G2G.ValidatedTxtCtrl(self.panel,self.selectVals,Id,nDig=(10,3),typeHint=float)
self.dataGridSizer.Add(scale,0,wx.LEFT,10)
self.dataGridSizer.Add(name,0,wx.RIGHT,10)
if self.dataType:
ScaleAll = wx.Button(self.panel,wx.ID_ANY,'Set all above')
ScaleAll.Bind(wx.EVT_BUTTON, self.OnAllScale)
if self.dataType == 'PWDR':
self.Avg = wx.CheckBox(self.panel,label=' Make average?')
self.Avg.Bind(wx.EVT_CHECKBOX,self.OnAve)
self.dataGridSizer.Add(ScaleAll,0,wx.LEFT,10)
if self.dataType == 'PWDR':
self.dataGridSizer.Add(self.Avg,0,wx.RIGHT,10)
self.dataGridSizer.Add(wx.StaticText(self.panel,-1,' Result type: '+self.dataType),1,
wx.LEFT|wx.ALIGN_CENTER_VERTICAL,1)
mainSizer.Add(self.dataGridSizer,0,wx.EXPAND)
self.name = G2G.ValidatedTxtCtrl(self.panel,self.data,-1,size=wx.Size(300,20))
mainSizer.Add(self.name,0,wx.RIGHT|wx.TOP,10)
self.OkBtn = wx.Button(self.panel,-1,"Ok")
self.OkBtn.Bind(wx.EVT_BUTTON, self.OnOk)
cancelBtn = wx.Button(self.panel,-1,"Cancel")
cancelBtn.Bind(wx.EVT_BUTTON, self.OnCancel)
btnSizer = wx.FlexGridSizer(0,3,10,20)
if self.dataType =='PWDR':
TestBtn = wx.Button(self.panel,-1,"Test")
TestBtn.Bind(wx.EVT_BUTTON, self.OnTest)
btnSizer.Add(TestBtn)
btnSizer.Add(self.OkBtn)
btnSizer.Add(cancelBtn)
btnSizer.Add((5,5))
self.panel.SetSizer(mainSizer)
self.panel.SetAutoLayout(1)
self.panel.SetupScrolling()
mainSizer.Add((10,10),1)
mainSizer.Add(btnSizer,0,wx.CENTER)
self.panel.SetSizer(mainSizer)
self.panel.Fit()
self.Fit()
def OnAve(self,event):
self.average = self.Avg.GetValue()
def OnFilter(self,event):
'''Read text from filter control and select entries that match.
'''
if self.timer.IsRunning():
self.timer.Stop()
self.filterVal = txt = self.filterBox.GetValue()
if txt:
txt = txt.lower()
ChoiceList = []
ChoiceVals = []
for i,item in enumerate(self.selectData):
if item.lower().find(txt) != -1:
ChoiceList.append(item)
ChoiceVals.append(self.selectVals[i])
self.selectData = ChoiceList
self.selectVals = ChoiceVals
else:
# self.selectData = copy.copy(self.data[:-1])
self.selectVals = len(self.data)*[0.0,]
wx.CallAfter(self.Draw)
def GetData(self):
if self.dataType == 'PWDR':
return self.selectData+[self.data[-1],],self.result
else:
return self.selectData+[self.data[-1],],self.selectVals
def onChar(self,event):
'Respond to keyboard events in the Filter box'
self.filterVal = self.filterBox.GetValue()
if self.timer.IsRunning():
self.timer.Stop()
self.timer.Start(1000,oneShot=True)
if event: event.Skip()
def OnAllScale(self,event):
dlg = G2G.SingleFloatDialog(self,'New scale',
'Enter new value for all scale factors',1.)
dlg.CenterOnParent()
if dlg.ShowModal() == wx.ID_OK:
val = dlg.GetValue()
dlg.Destroy()
else:
dlg.Destroy()
return
for Id,item in enumerate(self.selectData):
self.selectVals[Id] = val
wx.CallAfter(self.Draw)
def OnTest(self,event):
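            # Form the scaled sum of the selected patterns: each pattern with a nonzero
            # scale (optionally clipped to self.Limits) contributes scale*y to Ysum and
            # abs(scale)/w to the accumulated variance Vsum; all contributing patterns
            # must share the same x grid and range. For a PWDR average the scaled
            # patterns are instead stacked and averaged with zeros masked. The result
            # [X,Y,weights,Yc,Yb,Yd] is stored in self.result and plotted in a
            # temporary plot notebook.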
lenX = 0
Xminmax = [0,0]
XY = []
Xsum = []
Ysum = []
Vsum = []
for i,item in enumerate(self.selectData):
name = item
scale = self.selectVals[i]
Id = self.data.index(name)
data = self.dataList[Id]
if scale:
x,y,w,yc,yb,yd = data #numpy arrays!
if self.Limits is not None:
xMin = np.searchsorted(x,self.Limits[1][0])
xMax = np.searchsorted(x,self.Limits[1][1])
x = x[xMin:xMax+1]
y = y[xMin:xMax+1]
lenX = xMax-xMin+1
XY.append([x,scale*y])
v = 1./w[xMin:xMax+1]
if lenX:
if lenX != len(x):
self.GetParent().ErrorDialog('Data length error','Data to be summed must have same number of points'+
'\nExpected:'+str(lenX)+
'\nFound: '+str(len(x))+'\nfor '+name)
return
# self.OnCancel(event)
else:
lenX = len(x)
if Xminmax[1]:
if Xminmax != [x[0],x[-1]]:
self.GetParent().ErrorDialog('Data range error','Data to be summed must span same range'+
'\nExpected:'+str(Xminmax[0])+' '+str(Xminmax[1])+
'\nFound: '+str(x[0])+' '+str(x[-1])+'\nfor '+name)
return
# self.OnCancel(event)
else:
Xminmax = [x[0],x[-1]]
Xsum = x
if self.dataType == 'PWDR' and self.average:
Ysum.append(scale*y)
Vsum.append(abs(scale)*v)
else:
try:
Ysum += scale*y
Vsum += abs(scale)*v
except ValueError:
Ysum = scale*y
Vsum = abs(scale)*v
if self.dataType =='PWDR' and self.average:
maYsum = ma.masked_equal(Ysum,0)
Ysum = ma.mean(maYsum,axis=0)
Wsum = 1./np.array(Ysum)
else:
Wsum = 1./Vsum
YCsum = np.zeros(lenX)
YBsum = np.zeros(lenX)
YDsum = np.zeros(lenX)
XY.append([Xsum,Ysum])
self.result = [Xsum,Ysum,Wsum,YCsum,YBsum,YDsum]
# N.B. PlotXY expects the first arg to point to G2frame. In this case, we
# create a duplicate (temporary) Plot notebook window that is a child of the
# modal SumDialog dialog (self). This nicely gets deleted when the dialog is destroyed,
# but the plot window is not fully functional, at least on the Mac.
if len(XY[0][0]):
G2plt.PlotXY(self,XY,lines=True,Title='Sum:'+self.data[-1],labelY='Intensity',)
self.plotFrame.Show()
return True
def OnOk(self,event):
if self.dataType == 'PWDR':
if not self.OnTest(event): return
parent = self.GetParent()
parent.Raise()
self.EndModal(wx.ID_OK)
def OnCancel(self,event):
parent = self.GetParent()
parent.Raise()
self.EndModal(wx.ID_CANCEL)
def OnPwdrSum(self,event):
        'Sum or average together powder data sets'
TextList = []
DataList = []
Limits = []
Names = []
Inst = None
Comments = ['Sum/Average equals: \n']
if self.GPXtree.GetCount():
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
Names.append(name)
if 'PWDR' in name:
TextList.append(name)
DataList.append(self.GPXtree.GetItemPyData(item)[1]) # (x,y,w,yc,yb,yd)
if not Inst:
Inst = self.GPXtree.GetItemPyData(GetGPXtreeItemId(self,item, 'Instrument Parameters'))
if not Limits:
Limits = self.GPXtree.GetItemPyData(GetGPXtreeItemId(self,item, 'Limits'))
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
if len(TextList) < 2:
self.ErrorDialog('Not enough data to sum/average','There must be more than one "PWDR" pattern')
return
TextList.append('default_ave_name')
dlg = self.SumDialog(self,'Sum/Average data','''
Enter scale for each pattern to be summed/averaged
Limits for first pattern used sets range for the sum
All patterns used must extend over this range
''','PWDR',
TextList,DataList,Limits)
try:
if dlg.ShowModal() == wx.ID_OK:
result,sumData = dlg.GetData()
Xsum,Ysum,Wsum,YCsum,YBsum,YDsum = sumData
Xminmax = [Xsum[0],Xsum[-1]]
outname = 'PWDR '+result[-1]
Id = 0
if outname in Names:
dlg2 = wx.MessageDialog(self,'Overwrite data?','Duplicate data name',wx.OK|wx.CANCEL)
try:
if dlg2.ShowModal() == wx.ID_OK:
                            Id = GetGPXtreeItemId(self,self.root,outname)   # look up the existing entry being overwritten
self.GPXtree.Delete(Id)
finally:
dlg2.Destroy()
Id = self.GPXtree.AppendItem(parent=self.root,text=outname)
if Id:
Sample = G2obj.SetDefaultSample()
Ymin = np.min(Ysum)
Ymax = np.max(Ysum)
valuesdict = {
'wtFactor':1.0,
'Dummy':False,
'ranId':ran.randint(0,sys.maxsize),
'Offset':[0.0,0.0],'delOffset':0.02*Ymax,'refOffset':-.1*Ymax,'refDelt':0.1*Ymax,
'Yminmax':[Ymin,Ymax]
}
self.GPXtree.SetItemPyData(Id,[valuesdict,[np.array(Xsum),np.array(Ysum),np.array(Wsum),
np.array(YCsum),np.array(YBsum),np.array(YDsum)]])
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Comments'),Comments)
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Limits'),[tuple(Xminmax),Xminmax])
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Background'),[['chebyschev-1',True,3,1.0,0.0,0.0],
{'nDebye':0,'debyeTerms':[],'nPeaks':0,'peaksList':[],'background PWDR':['',1.0,False]}])
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Instrument Parameters'),Inst)
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Sample Parameters'),Sample)
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Peak List'),{'peaks':[],'sigDict':{}})
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Index Peak List'),[[],[]])
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Unit Cells List'),[])
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Reflection Lists'),{})
self.GPXtree.SelectItem(Id)
self.GPXtree.Expand(Id)
finally:
dlg.Destroy()
def OnImageSum(self,event):
'Sum together image data'
TextList = []
DataList = []
IdList = []
Names = []
Comments = ['Sum equals: \n']
if self.GPXtree.GetCount():
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
Names.append(name)
if 'IMG' in name:
TextList.append(name)
DataList.append(self.GPXtree.GetImageLoc(item)) #Size,Image,Tag
IdList.append(item)
Data = self.GPXtree.GetItemPyData(GetGPXtreeItemId(self,item,'Image Controls'))
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
if len(TextList) < 2:
self.ErrorDialog('Not enough data to sum','There must be more than one "IMG" pattern')
return
TextList.append('default_sum_name')
dlg = self.SumDialog(self,'Sum data',' Enter scale for each image to be summed','IMG',
TextList,DataList)
try:
if dlg.ShowModal() == wx.ID_OK:
imSize = 0
result,scales = dlg.GetData()
First = True
Found = False
for name,scale in zip(result,scales):
if scale:
Found = True
Comments.append("%10.3f %s" % (scale,' * '+name))
i = TextList.index(name)
Npix,imagefile,imagetag = DataList[i]
imagefile = G2IO.GetCheckImageFile(self,IdList[i])[1]
image = G2IO.GetImageData(self,imagefile,imageOnly=True,ImageTag=imagetag)
if First:
newImage = np.zeros_like(image)
First = False
if imSize:
if imSize != Npix:
self.ErrorDialog('Image size error','Images to be summed must be same size'+ \
'\nExpected:'+str(imSize)+ \
'\nFound: '+str(Npix)+'\nfor '+name)
return
newImage = newImage+scale*image
else:
imSize = Npix
newImage = newImage+scale*image
del(image)
if not Found:
self.ErrorDialog('Image sum error','No nonzero image multipliers found')
return
newImage = np.array(newImage,dtype=np.int32)
outname = 'IMG '+result[-1]
Id = 0
if outname in Names:
dlg2 = wx.MessageDialog(self,'Overwrite data?','Duplicate data name',wx.OK|wx.CANCEL)
try:
if dlg2.ShowModal() == wx.ID_OK:
                            Id = GetGPXtreeItemId(self,self.root,outname)   # reuse the existing entry being overwritten
finally:
dlg2.Destroy()
else:
Id = self.GPXtree.AppendItem(parent=self.root,text=outname)
if Id:
pth = os.path.split(os.path.abspath(imagefile))[0]
# pth = G2G.GetExportPath(self)
dlg = wx.FileDialog(self, 'Choose sum image filename', pth,outname.split('IMG ')[1],
'G2img files (*.G2img)|*.G2img',
wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
if dlg.ShowModal() == wx.ID_OK:
newimagefile = dlg.GetPath()
newimagefile = G2IO.FileDlgFixExt(dlg,newimagefile)
G2IO.PutG2Image(newimagefile,Comments,Data,Npix,newImage)
Imax = np.amax(newImage)
Imin = np.amin(newImage)
newImage = []
self.GPXtree.SetItemPyData(Id,[imSize,newimagefile])
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Comments'),Comments)
del(newImage)
if self.imageDefault:
Data = copy.copy(self.imageDefault)
Data['formatName'] = 'GSAS-II image'
Data['showLines'] = True
Data['ring'] = []
Data['rings'] = []
Data['cutoff'] = 10
Data['pixLimit'] = 20
Data['ellipses'] = []
Data['calibrant'] = ''
Data['range'] = [(Imin,Imax),[Imin,Imax]]
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Image Controls'),Data)
Masks = {'Points':[],'Rings':[],'Arcs':[],'Polygons':[],
'Frames':[],'Thresholds':[(Imin,Imax),[Imin,Imax]],
'SpotMask':{'esdMul':2.,'spotMask':None}}
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Masks'),Masks)
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Stress/Strain'),
{'Type':'True','d-zero':[],'Sample phi':0.0,'Sample z':0.0,'Sample load':0.0})
self.GPXtree.SelectItem(Id)
self.GPXtree.Expand(Id)
self.PickId = GetGPXtreeItemId(self,self.root,outname)
self.Image = self.PickId
finally:
dlg.Destroy()
def OnAddPhase(self,event):
'Add a new, empty phase to the tree. Called by Data/Add Phase menu'
self.CheckNotebook()
if not GetGPXtreeItemId(self,self.root,'Phases'):
sub = self.GPXtree.AppendItem(parent=self.root,text='Phases')
else:
sub = GetGPXtreeItemId(self,self.root,'Phases')
PhaseName = ''
dlg = wx.TextEntryDialog(None,'Enter a name for this phase','Phase Name Entry','New phase',
style=wx.OK)
if dlg.ShowModal() == wx.ID_OK:
PhaseName = dlg.GetValue()
dlg.Destroy()
if not GetGPXtreeItemId(self,self.root,'Restraints'):
subr = self.GPXtree.AppendItem(parent=self.root,text='Restraints')
self.GPXtree.SetItemPyData(subr,{PhaseName:{}})
else:
subr = GetGPXtreeItemId(self,self.root,'Restraints')
self.GPXtree.GetItemPyData(subr).update({PhaseName:{}})
self.GPXtree.AppendItem(parent=subr,text=PhaseName)
newphase = self.GPXtree.AppendItem(parent=sub,text=PhaseName)
E,SGData = G2spc.SpcGroup('P 1')
self.GPXtree.SetItemPyData(newphase,G2obj.SetNewPhase(Name=PhaseName,SGData=SGData))
self.GPXtree.Expand(sub)
SelectDataTreeItem(self,newphase) #bring up new phase General tab
def OnDeletePhase(self,event):
        '''Delete one or more phases from the tree. Called by the Data/Delete Phase menu.
        Also deletes each selected phase from the Reflection Lists of every PWDR histogram,
        removes the phase from the restraints, and deletes any constraints
        that use variables from the phase.
        If any deleted phase is marked as Used in a histogram, a more rigorous
        "deep clean" is done: histogram refinement results are cleared, the covariance
        information is removed, and all plots are deleted.
        '''
selItem = self.GPXtree.GetSelection()
if self.dataWindow:
self.dataWindow.ClearData()
TextList = []
DelList = []
DelItemList = []
consDeleted = 0
usedPhase = False
if GetGPXtreeItemId(self,self.root,'Phases'):
sub = GetGPXtreeItemId(self,self.root,'Phases')
else:
return
if GetGPXtreeItemId(self,self.root,'Restraints'):
subr = GetGPXtreeItemId(self,self.root,'Restraints')
else:
subr = 0
if GetGPXtreeItemId(self,self.root,'Constraints'):
id = GetGPXtreeItemId(self,self.root,'Constraints')
constr = self.GPXtree.GetItemPyData(id)
else:
constr = {}
item, cookie = self.GPXtree.GetFirstChild(sub)
while item:
TextList.append(self.GPXtree.GetItemText(item))
item, cookie = self.GPXtree.GetNextChild(sub, cookie)
dlg = wx.MultiChoiceDialog(self, 'Which phase to delete?', 'Delete phase', TextList, wx.CHOICEDLG_STYLE)
try:
if dlg.ShowModal() == wx.ID_OK:
result = dlg.GetSelections()
for i in result: DelList.append([i,TextList[i]])
item, cookie = self.GPXtree.GetFirstChild(sub)
i = 0
while item:
if [i,self.GPXtree.GetItemText(item)] in DelList: DelItemList.append(item)
item, cookie = self.GPXtree.GetNextChild(sub, cookie)
i += 1
for item in DelItemList:
phase = self.GPXtree.GetItemPyData(item)
for h in phase['Histograms']:
if 'Use' not in phase['Histograms'][h]: continue
if phase['Histograms'][h]['Use']:
usedPhase = True
break
if 'pId' in phase:
p = phase['pId']
else:
p = '?'
self.GPXtree.Delete(item)
if item == selItem: selItem = self.root
# look for constraints to remove
for key in constr:
delThis = []
if key.startswith('_'): continue
for i,cons in enumerate(constr[key]):
for var in cons[0:-3]:
if str(var[1]).startswith(str(p)):
delThis.append(i)
break
for i in reversed(delThis):
consDeleted += 1
del constr[key][i]
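                    # Constraint variables are named like "<pId>:<hId>:<name>", so any
                    # constraint containing a variable whose leading phase id matches the
                    # deleted phase's pId is removed here and counted for the final report.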
# delete refinement results from histograms
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if 'PWDR' in name:
data = self.GPXtree.GetItemPyData(item)
if usedPhase: # remove r-factors
dellist = [value for value in data[0] if ':' in value]
for v in dellist+['Durbin-Watson', 'R', 'wR', 'Rb',
'wRb', 'wRmin','Nobs']:
if v in data[0]: del data[0][v]
# could wipe out computed & difference patterns, but does not work
#data[1][3] = np.zeros_like(data[1][3])
#data[1][5] = np.zeros_like(data[1][5])
# always get rid of reflection lists
Id = GetGPXtreeItemId(self,item, 'Reflection Lists')
refList = self.GPXtree.GetItemPyData(Id)
if len(refList):
for i,item in DelList:
if item in refList:
del(refList[item])
elif 'HKLF' in name and usedPhase: # probably not needed if phase is not used
data = self.GPXtree.GetItemPyData(item)
data[0] = {}
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
finally:
dlg.Destroy()
if usedPhase: # clear info from last refinement for "deep clean" if a used phase is deleted
id = GetGPXtreeItemId(self,self.root,'Covariance')
if DelItemList and id:
self.GPXtree.SetItemPyData(id,{})
id = GetGPXtreeItemId(self,self.root,'Sequential results')
if DelItemList and id:
self.GPXtree.Delete(id)
if id == selItem: selItem = self.root
# delete all plots
for lbl in self.G2plotNB.plotList:
self.G2plotNB.Delete(lbl)
if subr and DelList: #remove restraints for deleted phase
DelList = [itm[1] for itm in DelList]
item, cookie = self.GPXtree.GetFirstChild(subr)
while item:
name = self.GPXtree.GetItemText(item)
if name in DelList:
self.GPXtree.Delete(item)
if item == selItem: selItem = self.root
item, cookie = self.GPXtree.GetNextChild(subr, cookie)
# force redisplay of current tree item if it was not deleted
self.PickId = 0
self.PatternId = 0
self.PickIdText = None
SelectDataTreeItem(self,selItem)
wx.CallAfter(self.GPXtree.SelectItem,selItem)
if consDeleted:
print('\n',consDeleted,'constraints were deleted')
def OnRenameData(self,event):
        '''Renames an existing histogram. Called by the Data/Rename data menu.
Must be used before a histogram is used in a phase.
'''
name = self.GPXtree.GetItemText(self.PickId)
Histograms,Phases = self.GetUsedHistogramsAndPhasesfromTree()
if name in Histograms:
G2G.G2MessageBox(self,
'Histogram is used. You must remove it from all phases before it can be renamed',
'Rename not allowed')
return
if 'PWDR' in name or 'HKLF' in name or 'IMG' in name:
if 'Bank' in name:
names = name.split('Bank')
names[1] = ' Bank'+names[1]
elif 'Azm' in name:
names = name.split('Azm')
names[1] = ' Azm'+names[1]
else:
names = [name,'']
dataType = names[0][:names[0].index(' ')+1] #includes the ' '
dlg = G2G.SingleStringDialog(self,'Change tree name',
'Data name: '+name,names[0][names[0].index(' ')+1:])
#if dlg.ShowModal() == wx.ID_OK:
if dlg.Show():
name = dataType+dlg.GetValue().strip()+names[1]
self.GPXtree.SetItemText(self.PickId,name)
if 'PWDR' in name:
self.GPXtree.GetItemPyData(self.PickId)[2] = name
dlg.Destroy()
def GetFileList(self,fileType,skip=None): #potentially useful?
'Appears unused. Note routine of same name in GSASIIpwdGUI'
fileList = []
Source = ''
Id, cookie = self.GPXtree.GetFirstChild(self.root)
while Id:
name = self.GPXtree.GetItemText(Id)
if fileType in name:
if Id == skip:
Source = name
else:
fileList.append([False,name,Id])
Id, cookie = self.GPXtree.GetNextChild(self.root, cookie)
if skip:
return fileList,Source
else:
return fileList
def OnDataDelete(self, event):
'''Delete one or more histograms from data tree. Called by the
Data/DeleteData menu
'''
TextList = []
DelList = []
DelItemList = []
nItems = {'PWDR':0,'SASD':0,'REFD':0,'IMG':0,'HKLF':0,'PDF':0}
PDFnames = []
selItem = self.GPXtree.GetSelection()
Histograms,Phases = self.GetUsedHistogramsAndPhasesfromTree()
if not self.GPXtree.GetCount():
G2G.G2MessageBox(self,'No tree items to be deleted',
'Nothing to delete')
return
item, cookie = self.GPXtree.GetFirstChild(self.root)
used = False
while item:
name = self.GPXtree.GetItemText(item)
if name not in ['Notebook','Controls','Covariance','Constraints',
'Restraints','Phases','Rigid bodies'] and 'Sequential' not in name:
if 'PWDR' in name[:4]:
nItems['PWDR'] += 1
if name in Histograms:
used = True
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
continue
if 'SASD' in name[:4]: nItems['SASD'] += 1
if 'REFD' in name[:4]: nItems['REFD'] += 1
if 'IMG' in name[:3]: nItems['IMG'] += 1
if 'HKLF' in name[:4]:
nItems['HKLF'] += 1
if name in Histograms:
used = True
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
continue
if 'PDF' in name[:3]:
PDFnames.append(name)
nItems['PDF'] += 1
TextList.append(name)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
for pdfName in PDFnames:
try:
TextList.remove('PWDR'+pdfName[4:])
except ValueError:
print (u'PWDR'+pdfName[4:]+u' for '+pdfName+u' not found')
if len(TextList) == 0 and used:
G2G.G2MessageBox(self,'All histograms are used. You must remove them from phases before they can be deleted',
'Nothing to delete')
return
elif len(TextList) == 0:
G2G.G2MessageBox(self,'None of the tree items are allowed to be deleted',
'Nothing to delete')
return
dlg = G2G.G2MultiChoiceDialog(self, 'Which data to delete?', 'Delete data', TextList, wx.CHOICEDLG_STYLE)
try:
if dlg.ShowModal() == wx.ID_OK:
result = dlg.GetSelections()
for i in result: DelList.append(TextList[i])
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
itemName = self.GPXtree.GetItemText(item)
if itemName in DelList:
if 'PWDR' in itemName[:4]: nItems['PWDR'] -= 1
elif 'SASD' in itemName[:4]: nItems['SASD'] -= 1
elif 'REFD' in itemName[:4]: nItems['REFD'] -= 1
elif 'IMG' in itemName[:3]: nItems['IMG'] -= 1
elif 'HKLF' in itemName[:4]: nItems['HKLF'] -= 1
elif 'PDF' in itemName[:3]: nItems['PDF'] -= 1
DelItemList.append(item)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
for item in DelItemList:
self.GPXtree.Delete(item)
if item == selItem: selItem = self.root
if DelList:
self.PickId = 0
self.PickIdText = None
self.PatternId = 0
if nItems['PWDR']:
wx.CallAfter(G2plt.PlotPatterns,self,True)
else:
self.G2plotNB.Delete('Powder Patterns')
self.lastPlotType = None
if not nItems['IMG']:
self.G2plotNB.Delete('2D Powder Image')
if not nItems['HKLF']:
self.G2plotNB.Delete('Structure Factors')
if '3D Structure Factors' in self.G2plotNB.plotList:
self.G2plotNB.Delete('3D Structure Factors')
finally:
dlg.Destroy()
if DelList:
SelectDataTreeItem(self,selItem)
wx.CallAfter(self.GPXtree.SelectItem,selItem)
def OnPlotDelete(self,event):
'''Delete one or more plots from plot window. Called by the
Data/DeletePlots menu
'''
plotNames = self.G2plotNB.plotList
if len(plotNames):
dlg = G2G.G2MultiChoiceDialog(self, 'Which plots to delete?', 'Delete plots', plotNames, wx.CHOICEDLG_STYLE)
try:
if dlg.ShowModal() == wx.ID_OK:
result = dlg.GetSelections()
result.sort(reverse=True)
for i in result:
self.G2plotNB.Delete(plotNames[i])
finally:
dlg.Destroy()
def OnFileReopen(self, event):
files = GSASIIpath.GetConfigValue('previous_GPX_files')
if not files:
print('no previous projects found')
return
sellist = []
for f in files:
dirname,filroot = os.path.split(f)
if os.path.exists(f) and '.gpx' in f:
sellist.append("{} from {}".format(filroot,dirname))
# else:
# sellist.append("not found: {}".format(f))
dlg = G2G.G2SingleChoiceDialog(self,
'Select previous project to open',
'Select project',sellist)
if dlg.ShowModal() == wx.ID_OK:
sel = dlg.GetSelection()
dlg.Destroy()
else:
dlg.Destroy()
return
filroot,dirname = sellist[sel].split(' from ')
f = os.path.join(dirname,filroot)
if os.path.exists(f):
self.OnFileOpen(event, filename=f)
self.LastGPXdir = dirname
else:
print('file not found',f)
def OnFileOpen(self, event, filename=None):
'''Gets a GSAS-II .gpx project file in response to the
File/Open Project menu button
'''
def SaveOld():
'''See if we should save current project and continue
to read another.
returns True if the project load should continue
'''
if self.dataWindow:
self.dataWindow.ClearData()
dlg = wx.MessageDialog(self,
'Do you want to save and replace the current project?\n(Use No to read without saving or Cancel to continue with current project)',
'Save & Overwrite?',
wx.YES|wx.NO|wx.CANCEL)
try:
result = dlg.ShowModal()
finally:
dlg.Destroy()
if result == wx.ID_NO:
result = True
elif result == wx.ID_CANCEL:
return False
else:
if not self.OnFileSave(None): return False
self.GPXtree.DeleteChildren(self.root)
self.GSASprojectfile = ''
self.HKL = []
if self.G2plotNB.plotList:
self.G2plotNB.clear()
return True
def GetGPX():
if self.LastGPXdir:
pth = self.LastGPXdir
else:
pth = '.'
#if GSASIIpath.GetConfigValue('debug'): print('debug: open from '+pth)
dlg = wx.FileDialog(self, 'Choose GSAS-II project file', pth,
wildcard='GSAS-II project file (*.gpx)|*.gpx',style=wx.FD_OPEN)
try:
if dlg.ShowModal() != wx.ID_OK: return
self.GSASprojectfile = dlg.GetPath()
self.GSASprojectfile = G2IO.FileDlgFixExt(dlg,self.GSASprojectfile)
self.LastGPXdir = dlg.GetDirectory()
finally:
dlg.Destroy()
self.EnablePlot = False
if self.GPXtree.GetChildrenCount(self.root,False):
if not SaveOld(): return
if not filename:
GetGPX()
filename = self.GSASprojectfile
else:
try:
self.GSASprojectfile = os.path.splitext(filename)[0]+u'.gpx'
except:
self.GSASprojectfile = os.path.splitext(filename)[0]+'.gpx'
self.dirname = os.path.split(filename)[0]
self.init_vars()
try:
self.StartProject() #open the file if possible
except:
print ('\nError opening file '+filename)
import traceback
print (traceback.format_exc())
def StartProject(self):
'''Opens a GSAS-II project file & selects the 1st available data set to
display (PWDR, HKLF, REFD or SASD)
'''
Id = 0
phaseId = None
G2IO.ProjFileOpen(self)
self.GPXtree.SetItemText(self.root,'Project: '+self.GSASprojectfile)
self.GPXtree.Expand(self.root)
self.HKL = []
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if name[:4] in ['PWDR','HKLF','IMG ','PDF ','SASD','REFD']:
if not Id:
if name[:4] == 'IMG ':
Id = GetGPXtreeItemId(self,item,'Image Controls')
else:
Id = item
elif name == "Phases":
phaseId = item
elif name == 'Controls':
data = self.GPXtree.GetItemPyData(item)
if data:
for item in self.Refine: item.Enable(True)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
if phaseId: # show all phases
self.GPXtree.Expand(phaseId)
if Id:
self.EnablePlot = True
self.GPXtree.Expand(Id)
SelectDataTreeItem(self,Id)
self.GPXtree.SelectItem(Id) # needed on OSX or item is not selected in tree; perhaps not needed elsewhere
elif phaseId:
Id = phaseId
# open 1st phase
Id, unused = self.GPXtree.GetFirstChild(phaseId)
SelectDataTreeItem(self,Id)
self.GPXtree.SelectItem(Id) # as before for OSX
self.CheckNotebook()
if self.dirname: os.chdir(self.dirname) # to get Mac/Linux to change directory!
pth = os.path.split(os.path.abspath(self.GSASprojectfile))[0]
if GSASIIpath.GetConfigValue('Save_paths'):
G2G.SaveGPXdirectory(pth,write=False)
config = G2G.GetConfigValsDocs()
GSASIIpath.addPrevGPX(self.GSASprojectfile,config)
G2G.SaveConfigVars(config)
self.LastGPXdir = pth
def OnFileClose(self, event):
'''Clears the data tree in response to the
File/New Project menu button. User is given option to save
the project.
'''
dlg = wx.MessageDialog(self,
'Do you want to save the current project and start with an empty one?\n(Use No to clear without saving or Cancel to continue with current project)',
'Save & Clear?',
wx.YES | wx.NO | wx.CANCEL)
try:
result = dlg.ShowModal()
            if result == wx.ID_YES:    # dialog buttons are Yes/No/Cancel
self.OnFileSaveMenu(event)
if result != wx.ID_CANCEL:
self.GSASprojectfile = ''
self.GPXtree.SetItemText(self.root,'Project: ')
self.GPXtree.DeleteChildren(self.root)
self.dataWindow.ClearData()
if len(self.HKL): self.HKL = []
if self.G2plotNB.plotList:
self.G2plotNB.clear()
self.SetTitleByGPX()
self.EnableRefineCommand()
self.init_vars()
finally:
dlg.Destroy()
def OnFileSave(self, event):
'''Save the current project in response to the
File/Save Project menu button
'''
if self.GSASprojectfile:
self.GPXtree.SetItemText(self.root,'Project: '+self.GSASprojectfile)
self.CheckNotebook()
G2IO.ProjFileSave(self)
return True
else:
return self.OnFileSaveas(event)
def OnNewGSASII(self, event):
'''Gets a GSAS-II .gpx project file in response to the
File/Open new window menu button. Runs only on Mac.
'''
if self.LastGPXdir:
pth = self.LastGPXdir
else:
pth = '.'
GSASprojectfile = ''
dlg = wx.FileDialog(self, 'Choose GSAS-II project file', pth,
wildcard='GSAS-II project file (*.gpx)|*.gpx',style=wx.FD_OPEN)
try:
if dlg.ShowModal() == wx.ID_OK:
GSASprojectfile = dlg.GetPath()
GSASprojectfile = G2IO.FileDlgFixExt(dlg,GSASprojectfile)
self.LastGPXdir = dlg.GetDirectory()
finally:
dlg.Destroy()
G2script = os.path.join(os.path.split(__file__)[0],'GSASII.py')
GSASIIpath.MacStartGSASII(G2script,GSASprojectfile)
def SetTitleByGPX(self):
'''Set the title for the two window frames
'''
projName = os.path.split(self.GSASprojectfile)[1]
if not projName: projName = "<unnamed project>"
if self.testSeqRefineMode():
s = u' (sequential refinement)'
else:
s = u''
self.SetTitle("GSAS-II project: "+projName + s)
self.plotFrame.SetTitle("GSAS-II plots: "+projName)
def OnFileSaveas(self, event):
'''Save the current project in response to the
File/Save as menu button
'''
if GSASIIpath.GetConfigValue('Starting_directory'):
pth = GSASIIpath.GetConfigValue('Starting_directory')
pth = os.path.expanduser(pth)
elif self.LastGPXdir:
pth = self.LastGPXdir
else:
pth = '.'
dlg = wx.FileDialog(self, 'Choose GSAS-II project file name', pth, self.newGPXfile,
'GSAS-II project file (*.gpx)|*.gpx',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK: #TODO: what about Cancel?
self.GSASprojectfile = dlg.GetPath()
self.GSASprojectfile = G2IO.FileDlgFixExt(dlg,self.GSASprojectfile)
self.GPXtree.SetItemText(self.root,'Project: '+self.GSASprojectfile)
self.CheckNotebook()
G2IO.ProjFileSave(self)
self.SetTitleByGPX()
os.chdir(dlg.GetDirectory()) # to get Mac/Linux to change directory!
config = G2G.GetConfigValsDocs()
GSASIIpath.addPrevGPX(self.GSASprojectfile,config)
return True
else:
return False
finally:
dlg.Destroy()
def ExpandAll(self,event):
'''Expand all tree items or those of a single type
'''
txt = self.GetMenuBar().GetLabel(event.Id)
if txt == 'all':
self.ExpandingAll = True
try:
self.GPXtree.ExpandAll()
finally:
self.ExpandingAll = False
else:
self.ExpandingAll = True
try:
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if name.startswith(txt+' '): self.GPXtree.Expand(item)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
finally:
self.ExpandingAll = False
def MoveTreeItems(self,event):
'''Move tree items of a single type to the end of the tree
'''
txt = self.GetMenuBar().GetLabel(event.Id)
# make a list of items to copy
copyList = []
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
if self.GPXtree.GetItemText(item).startswith(txt+' '):
copyList.append(item)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
self.ExpandingAll = True
try:
for item in copyList:
name = self.GPXtree.GetItemText(item)
newId = self.GPXtree.AppendItem(self.root,name)
self.GPXtree.SetItemPyData(newId,self.GPXtree.GetItemPyData(item))
chld, chldcookie = self.GPXtree.GetFirstChild(item)
while chld:
chname = self.GPXtree.GetItemText(chld)
newCh = self.GPXtree.AppendItem(newId,chname)
self.GPXtree.SetItemPyData(newCh,self.GPXtree.GetItemPyData(chld))
chld, chldcookie = self.GPXtree.GetNextChild(item, chldcookie)
self.GPXtree.Delete(item)
finally:
self.ExpandingAll = False
SelectDataTreeItem(self,self.root)
def ExitMain(self, event):
'''Called if exit selected or the main window is closed
        records the last position of the data & plot windows; saved to the config.py file
NB: not called if console window closed
'''
if self.GPXtree.GetCount() > 1:
dlg = wx.MessageDialog(self,
'Do you want to save and exit?\n(Use No to exit without save or Cancel to prevent exiting)',
'Confirm exit/save?',
wx.YES|wx.NO|wx.CANCEL)
try:
result = dlg.ShowModal()
finally:
dlg.Destroy()
else:
result = wx.ID_NO
if result == wx.ID_NO:
pass
elif result == wx.ID_CANCEL:
return
else:
if not self.OnFileSave(event): return
FrameInfo = {'Main_Pos':tuple(self.GetPosition()),
'Main_Size':tuple(self.GetSize()),
'Plot_Pos':tuple(self.plotFrame.GetPosition()),
'Plot_Size':tuple(self.plotFrame.GetSize())}
GSASIIpath.SetConfigValue(FrameInfo)
# FramePos = {'Main_Pos':tuple(self.GetPosition()),'Plot_Pos':tuple(self.plotFrame.GetPosition())}
# GSASIIpath.SetConfigValue(FramePos)
config = G2G.GetConfigValsDocs()
G2G.SaveConfigVars(config)
if self.G2plotNB:
self.G2plotNB.Destroy()
if self.undofile:
os.remove(self.undofile)
sys.exit()
def OnExportPeakList(self,event):
pth = G2G.GetExportPath(self)
dlg = wx.FileDialog(self, 'Choose output peak list file name', pth, '',
'(*.*)|*.*',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
self.peaklistfile = dlg.GetPath()
self.peaklistfile = G2IO.FileDlgFixExt(dlg,self.peaklistfile)
file = open(self.peaklistfile,'w')
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if 'PWDR' in name:
item2, cookie2 = self.GPXtree.GetFirstChild(item)
wave = 0.0
while item2:
name2 = self.GPXtree.GetItemText(item2)
if name2 == 'Instrument Parameters':
Inst = self.GPXtree.GetItemPyData(item2)[0]
Type = Inst['Type'][0]
if 'T' not in Type:
wave = G2mth.getWave(Inst)
elif name2 == 'Peak List':
pkdata = self.GPXtree.GetItemPyData(item2)
peaks = pkdata['peaks']
sigDict = pkdata['sigDict']
item2, cookie2 = self.GPXtree.GetNextChild(item, cookie2)
file.write("#%s \n" % (name+' Peak List'))
if wave:
file.write('#wavelength = %10.6f\n'%(wave))
if 'T' in Type:
file.write('#%9s %10s %10s %12s %10s %10s %10s %10s %10s %10s\n'%('pos','dsp','esd','int','esd','alp','bet','sig','gam','FWHM'))
else:
file.write('#%9s %10s %10s %12s %10s %10s %10s %10s\n'%('pos','dsp','esd','int','esd','sig','gam','FWHM'))
for ip,peak in enumerate(peaks):
dsp = G2lat.Pos2dsp(Inst,peak[0])
if 'T' in Type: #TOF - more cols
esds = {'pos':0.,'int':0.,'alp':0.,'bet':0.,'sig':0.,'gam':0.}
for name in list(esds.keys()):
esds[name] = sigDict.get('%s%d'%(name,ip),0.)
sig = np.sqrt(peak[8])
gam = peak[10]
esddsp = abs(G2lat.Pos2dsp(Inst,peak[0]-esds['pos'])-G2lat.Pos2dsp(Inst,peak[0]+esds['pos']))/2.
FWHM = G2pwd.getgamFW(gam,sig) +(peak[4]+peak[6])*np.log(2.)/(peak[4]*peak[6]) #to get delta-TOF from Gam(peak)
file.write("%10.2f %10.5f %10.5f %12.2f%10.2f %10.3f %10.3f %10.3f %10.3f %10.3f\n" % \
(peak[0],dsp,esddsp,peak[2],esds['int'],peak[4],peak[6],peak[8],peak[10],FWHM))
else: #CW
#get esds from sigDict for each peak & put in output - esds for sig & gam from UVWXY?
esds = {'pos':0.,'int':0.,'sig':0.,'gam':0.}
for name in list(esds.keys()):
esds[name] = sigDict.get('%s%d'%(name,ip),0.)
                                sig = np.sqrt(peak[4])
import sys
import json
import numpy as np
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from . import commons as bc
from . import climate
# Location of the database files
database_directory = os.path.dirname(os.path.abspath(__file__)) + "/database/"
# Location of the climate data files
climatedata_directory = os.path.dirname(os.path.abspath(__file__)) + "/climatedata/"
def calc_energy(inputdata, DEBUG = False):
    # Variable that stores the calculation results
    resultJson = {
        "E_hotwatersupply": 0,    # Design primary energy consumption of the hot water supply system [MJ/year]
        "Es_hotwatersupply": 0,   # Standard primary energy consumption of the hot water supply system [MJ/year]
        "BEI_HW": 0,
        "hotwatersupply":{
        },
        "for_CGS":{
            "Edesign_MWh_Ele_day": 0,   # Electricity consumption of hot water units whose energy source is electricity
            "Edesign_MJ_CGS_day": 0,    # Primary energy consumption of hot water systems that use waste heat
            "Q_eqp_CGS_day": 0          # Hot water load of systems that can use waste heat
}
}
    # Load the region-specific data
with open(database_directory + 'AREA.json', 'r', encoding='utf-8') as f:
Area = json.load(f)
##----------------------------------------------------------------------------------
    ## Optional input (SP-6: calendar pattern)
##----------------------------------------------------------------------------------
input_calendar = []
if "calender" in inputdata["SpecialInputData"]:
input_calendar = inputdata["SpecialInputData"]["calender"]
##----------------------------------------------------------------------------------
    ## Optional input (SP-9: room usage conditions)
##----------------------------------------------------------------------------------
input_room_usage_condition = {}
if "room_usage_condition" in inputdata["SpecialInputData"]:
input_room_usage_condition = inputdata["SpecialInputData"]["room_usage_condition"]
#----------------------------------------------------------------------------------
    # Organize the input data (preparation for calculation)
#----------------------------------------------------------------------------------
    # Multiply by the number of units to obtain the heating capacity etc.
for unit_name in inputdata["HotwaterSupplySystems"]:
for unit_id, unit_configure in enumerate(inputdata["HotwaterSupplySystems"][unit_name]["HeatSourceUnit"]):
            # Heating capacity: kW/unit x number of units
inputdata["HotwaterSupplySystems"][unit_name]["HeatSourceUnit"][unit_id]["RatedCapacity_total"] = \
unit_configure["RatedCapacity"] * unit_configure["Number"]
            # Energy consumption: kW/unit x number of units
inputdata["HotwaterSupplySystems"][unit_name]["HeatSourceUnit"][unit_id]["RatedEnergyConsumption_total"] = \
unit_configure["RatedPowerConsumption"] * unit_configure["Number"] * 9760/3600 + \
unit_configure["RatedFuelConsumption"] * unit_configure["Number"]
            # Equipment efficiency
inputdata["HotwaterSupplySystems"][unit_name]["HeatSourceUnit"][unit_id]["RatedEfficiency"] = \
inputdata["HotwaterSupplySystems"][unit_name]["HeatSourceUnit"][unit_id]["RatedCapacity_total"] / \
inputdata["HotwaterSupplySystems"][unit_name]["HeatSourceUnit"][unit_id]["RatedEnergyConsumption_total"]
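            # Worked example with made-up figures: one unit of 50 kW heating capacity drawing
            # 0.2 kW of electricity and 55 kW of fuel gives
            #   RatedEnergyConsumption_total = 0.2*1*9760/3600 + 55*1 = about 55.5 kW (primary energy)
            #   RatedEfficiency = 50 / 55.5 = about 0.90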
            if DEBUG:
                print(f'Equipment {unit_name}, unit {unit_id+1}')
                print(f' - hot water unit efficiency {inputdata["HotwaterSupplySystems"][unit_name]["HeatSourceUnit"][unit_id]["RatedEfficiency"]}')
    # Compute the total heating capacity of each system and its capacity-weighted average efficiency.
for unit_name in inputdata["HotwaterSupplySystems"]:
        # Total heating capacity [kW]
inputdata["HotwaterSupplySystems"][unit_name]["RatedCapacity_total"] = 0
tmp_Capacity_efficiency = 0
for unit_id, unit_configure in enumerate(inputdata["HotwaterSupplySystems"][unit_name]["HeatSourceUnit"]):
            # Sum of the heating capacities
inputdata["HotwaterSupplySystems"][unit_name]["RatedCapacity_total"] += \
unit_configure["RatedCapacity_total"]
            # Heating capacity x efficiency
tmp_Capacity_efficiency += \
unit_configure["RatedCapacity_total"] * \
unit_configure["RatedEfficiency"]
        # Average efficiency weighted by heating capacity [-]
inputdata["HotwaterSupplySystems"][unit_name]["RatedEfficiency_total"] = \
tmp_Capacity_efficiency / \
inputdata["HotwaterSupplySystems"][unit_name]["RatedCapacity_total"]
#----------------------------------------------------------------------------------
    # Manual D.1  Standard daily integrated hot water use (standard room usage conditions)
#----------------------------------------------------------------------------------
for room_name in inputdata["HotwaterRoom"]:
        # Daily integrated hot water use per floor area [L/m2/day]
hotwater_demand, hotwater_demand_washroom, hotwater_demand_shower, hotwater_demand_kitchen, hotwater_demand_other = \
bc.get_roomHotwaterDemand(
inputdata["Rooms"][room_name]["buildingType"],
inputdata["Rooms"][room_name]["roomType"],
input_room_usage_condition
)
        # Reference daily integrated hot water supply volume [L/day]
inputdata["HotwaterRoom"][room_name]["hotwater_demand"] = hotwater_demand * inputdata["Rooms"][room_name]["roomArea"]
inputdata["HotwaterRoom"][room_name]["hotwater_demand_washroom"] = hotwater_demand_washroom * inputdata["Rooms"][room_name]["roomArea"]
inputdata["HotwaterRoom"][room_name]["hotwater_demand_shower"] = hotwater_demand_shower * inputdata["Rooms"][room_name]["roomArea"]
inputdata["HotwaterRoom"][room_name]["hotwater_demand_kitchen"] = hotwater_demand_kitchen * inputdata["Rooms"][room_name]["roomArea"]
inputdata["HotwaterRoom"][room_name]["hotwater_demand_other"] = hotwater_demand_other * inputdata["Rooms"][room_name]["roomArea"]
        # Room usage schedule of each room (= simultaneous usage rate; rooms with hot water demand are assumed to be air-conditioned)
roomScheduleRoom, _, _, _, _ = \
bc.get_roomUsageSchedule(inputdata["Rooms"][room_name]["buildingType"], inputdata["Rooms"][room_name]["roomType"], input_calendar)
inputdata["HotwaterRoom"][room_name]["hotwaterSchedule"] = np.zeros(365)
inputdata["HotwaterRoom"][room_name]["hotwaterSchedule"][ np.sum(roomScheduleRoom,1) > 0 ] = 1
        # Daily hot water supply volume [L/day] (365x1)
inputdata["HotwaterRoom"][room_name]["hotwater_demand_daily"] = \
inputdata["HotwaterRoom"][room_name]["hotwater_demand"] * inputdata["HotwaterRoom"][room_name]["hotwaterSchedule"]
inputdata["HotwaterRoom"][room_name]["hotwater_demand_washroom_daily"] = \
inputdata["HotwaterRoom"][room_name]["hotwater_demand_washroom"] * inputdata["HotwaterRoom"][room_name]["hotwaterSchedule"]
inputdata["HotwaterRoom"][room_name]["hotwater_demand_shower_daily"] = \
inputdata["HotwaterRoom"][room_name]["hotwater_demand_shower"] * inputdata["HotwaterRoom"][room_name]["hotwaterSchedule"]
inputdata["HotwaterRoom"][room_name]["hotwater_demand_kitchen_daily"] = \
inputdata["HotwaterRoom"][room_name]["hotwater_demand_kitchen"] * inputdata["HotwaterRoom"][room_name]["hotwaterSchedule"]
inputdata["HotwaterRoom"][room_name]["hotwater_demand_other_daily"] = \
inputdata["HotwaterRoom"][room_name]["hotwater_demand_other"] * inputdata["HotwaterRoom"][room_name]["hotwaterSchedule"]
        # Optional input of the daily hot water supply volume [L/day] (365x1) (SP-11: daily integrated hot water use)
if "hotwater_demand_daily" in inputdata["SpecialInputData"]:
if room_name in inputdata["SpecialInputData"]["hotwater_demand_daily"]:
if "洗面" in inputdata["SpecialInputData"]["hotwater_demand_daily"][room_name]:
inputdata["HotwaterRoom"][room_name]["hotwater_demand_washroom_daily"] = \
np.array(inputdata["SpecialInputData"]["hotwater_demand_daily"][room_name]["洗面"])
if "シャワー" in inputdata["SpecialInputData"]["hotwater_demand_daily"][room_name]:
inputdata["HotwaterRoom"][room_name]["hotwater_demand_shower_daily"] = \
np.array(inputdata["SpecialInputData"]["hotwater_demand_daily"][room_name]["シャワー"])
if "厨房" in inputdata["SpecialInputData"]["hotwater_demand_daily"][room_name]:
inputdata["HotwaterRoom"][room_name]["hotwater_demand_kitchen_daily"] = \
np.array(inputdata["SpecialInputData"]["hotwater_demand_daily"][room_name]["厨房"])
if "その他" in inputdata["SpecialInputData"]["hotwater_demand_daily"][room_name]:
inputdata["HotwaterRoom"][room_name]["hotwater_demand_other_daily"] = \
np.array(inputdata["SpecialInputData"]["hotwater_demand_daily"][room_name]["その他"])
                # Update the total
inputdata["HotwaterRoom"][room_name]["hotwater_demand_daily"] = \
np.array(inputdata["HotwaterRoom"][room_name]["hotwater_demand_washroom_daily"]) + \
np.array(inputdata["HotwaterRoom"][room_name]["hotwater_demand_shower_daily"]) + \
np.array(inputdata["HotwaterRoom"][room_name]["hotwater_demand_kitchen_daily"]) + \
np.array(inputdata["HotwaterRoom"][room_name]["hotwater_demand_other_daily"])
        if DEBUG:
            print(f'Room {room_name}')
            print(f' - reference hot water use [L/day] {inputdata["HotwaterRoom"][room_name]["hotwater_demand"]}')
            print(f' - number of days with hot water use {np.sum(inputdata["HotwaterRoom"][room_name]["hotwaterSchedule"])}')
            print(f' - daily hot water use (annual total) {np.sum(inputdata["HotwaterRoom"][room_name]["hotwater_demand_daily"])}')
            print(f' - daily hot water use, washrooms {np.sum(inputdata["HotwaterRoom"][room_name]["hotwater_demand_washroom_daily"])}')
            print(f' - daily hot water use, showers {np.sum(inputdata["HotwaterRoom"][room_name]["hotwater_demand_shower_daily"])}')
            print(f' - daily hot water use, kitchens {np.sum(inputdata["HotwaterRoom"][room_name]["hotwater_demand_kitchen_daily"])}')
            print(f' - daily hot water use, other {np.sum(inputdata["HotwaterRoom"][room_name]["hotwater_demand_other_daily"])}')
# np.savetxt("日別給湯使用量(手洗い)_" + room_name + ".txt", inputdata["HotwaterRoom"][room_name]["hotwater_demand_washroom_daily"])
# np.savetxt("日別給湯使用量(シャワー)_" + room_name + ".txt", inputdata["HotwaterRoom"][room_name]["hotwater_demand_shower_daily"])
# np.savetxt("日別給湯使用量(厨房)_" + room_name + ".txt", inputdata["HotwaterRoom"][room_name]["hotwater_demand_kitchen_daily"])
# np.savetxt("日別給湯使用量(その他)_" + room_name + ".txt", inputdata["HotwaterRoom"][room_name]["hotwater_demand_other_daily"])
#----------------------------------------------------------------------------------
    # Manual D.5  Linear heat loss coefficient of hot water piping
#----------------------------------------------------------------------------------
    # Load the linear heat loss coefficients for hot water piping
with open(database_directory + 'ThermalConductivityPiping.json', 'r', encoding='utf-8') as f:
thermal_conductivity_dict = json.load(f)
for unit_name in inputdata["HotwaterSupplySystems"]:
        # Connection pipe diameter class
if inputdata["HotwaterSupplySystems"][unit_name]["PipeSize"] <= 13:
inputdata["HotwaterSupplySystems"][unit_name]["PipeSizeType"] = "13A以下"
elif inputdata["HotwaterSupplySystems"][unit_name]["PipeSize"] <= 20:
inputdata["HotwaterSupplySystems"][unit_name]["PipeSizeType"] = "20A以下"
elif inputdata["HotwaterSupplySystems"][unit_name]["PipeSize"] <= 25:
inputdata["HotwaterSupplySystems"][unit_name]["PipeSizeType"] = "25A以下"
elif inputdata["HotwaterSupplySystems"][unit_name]["PipeSize"] <= 30:
inputdata["HotwaterSupplySystems"][unit_name]["PipeSizeType"] = "30A以下"
elif inputdata["HotwaterSupplySystems"][unit_name]["PipeSize"] <= 40:
inputdata["HotwaterSupplySystems"][unit_name]["PipeSizeType"] = "40A以下"
elif inputdata["HotwaterSupplySystems"][unit_name]["PipeSize"] <= 50:
inputdata["HotwaterSupplySystems"][unit_name]["PipeSizeType"] = "50A以下"
elif inputdata["HotwaterSupplySystems"][unit_name]["PipeSize"] <= 60:
inputdata["HotwaterSupplySystems"][unit_name]["PipeSizeType"] = "60A以下"
elif inputdata["HotwaterSupplySystems"][unit_name]["PipeSize"] <= 75:
inputdata["HotwaterSupplySystems"][unit_name]["PipeSizeType"] = "75A以下"
elif inputdata["HotwaterSupplySystems"][unit_name]["PipeSize"] <= 80:
inputdata["HotwaterSupplySystems"][unit_name]["PipeSizeType"] = "80A以下"
elif inputdata["HotwaterSupplySystems"][unit_name]["PipeSize"] <= 100:
inputdata["HotwaterSupplySystems"][unit_name]["PipeSizeType"] = "100A以下"
elif inputdata["HotwaterSupplySystems"][unit_name]["PipeSize"] <= 125:
inputdata["HotwaterSupplySystems"][unit_name]["PipeSizeType"] = "125A以下"
else:
inputdata["HotwaterSupplySystems"][unit_name]["PipeSizeType"] = "125Aより大きい"
        # Linear heat loss coefficient
inputdata["HotwaterSupplySystems"][unit_name]["heatloss_coefficient"] = \
thermal_conductivity_dict[inputdata["HotwaterSupplySystems"][unit_name]["PipeSizeType"]][inputdata["HotwaterSupplySystems"][unit_name]["InsulationType"]]
        if DEBUG:
            print(f'Equipment {unit_name}')
            print(f' - pipe connection diameter class {inputdata["HotwaterSupplySystems"][unit_name]["PipeSizeType"]}')
            print(f' - linear heat loss coefficient {inputdata["HotwaterSupplySystems"][unit_name]["heatloss_coefficient"]}')
#----------------------------------------------------------------------------------
    # Manual D.6  Daily average feed water temperature
#----------------------------------------------------------------------------------
    # Read outdoor air temperature data (DAT format) <array of 365 values>
Toa_ave = climate.readDatClimateData(climatedata_directory + "/" +
Area[inputdata["Building"]["Region"]+"地域"]["気象データファイル名(給湯)"])
    # Air-conditioning operation mode
with open(database_directory + 'ACoperationMode.json', 'r', encoding='utf-8') as f:
ACoperationMode = json.load(f)
    # Season type of each day (cooling, heating or intermediate period) (365x1 array)
ac_mode = ACoperationMode[Area[inputdata["Building"]["Region"]+"地域"]["空調運転モードタイプ"]]
if inputdata["Building"]["Region"] == '1' or inputdata["Building"]["Region"] == '2':
TWdata = 0.6639*Toa_ave + 3.466
elif inputdata["Building"]["Region"] == '3' or inputdata["Building"]["Region"] == '4':
TWdata = 0.6054*Toa_ave + 4.515
elif inputdata["Building"]["Region"] == '5':
TWdata = 0.8660*Toa_ave + 1.665
elif inputdata["Building"]["Region"] == '6':
TWdata = 0.8516*Toa_ave + 2.473
elif inputdata["Building"]["Region"] == '7':
TWdata = 0.9223*Toa_ave + 2.097
elif inputdata["Building"]["Region"] == '8':
TWdata = 0.6921*Toa_ave + 7.167
# if DEBUG:
# np.savetxt("日平均外気温度.txt", Toa_ave)
# np.savetxt("日平均給水温度.txt", TWdata)
#----------------------------------------------------------------------------------
    # Manual 5.2  Daily integrated hot water use
#----------------------------------------------------------------------------------
    # Compute the capacity ratio of the heat sources serving each room.
for room_name in inputdata["HotwaterRoom"]:
inputdata["HotwaterRoom"][room_name]["RatedCapacity_All"] = 0
for unit_id, unit_configure in enumerate(inputdata["HotwaterRoom"][room_name]["HotwaterSystem"]):
inputdata["HotwaterRoom"][room_name]["HotwaterSystem"][unit_id]["RatedCapacity_total"] = \
inputdata["HotwaterSupplySystems"][ unit_configure["SystemName"] ]["RatedCapacity_total"]
inputdata["HotwaterRoom"][room_name]["RatedCapacity_All"] += \
inputdata["HotwaterRoom"][room_name]["HotwaterSystem"][unit_id]["RatedCapacity_total"]
for room_name in inputdata["HotwaterRoom"]:
        if DEBUG:
            print(f'Room {room_name}')
for unit_id, unit_configure in enumerate(inputdata["HotwaterRoom"][room_name]["HotwaterSystem"]):
inputdata["HotwaterRoom"][room_name]["HotwaterSystem"][unit_id]["roomPowerRatio"] = \
inputdata["HotwaterRoom"][room_name]["HotwaterSystem"][unit_id]["RatedCapacity_total"] / \
inputdata["HotwaterRoom"][room_name]["RatedCapacity_All"]
            if DEBUG:
                print(f'Equipment {unit_id}')
                print(f'Heat source capacity ratio {inputdata["HotwaterRoom"][room_name]["HotwaterSystem"][unit_id]["roomPowerRatio"]}')
    # Daily integrated hot water use of each served room r on day d, including the reduction from water-saving fixtures
for unit_name in inputdata["HotwaterSupplySystems"]:
inputdata["HotwaterSupplySystems"][unit_name]["Qsr_eqp_daily"] = np.zeros(365)
inputdata["HotwaterSupplySystems"][unit_name]["Qs_eqp_daily"] = np.zeros(365)
for room_name in inputdata["HotwaterRoom"]:
for unit_id, unit_configure in enumerate(inputdata["HotwaterRoom"][room_name]["HotwaterSystem"]):
if unit_name == unit_configure["SystemName"]:
                    # Standard daily integrated hot water supply [L/day] -> needed for the pipe length calculation
inputdata["HotwaterSupplySystems"][unit_name]["Qsr_eqp_daily"] += \
inputdata["HotwaterRoom"][room_name]["hotwater_demand_daily"] * unit_configure["roomPowerRatio"]
                    # Daily integrated hot water supply including the water-saving effect [L/day]
                    # Coefficients are from Manual Appendix D.3 (reduction of hot water use by water-saving fixtures)
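                    # The branches below apply the Appendix D.3 reduction factors per end use:
                    # "無" (none) leaves every factor at 1.0, "自動給湯栓" (automatic faucet)
                    # reduces washroom use to 0.6, and "節湯B1" (water-saving B1) reduces
                    # shower use to 0.75; all other end uses stay at 1.0.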
if unit_configure["HotWaterSavingSystem"] == "無":
inputdata["HotwaterSupplySystems"][unit_name]["Qs_eqp_daily"] += \
inputdata["HotwaterRoom"][room_name]["hotwater_demand_washroom_daily"] * 1.0 * unit_configure["roomPowerRatio"] + \
inputdata["HotwaterRoom"][room_name]["hotwater_demand_shower_daily"] * 1.0 * unit_configure["roomPowerRatio"] + \
inputdata["HotwaterRoom"][room_name]["hotwater_demand_kitchen_daily"] * 1.0 * unit_configure["roomPowerRatio"] + \
inputdata["HotwaterRoom"][room_name]["hotwater_demand_other_daily"] * 1.0 * unit_configure["roomPowerRatio"]
elif unit_configure["HotWaterSavingSystem"] == "自動給湯栓":
inputdata["HotwaterSupplySystems"][unit_name]["Qs_eqp_daily"] += \
inputdata["HotwaterRoom"][room_name]["hotwater_demand_washroom_daily"] * 0.6 * unit_configure["roomPowerRatio"] + \
inputdata["HotwaterRoom"][room_name]["hotwater_demand_shower_daily"] * 1.0 * unit_configure["roomPowerRatio"] + \
inputdata["HotwaterRoom"][room_name]["hotwater_demand_kitchen_daily"] * 1.0 * unit_configure["roomPowerRatio"] + \
inputdata["HotwaterRoom"][room_name]["hotwater_demand_other_daily"] * 1.0 * unit_configure["roomPowerRatio"]
elif unit_configure["HotWaterSavingSystem"] == "節湯B1":
inputdata["HotwaterSupplySystems"][unit_name]["Qs_eqp_daily"] += \
inputdata["HotwaterRoom"][room_name]["hotwater_demand_washroom_daily"] * 1.0 * unit_configure["roomPowerRatio"] + \
inputdata["HotwaterRoom"][room_name]["hotwater_demand_shower_daily"] * 0.75 * unit_configure["roomPowerRatio"] + \
inputdata["HotwaterRoom"][room_name]["hotwater_demand_kitchen_daily"] * 1.0 * unit_configure["roomPowerRatio"] + \
inputdata["HotwaterRoom"][room_name]["hotwater_demand_other_daily"] * 1.0 * unit_configure["roomPowerRatio"]
        if DEBUG:
            print(f'Equipment {unit_name}')
            print(f' - daily integrated hot water supply {np.sum(inputdata["HotwaterSupplySystems"][unit_name]["Qsr_eqp_daily"])}')
            print(f' - daily integrated hot water supply (with water saving) {np.sum(inputdata["HotwaterSupplySystems"][unit_name]["Qs_eqp_daily"])}')
# np.savetxt("日積算湯供給量(節湯込み)_" + unit_name + ".txt", inputdata["HotwaterSupplySystems"][unit_name]["Qs_eqp_daily"])
#----------------------------------------------------------------------------------
    # Manual 5.3  Pipe length
#----------------------------------------------------------------------------------
for unit_name in inputdata["HotwaterSupplySystems"]:
inputdata["HotwaterSupplySystems"][unit_name]["L_eqp"] = \
np.max(inputdata["HotwaterSupplySystems"][unit_name]["Qsr_eqp_daily"]) * 7 / 1000
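        # Example with a made-up peak demand: max(Qsr_eqp_daily) = 2000 L/day gives
        # L_eqp = 2000 * 7 / 1000 = 14 m of equivalent pipe length.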
        if DEBUG:
            print(f'Equipment {unit_name}')
            print(f' - pipe length {inputdata["HotwaterSupplySystems"][unit_name]["L_eqp"]}')
#----------------------------------------------------------------------------------
    # Manual 5.4  Annual pipe heat loss
#----------------------------------------------------------------------------------
    # Indoor set-point temperature
Troom = np.zeros(365)
Taround = np.zeros(365)
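    # ac_mode holds the Japanese season labels from the database:
    # "冷房" = cooling (26 degC room set point), "中間" = intermediate (24 degC), "暖房" = heating (22 degC).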
for dd in range(0, 365):
if ac_mode[dd] == "冷房":
Troom[dd] = 26
elif ac_mode[dd] == "中間":
Troom[dd] = 24
elif ac_mode[dd] == "暖房":
Troom[dd] = 22
    # Pipe heat loss [kJ/day]
for unit_name in inputdata["HotwaterSupplySystems"]:
inputdata["HotwaterSupplySystems"][unit_name]["Qp_eqp"] = np.zeros(365)
for dd in range(0,365):
            # For debug output
Taround[dd] = (Toa_ave[dd]+Troom[dd])/2
if inputdata["HotwaterSupplySystems"][unit_name]["Qs_eqp_daily"][dd] > 0:
inputdata["HotwaterSupplySystems"][unit_name]["Qp_eqp"][dd] = \
inputdata["HotwaterSupplySystems"][unit_name]["L_eqp"] * \
inputdata["HotwaterSupplySystems"][unit_name]["heatloss_coefficient"] * \
( 60 - (Toa_ave[dd]+Troom[dd])/2 ) * 24 * 3600 * 0.001
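                # Units: L_eqp [m] x linear loss coefficient (taken as W/(m K)) x
                # (60 degC supply minus the mean of outdoor and room temperature) [K]
                # x 86400 s x 0.001 (J -> kJ), giving the pipe loss in kJ/day.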
        if DEBUG:
            print(f'Equipment {unit_name}')
            print(f' - pipe heat loss {np.sum(inputdata["HotwaterSupplySystems"][unit_name]["Qp_eqp"])}')
            print(f' - pipe heat loss coefficient {inputdata["HotwaterSupplySystems"][unit_name]["heatloss_coefficient"]}')
# np.savetxt("配管周囲温度.txt", Taround)
# ----------------------------------------------------------------------------------
    # Manual 5.5  Heat utilization of the solar water heating system
# ----------------------------------------------------------------------------------
    # Compute solar irradiation
_, _, Iod, Ios, Inn = climate.readHaspClimateData(climatedata_directory + "/C1_" +
Area[inputdata["Building"]["Region"]+"地域"]["気象データファイル名"])
for unit_name in inputdata["HotwaterSupplySystems"]:
        # Solar heat utilization [kJ/day]
inputdata["HotwaterSupplySystems"][unit_name]["Qs_solargain"] = np.zeros(365)
if inputdata["HotwaterSupplySystems"][unit_name]["SolarSystemArea"] != None:
            # Daily integrated solar irradiation [Wh/m2/day]
Id, _, Is, _ = climate.solarRadiationByAzimuth( \
inputdata["HotwaterSupplySystems"][unit_name]["SolarSystemDirection"], \
inputdata["HotwaterSupplySystems"][unit_name]["SolarSystemAngle"], \
Area[inputdata["Building"]["Region"]+"地域"]["緯度"], \
Area[inputdata["Building"]["Region"]+"地域"]["経度"], \
Iod, Ios, Inn)
            # Solar heat utilization [kJ/day]
inputdata["HotwaterSupplySystems"][unit_name]["Qs_solargain"] = \
(inputdata["HotwaterSupplySystems"][unit_name]["SolarSystemArea"]*0.4*0.85)*\
(Id + Is)*3600/1000000 * 1000
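            # The fixed factors 0.4 and 0.85 are system efficiency coefficients taken as given
            # (presumably collector and supply-side efficiencies per the manual); (Id + Is) is in
            # Wh/m2/day and *3600/1000000*1000 converts Wh to kJ, so Qs_solargain is in kJ/day.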
        if DEBUG:
            print(f'Equipment {unit_name}')
            print(f' - solar water heating system heat utilization {np.sum(inputdata["HotwaterSupplySystems"][unit_name]["Qs_solargain"])}')
# np.savetxt("太陽熱利用システムの熱利用量_" + unit_name + ".txt", inputdata["HotwaterSupplySystems"][unit_name]["Qs_solargain"])
#----------------------------------------------------------------------------------
    # Manual 5.6  Annual hot water supply load
#----------------------------------------------------------------------------------
for unit_name in inputdata["HotwaterSupplySystems"]:
        inputdata["HotwaterSupplySystems"][unit_name]["Qh_eqp_daily"] = np.zeros(365)
# Copyright @2018 The CNN_MonoFusion Authors (NetEaseAI-CVlab).
# All Rights Reserved.
#
# Please cite our paper if you find CNN_MonoFusion useful in your research!
#
# See the License for the specific language governing permissions
# and limitations under the License.
#
from __future__ import print_function
import numpy as np
import tensorflow as tf
import socket
import cv2
import time
import sys
sys.path.append("..")
import os
import argparse
import adenet_def
# socket parameters
address = ('', 6666)
# gpu config
# os.environ["CUDA_VISIBLE_DEVICES"]='1'
# depth parameters
depth_factor = 13107
depth_factor_inv = 1.0/depth_factor
max_depth = 4.5
depth_gradient_thr = 0.2
img_gradient_thr = 10
INPUT_SIZE = '160,240'
height = 160
width = 240
channels = 3
batch_size = 1
black_hole_width = 25
# camera parameters
cx_gt = 492.247
cy_gt = 263.355
focal_scale = 1.0
nyu_focal = 1.49333
# img mean
IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)
model_path = '../models/adenet_merge_nyu_kinect_tum/neair-adenet-final'
def gradient(img_gray):
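    # Per-axis intensity gradient of the grayscale image; presumably compared against
    # img_gradient_thr (and depth_gradient_thr for the predicted depth) to mask
    # unreliable depth values near strong edges.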
    gx = np.gradient(img_gray, axis=0)
# This is the class created to replace the functionality of PyMPChecker_Brute.py
from __future__ import print_function
import sys
import os
import re
import operator
import ephem # PyEphem module
from multiprocessing import Pool, cpu_count
import pkg_resources
import time
import pandas as pd
import psycopg2
from datetime import datetime
from astropy.coordinates import SkyCoord
from astropy import units as u
import numpy as np
from .utils import *
from .online_search import *
# Following package is to enable database queries
try:
import gotocat as gc
except:
print ('PyMPChecker: ** WARNING ** gotocat is unavailable, database queries will fail')
class Checker:
def __init__(self, interactive=True, verbose=False):
'''
Create the object and determine the list of available data files
'''
self.interactive = interactive
self.verbose = verbose
self.dir = pkg_resources.resource_filename('PyMPChecker', '/Monthly_Orbit_Catalogs/')
all_files = os.listdir(self.dir)
patt = '[0-9]{4}_[0-9]{2}'
self.epoch = [re.search(patt,x).group(0) for x in all_files if re.search(patt,x)]
# Check here if the epoch is too different from the current one
self.month = datetime.now().month
self.year = datetime.now().year
self.epoch_num = np.fromiter(map(lambda x: float(x[0:4])+(float(x[5:7])-1)/12.,
                                         self.epoch), dtype=float)
self.yelapsed = min(abs(self.epoch_num-(self.year+(self.month-1)/12.)))
if self.yelapsed > 0.5/12.:
print ('PyMPChecker: ** WARNING ** your ephemeris database is >1 month old, you should update')
# Database parameters (don't connect until you need to)
# These are also only for the phase 1 information; need to update the database
# interaction
self.conn_string = "host='goto-observatory.org' dbname='photometry' user='goto' password='<PASSWORD>'"
self.conn = None
self.observation_month_read = None
self.search_run = False
self.offline_search_run = False
self.online_search_run = False
def cone_search(self, ra_degrees, dec_degrees, obs_time, radius=1, online=False,
_debug=False):
'''
Fundamental search for the package. Will run a search centred around the provided
position, at the provided epoch, and return the results as a pandas table
:param ra_degrees:
:param dec_degrees:
:param obs_time:
:return:
Example shown below; this search should return (325395) 2009 CQ5, although
        the offline search failed on the day
mpc.cone_search(227.68176, 33.36493, '2019-09-17T20:44:00',radius=10.)
'''
self.online_search_run = False
self.offline_search_run = False
self.search_run = False
if online:
# This option uses a lightly edited version of Krzysztof's online
# checker
start_calculate_t = time.time()
# date string decimals on seconds is limited to 6
# search radius is in arcminutes, and smallest allowed value is 1
result = find_asteroid(None, ra_degrees, dec_degrees, datestr=obs_time[:26],
r=max([radius/60,1]),debug=_debug)
if result is not None:
res_table = pd.DataFrame(result,
columns=['name', 'dist', 'RA_deg', 'Dec_deg', 'mag', 'RA_off', 'Dec_off', 'comment'])
if radius < 60.:
res_table = res_table.drop(res_table[res_table['dist'] > radius].index)
self.n_results = len(res_table)
if self.n_results > 0:
self.table = res_table
else:
self.n_results = 0
self.table = pd.DataFrame(columns=['name', 'dist', 'RA_deg', 'Dec_deg', 'mag', 'RA_off', 'Dec_off', 'comment'])
end_calculate_t = time.time()
self.online_search_run = True
else:
# Here we do the offline search
# Position assumed to be in degrees
ra_radians = 0.0174533 * ra_degrees
dec_radians = 0.0174533 * dec_degrees
# Time should be a stamp like '2018-08-06T03:27:05', as read in from the file header; not
# sure the best way to check that it matches that format
year = obs_time[0:4]
month = obs_time[5:7]
day = obs_time[8:10]
hour = obs_time[11:13]
min = obs_time[14:16]
sec = obs_time[17:19]
full_date = year + "/" + month + "/" + day + " " + hour + ":" + min + ":" + sec
observation_month = year + "_" + month
# You don't need to read in the file for this specific month; the filename just tells you when
# the file was created
yelapsed = abs(self.epoch_num-(float(year)+(float(month)-1)/12.))
inearest = np.argmin(yelapsed)
if ((not self.observation_month_read) |
(self.epoch[inearest] != self.observation_month_read)):
# Only read in the catalog list if we haven't already
# pick the latest one available
# self.observation_month_read = sorted(self.epoch)[-1]
self.observation_month_read = self.epoch[inearest]
if yelapsed[inearest] > 0.5/12.:
print ('''
PyMPChecker: ** WARNING ** nearest ephemeris file is >= 1 month away from target date
accuracy may be compromised''')
start_read_t = time.time()
# Turn appropriate orbit datafile into catalog_list.
self.catalog_list = []
# f_catalog = open(self.dir+observation_month + "_ORB.DAT", "r")
f_catalog = open(self.dir+self.observation_month_read + "_ORB.DAT", "r")
for line in f_catalog:
self.catalog_list.append(line.rstrip())
f_catalog.close()
end_read_t = time.time()
self.read_elapsed = end_read_t-start_read_t
start_calculate_t = time.time()
num_jobs = (len(self.catalog_list) // 20000) * (cpu_count())
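            # Split the catalog so each round of workers handles roughly 20,000 entries;
            # num_jobs is kept a multiple of cpu_count() so the chunks divide evenly
            # across processors.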
if self.verbose:
print ("Sorting catalog list of", len(self.catalog_list), "minor planets",)
print ("into", num_jobs, "jobs across", cpu_count(), "processors.")
sub_catalog_list = split_seq(self.catalog_list, num_jobs)
processing_inputs = []
for sublist in sub_catalog_list:
processing_inputs.append([sublist, ra_radians, dec_radians, full_date, radius])
# This is the bit that actually does the work
p = Pool(cpu_count())
result = p.map_async(sep_sort, processing_inputs)
close_bodies = result.get()
# Added to fix issue
# https://github.com/GOTO-OBS/PyMPChecker/issues/5
p.close()
p.join()
recomb_close_bodies = []
for job in range(num_jobs):
for list in range(len(close_bodies[job])):
recomb_close_bodies.append(close_bodies[job][list])
# The result here is a list of tuples, as the example below:
# ('2014 WN13 0', 793.2264337773519, '0:19:25.96', '7:31:27.2', '20.9')
# giving the name (with uncertainty character appended), distance from the provided position
# (arcsec), RA, Dec, and magnitude
self.final_sorted_bodies = sorted(recomb_close_bodies,
key=operator.itemgetter(1))
self.n_results = len(self.final_sorted_bodies)
if self.n_results > 0:
self.table = pd.DataFrame(self.final_sorted_bodies,
columns=['name','dist','RA_str','Dec_str','mag'])
# Convert the table into more useful data formats
self.table['accuracy'] = [x[-1] for x in self.table.name]
self.table.name = [x[:-2] for x in self.table.name]
self.table.dist = pd.to_numeric(self.table.dist)
self.table.mag = pd.to_numeric(self.table.mag)
c = SkyCoord(ra=self.table.RA_str, dec=self.table.Dec_str,
unit=(u.hourangle, u.deg))
self.table['RA_deg'] = c.ra.deg
self.table['Dec_deg'] = c.dec.deg
else:
self.table = pd.DataFrame(columns=['name','dist','RA_str','Dec_str','mag','accuracy','RA_deg','Dec_deg'])
end_calculate_t = time.time()
self.offline_search_run = True
self.search_run = self.offline_search_run or self.online_search_run
self.search_elapsed = end_calculate_t - start_calculate_t
if self.verbose:
if self.n_results > 0:
print('Got {} matches'.format(self.n_results))
else:
print ('Got no matches in given region')
def image_db_search(self, image_id, phase=None, online=False):
'''
This version will search over an image in the database, based on the footprint
:param image_id:
:return:
'''
if phase is None:
print ("PyMPChecker: ** WARNING ** no phase supplied, assuming most recent data")
self.phase = 4
else:
self.phase = phase
g = gc.GOTOdb(phase=self.phase)
# fov = g.query('SELECT fov, date FROM images WHERE id = %s', image_id)
fov = g.query('SELECT fov, '+g.image_date+' FROM '+g.imagedb+' WHERE id = %s', image_id)
self.footprint = np.array([list(i) for i in eval(fov['fov'].values[0])])
# self.obs_date = str(fov['date'].values[0])
self.obs_date = str(fov[g.image_date].values[0])
# if not self.conn:
# self.conn = psycopg2.connect(conn_string)
# self.cursor = self.conn.cursor()
#
# self.comm="""SELECT id,filename,jd,date,target FROM images
# WHERE instrument='UT4' AND filter='L' AND filename LIKE '%median.fits'
# AND target='"""+sel_field+"'"
self.footprint_search(online=online)
def image_search(self, filename, imagetype='IMAGE', online=False):
'''
Check for minor planets within a FITS image
'''
try:
# year, month, day, hour, min, sec, ra_degrees, dec_degrees, sorted_list_radius = extract_fits_params(filename)
obs_date, footprint = extract_fits_params(filename, imagetype=imagetype)
except:
print ("** ERROR ** can't read information from file {}".format(filename))
return
self.obs_date = obs_date
# print (obs_date)
self.footprint = footprint
self.footprint_search(online=online)
def footprint_search(self, online=False):
'''
This routine is used by both the image search routines, since they both rely
on searching within a GOTO footprint (but get the footprint differently)
:return:
'''
if self.verbose:
print ("Running footprint search over footprint\n{}".format(self.footprint))
if ((self.footprint[1,0] > self.footprint[0,0]) |
(self.footprint[2,0] > self.footprint[3,0])):
# The FoV spans the RA = 0 line here, so we need to be a bit more careful
# about how we calculate the centre
# Example is for image #470457 or 470498 (use this for image_db_search)
print ("Special here for crossing RA=0.0")
center = [np.mean(self.footprint[:,0]+[360.0,0.,0.,360.0]),
np.mean(self.footprint[:,1])]
half_size_array = [max(abs(self.footprint[:,0]+[360.0,0.0,0.0,360.0]
                                       -center[0]))*np.cos(np.deg2rad(center[1]))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
"""Usage: burstie.py NAME
[-t T]
[-p P]
[-b B]
[-w W]
[-q Q]
[-s S]
[--seed SEED]
[--dt DT]
[--sigma SIGMA]
Wilson-Cowan EI model of oscillatory bursting.
Arguments
NAME name of the results file
Options:
-h help show this screen
    -t T          simulation run time [default: 3.0]
-p P E drive at burst [default: 2]
-b B burst onset time [default: 1]
-w W burst onset length [default: 0.1]
-q Q avg I drive [default: 1]
-s S std dev of window variations [default: 0.1]
--seed SEED random seed
--dt DT time resolution [default: 1e-3]
--sigma SIGMA Population noise [default: 1e-2]
"""
from __future__ import division, print_function
from docopt import docopt
import numpy as np
from pykdf.kdf import save_kdf
from brian2 import *
from fakespikes import rates
def ie(t,
P,
t_burst,
w,
c1=15.0,
c2=15.0,
c3=15.0,
c4=3.0,
Q=1,
dt=1e-3,
min_P=0,
sigma=0.01):
# --
time = t * second
time_step = dt * second
# -
# Fixed parameters.
re = 1.0
ri = 0.5
kn = 1.0
k = 1.0
tau_e = 5 * msecond
tau_i = 10 * msecond
# -
# Define the burst, as part of the drive to E, i.e, variable P.
times = rates.create_times(t, dt)
P = rates.square_pulse(times, P, t_burst, w, dt, min_a=min_P)
# Scale it
P = P * (2** -0.03)
# Format for Brian2
P = TimedArray(P, dt=time_step)
# -
eqs = """
dE/dt = -E/tau_e + ((1 - re * E) * (1 / (1 + exp(-(k * c1 * E - k * c2 * I+ k * P(t) - 2))) - 1/(1 + exp(2*1.0)))) / tau_e + (sigma / tau_e**.5 * xi_e) : 1
dI/dt = -I/tau_i + ((1 - ri * I) * (1 / (1 + exp(-2 * (kn * c3 * E - kn * c4 * I + kn * Q - 2.5))) - 1/(1 + exp(2*2.5)))) / tau_i + (sigma / tau_i**.5 * xi_i) : 1
"""
pops = NeuronGroup(1, model=eqs, namespace={'Q': Q})
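    # The eqs above are the standard Wilson-Cowan form: each population decays with its
    # time constant (tau_e, tau_i) and is driven by a logistic response of its net input
    # (c1*E - c2*I + P(t) for E, c3*E - c4*I + Q for I); the subtracted 1/(1+exp(...))
    # term shifts the sigmoid so the resting state sits at zero, and xi_e / xi_i add
    # white noise scaled by sigma.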
pops.E = 0
pops.I = 0
# --
# Record
mon = StateMonitor(pops, ('E', 'I'), record=True)
# --
# Run
defaultclock.dt = time_step
run(time)
return mon.I.flatten(), mon.E.flatten()
if __name__ == "__main__":
args = docopt(__doc__, version='alpha')
try:
seed = int(args['--seed'])
except TypeError:
seed = None
pass
np.random.seed(seed)
# -
# Process params
t = float(args['-t'])
dt = float(args['--dt'])
t_burst = float(args['-b'])
w = float(args['-w'])
Q = float(args['-q'])
P = float(args['-p'])
s = float(args['-s'])
sigma = float(args['--sigma'])
# Only add noise to the window length
    if not np.allclose(s, 0):
import librosa
import numpy as np
import moviepy.editor as mpy
import random
import torch
from scipy.misc import toimage
from tqdm import tqdm
from pytorch_pretrained_biggan import (BigGAN, one_hot_from_names, truncated_noise_sample,
save_as_images, display_in_terminal)
import boto3
from botocore.exceptions import ClientError
import logging
from config import *
#set model based on resolution
def model_resolution(resolution = '128'):
"""
set model's resolution, default 128
128, 256, or 512
lower = faster generation, lower quality.
"""
model_name='biggan-deep-' + resolution
model = BigGAN.from_pretrained(model_name)
return model
def song_duration(duration = 30):
"""
    Song duration in seconds, returns frame_lim
default = 30 seconds
"""
seconds=duration
frame_lim=int(np.floor(seconds*22050/frame_length/batch_size))
return frame_lim
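# Worked example with the module defaults (22050 Hz audio, frame_length=512,
# batch_size=32): song_duration(30) = int(np.floor(30*22050/512/32)) = 40 batches.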
#set pitch sensitivity
def sensitivity_pitch(pitch_sensitivity=220):
"""
INT
Set how quickly images move according to pitch
Default 220
Recommended range: 200 – 295
"""
pitch_sensitivity=(300-pitch_sensitivity) * 512 / frame_length
return pitch_sensitivity
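# Worked example with the defaults: sensitivity_pitch(220) returns
# (300 - 220) * 512 / 512 = 80, the divisor applied to the chroma values when
# the class vectors are updated below.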
#set tempo sensitivity
def sensitivity_tempo(tempo_sensitivity=0.25):
"""
FLOAT between 0 and 1
Set how quickly images morph due to tempo
Default 0.25
Recommended range: 0.05 – 0.8
"""
tempo_sensitivity = tempo_sensitivity * frame_length / 512
return tempo_sensitivity
#how much the image morphs between frames
#default .5
#recommended range: 0.05 – 0.8
truncation = 0.5
#can reduce this number to make clearer images or increase to reduce computational load
#default: 512
#range: multiples of 64
frame_length = 512
#BigGAN generates the images in batches of size [batch_size].
#default 32
#only reason to lower this is if you run out of cuda memory. will take slightly longer.
batch_size=32
#set device
#use cuda or face a generation time in the hours. You have been warned.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#set smooth factor
def smooth_factor(smooth_factor=20):
"""
int > 0
smooths the class vectors to prevent small fluctuations in pitch from causing the frames to go back and forth
default 20
recommended range: 10 – 30
***Possibly change this to a variable.***
"""
if smooth_factor > 1:
smooth_factor=int(smooth_factor * 512 / frame_length)
else:
smooth_factor=smooth_factor
return smooth_factor
#get new jitters
def new_jitters(jitter):
"""
    update jitter vector (called every 200 frames in song_analysis) by setting ~half of noise vector units to lower sensitivity
"""
jitters=np.zeros(128)
for j in range(128):
if random.uniform(0,1)<0.5:
jitters[j]=1
else:
jitters[j]=1-jitter
return jitters
#get new update directions
def new_update_dir(nv2,update_dir):
"""
changes the direction of the noise vector
"""
for ni,n in enumerate(nv2):
if n >= 2*truncation - sensitivity_tempo():
update_dir[ni] = -1
elif n < -2*truncation + sensitivity_tempo():
update_dir[ni] = 1
return update_dir
#smooth class vectors
def smooth(class_vectors, smooth_factor = smooth_factor()):
if smooth_factor==1:
return class_vectors
class_vectors_terp=[]
for c in range(int(np.floor(len(class_vectors)/smooth_factor)-1)):
ci=c*smooth_factor
cva=np.mean(class_vectors[int(ci):int(ci)+smooth_factor],axis=0)
cvb=np.mean(class_vectors[int(ci)+smooth_factor:int(ci)+smooth_factor*2],axis=0)
for j in range(smooth_factor):
cvc = cva*(1-j/(smooth_factor-1)) + cvb*(j/(smooth_factor-1))
class_vectors_terp.append(cvc)
return np.array(class_vectors_terp)
#normalize class vector between 0-1
def normalize_cv(cv2):
min_class_val = min(i for i in cv2 if i != 0)
for ci,c in enumerate(cv2):
if c==0:
cv2[ci]=min_class_val
cv2=(cv2-min_class_val)/np.ptp(cv2)
return cv2
#creates the class and noise vectors files
def song_analysis(song = None, classes = None, jitter = 0.5, depth = 1):
"""
Inputs:
song: path of 30 second mp3 file
classes: LIST of classes by index from ImageNet, leave as None for four random classes -max 12 classes
jitter: FLOAT 0 to 1
depth: FLOAT 0 to 1
"""
#read song: audio waveform and sampling rate saved
#y = time, sr = sample rate
y, sr = librosa.load(song)
#create spectrogram
spec = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128,fmax=8000, hop_length=frame_length)
#get mean power at each time point
specm=np.mean(spec,axis=0)
#compute power gradient across time points
gradm=np.gradient(specm)
#set max to 1
gradm=gradm/np.max(gradm)
#set negative gradient time points to zero
gradm = gradm.clip(min=0)
#normalize mean power between 0-1
specm=(specm-np.min(specm))/np.ptp(specm)
#create chromagram of pitches X time points
chroma = librosa.feature.chroma_cqt(y=y, sr=sr, hop_length=frame_length)
#sort pitches by overall power
chromasort=np.argsort(np.mean(chroma,axis=1))[::-1]
#select 4 random classes if no classes given.
    if classes is None:
cls1000 = list(range(1000))
random.shuffle(cls1000)
classes = cls1000[:4]
num_classes = len(classes)
print('\nGenerating input vectors \n')
#initialize first class vector
cv1=np.zeros(1000)
for pi,p in enumerate(chromasort[:num_classes]):
if num_classes < 12:
cv1[classes[pi]] = chroma[p][np.min([np.where(chrow>0)[0][0] for chrow in chroma])]
else:
cv1[classes[p]] = chroma[p][np.min([np.where(chrow>0)[0][0] for chrow in chroma])]
#initialize first noise vector
nv1 = truncated_noise_sample(truncation=truncation)[0]
#initialize list of class and noise vectors
class_vectors=[cv1]
noise_vectors=[nv1]
#initialize previous vectors (will be used to track the previous frame)
cvlast=cv1
nvlast=nv1
#initialize the direction of noise vector unit updates
update_dir=np.zeros(128)
for ni,n in enumerate(nv1):
if n<0:
update_dir[ni] = 1
else:
update_dir[ni] = -1
#initialize noise unit update
update_last=np.zeros(128)
for i in tqdm(range(len(gradm))):
#print progress
pass
if i%200==0:
jitters=new_jitters(jitter)
#get last noise vector
nv1=nvlast
#set noise vector update based on direction, sensitivity, jitter, and combination of overall power and gradient of power
update = np.array([sensitivity_tempo() for k in range(128)]) * (gradm[i]+specm[i]) * update_dir * jitters
#smooth the update with the previous update (to avoid overly sharp frame transitions)
update=(update+update_last*3)/4
#set last update
update_last=update
#update noise vector
nv2=nv1+update
#append to noise vectors
noise_vectors.append(nv2)
#set last noise vector
nvlast=nv2
#update the direction of noise units
update_dir=new_update_dir(nv2,update_dir)
#get last class vector
cv1=cvlast
#generate new class vector
cv2=np.zeros(1000)
for j in range(num_classes):
cv2[classes[j]] = (cvlast[classes[j]] +
((chroma[chromasort[j]][i])/(sensitivity_pitch())))/(1+(1/((sensitivity_pitch()))))
#if more than 6 classes, normalize new class vector between 0 and 1, else simply set max class val to 1
if num_classes > 6:
cv2=normalize_cv(cv2)
else:
cv2=cv2/np.max(cv2)
#adjust depth
cv2=cv2*depth
#this prevents rare bugs where all classes are the same value
if np.std(cv2[np.where(cv2!=0)]) < 0.0000001:
cv2[classes[0]]=cv2[classes[0]]+0.01
#append new class vector
class_vectors.append(cv2)
#set last class vector
cvlast=cv2
#interpolate between class vectors of bin size [smooth_factor] to smooth frames
class_vectors=smooth(class_vectors,smooth_factor())
#save record of vectors for current video
np.save('class_vectors.npy',class_vectors)
np.save('noise_vectors.npy',noise_vectors)
return noise_vectors, class_vectors
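# Usage sketch (the mp3 path is hypothetical; with classes=None four random
# ImageNet classes are picked):
# noise_vectors, class_vectors = song_analysis(song='track.mp3', classes=None,
#                                              jitter=0.5, depth=1)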
def generate_images(noise_vectors, class_vectors):
"""
Take vectors from song_analysis and generate images
"""
#convert to Tensor
noise_vectors = torch.Tensor(
|
np.array(noise_vectors)
|
numpy.array
|
import numpy as np
import scipy
import matplotlib.pyplot as plt
import os
import re
from numpy import genfromtxt
import cv2
from sklearn.decomposition import NMF,KernelPCA
from scipy.signal import savgol_filter
from pysptools import eea
class spectroscopic_map:
def __init__(self,ir_file,folder):
self.ir_file=ir_file
self.folder=folder
self.exclude=[]
self.em_defined=False
def match_histrogram(self,array_to_match,array):
coeff=np.std(array)/np.std(array_to_match)
array_to_match=array_to_match*coeff
array_to_match=array_to_match-np.mean(array_to_match)+np.mean(array)
return array_to_match
def crawldir(self,topdir=[], ext='sxm'):
fn = dict()
for root, dirs, files in os.walk(topdir):
for name in files:
                if len(re.findall(r'\.'+ext,name)):
addname = os.path.join(root,name)
if root in fn.keys():
fn[root].append(addname)
else:
fn[root] = [addname]
return fn
def load_data(self,hs_resolution_x=16,hs_resolution_y=16,
exclude=False,hi_res_spatial_x=256,hi_res_spatial_y=256,data_format='new',
limit_range=False,sg=False,limit_maps=False,orig_size_x=512,orig_size_y=512):
self.hi_res_spatial_x=hi_res_spatial_x
self.hi_res_spatial_y=hi_res_spatial_y
if exclude!=False:
self.exclude=exclude
if data_format=='old':
self.ir_data=np.genfromtxt(self.ir_file,skip_header=1,delimiter=',',encoding="utf-8")
self.wavelength_list=
|
np.array([self.ir_data[:,0]],dtype='int')
|
numpy.array
|
import numpy as np
import matplotlib.pyplot as plt
import cv2
import math
from skimage import data
import pandas as pd
from itertools import product
from skimage.feature import greycomatrix, greycoprops
from scipy.stats import wilcoxon
from scipy.stats import binom_test
def normalize(x, scale=255):
x = (((x-x.min())/(x.max()-x.min()))*scale)
return x
class MSMFE:
def __init__(self, ref, imgs=None, vmin=0, vmax=255, nbit=8, ks=5, verbose=False,
features = ['Autocorrelation', 'ClusterProminence', 'ClusterShade', 'ClusterTendency', 'Contrast',
'Correlation', 'DifferenceEntropy', 'DifferenceVariance', 'Energy', 'Entropy',
'Id', 'Idm', 'Idmn', 'Idn', 'Imc1', 'Imc2', 'InverseVariance', 'JointAverage',
'MCC', 'MaximumProbability', 'SumAverage', 'SumEntropy', 'SumSquares']):
if verbose: print('\tInitializing ...')
ref = self.normalize(ref)
if imgs is not None:
self.keys = imgs.keys()
for key in imgs.keys():
imgs[key] = self.normalize(imgs[key])
if verbose: print('\tCreating GLCM(s) ...')
self.vmin = vmin
self.vmax = vmax
self.nbit = nbit
self.ks = ks
self.glcm_ref = self.fast_glcm(ref)
self.glcm_imgs = {}
self.features = features
self.error = {}
self.img_feature_maps = {}
self.feature_maps_ref = self.feature_maps(self.glcm_ref, features)
self.imgs = imgs
self.verbose=verbose
if verbose: print('\tDone creating.')
def get_names(self):
names = list(self.keys) + ['_Reference']
return names
def normalize(self, img, max=1, scale=255):
#Needs max set to one to account for PixelMiner not producing pixels up to 1
img = (img - img.min())/(max-img.min())
img *= scale
#img = img.astype(np.uint8)
return img
def get_feature_maps(self):
if self.imgs is not None:
for key in self.keys:
glcm = self.fast_glcm(self.imgs[key])
self.img_feature_maps[key] = self.feature_maps(glcm, self.features)
self.img_feature_maps['Reference'] = self.feature_maps_ref
return self.img_feature_maps
else:
return self.feature_maps_ref
def get_error(self, return_diff=False):
if self.imgs is not None:
for key in self.keys:
glcm = self.fast_glcm(self.imgs[key])
self.img_feature_maps[key] = self.feature_maps(glcm, self.features)
if return_diff:
diff_df = pd.DataFrame(index=self.keys, columns=self.features)
error_df = pd.DataFrame(index=self.keys, columns=self.features)
for feature in self.features:
#if self.verbose: print('\tDoing feature ...', feature, 'x'+str(len(self.keys)))
for key in self.keys:
#print('\t\t'+key)
#print('\t\t'+str(self.img_feature_maps.keys()))
img = self.img_feature_maps[key][feature]
ref = self.feature_maps_ref[feature]
diff = ref - img
if return_diff:
diff_df.at[key, feature] = diff.mean()
error = ((diff) ** 2).mean()
error_df.at[key, feature] = error
if return_diff:
return error_df, diff_df
else:
return error_df
else:
print('Input needs an image and a reference image to calculate error.')
def get_saliency(self, feature):
saliencies = []
for key in self.keys:
img = self.feature_maps[feature][key]
ref = self.feature_maps_ref[feature]
saliencies.append((ref - img) ** 2)
saliencies = np.asarray(saliencies)
return saliencies
def calculate_matrix(self, img, voxelCoordinates=None):
r"""
Compute GLCMs for the input image for every direction in 3D.
Calculated GLCMs are placed in array P_glcm with shape (i/j, a)
i/j = total gray-level bins for image array,
a = directions in 3D (generated by imageoperations.generateAngles)
"""
quant = normalize(img, scale=self.nbit).astype(np.int8)
        degrees = [0, np.pi/4, np.pi/2, 3*np.pi/4]  # four standard GLCM directions: 0, 45, 90, 135 degrees
distance = [1]
P_glcm = greycomatrix(quant, distance, degrees, levels=self.nbit)
P_glcm = np.moveaxis(P_glcm, -2, 0)
P_glcm = P_glcm.astype(np.float32)
sumP_glcm = np.sum(P_glcm, (1, 2)).astype(np.float32)
sumP_glcm[sumP_glcm == 0] = np.nan
P_glcm /= sumP_glcm[:, None, None, :]
P_glcm = np.moveaxis(P_glcm, -1, 0).squeeze()
return P_glcm
def fast_glcm(self, img, conv=True, scale=False):
min, max = self.vmin, self.vmax
shape = img.shape
if len(shape) > 2:
print('Shape of', shape, 'is invalid, images must be 2d.')
return
h,w = img.shape
# digitize
bins = np.linspace(min, max, self.nbit+1)[1:]
#print('Bins:', bins)
gl = np.digitize(img, bins) - 1
gl.shape
#print('Unique:', np.unique(gl))
#print('GL:', gl.min(), gl.max())
shifts = np.zeros((4, h, w))
shifts[0] = np.append( gl[:, 1:], gl[:, -1:], axis=1) # one
shifts[1] = np.append( gl[1:, :], gl[-1:, :], axis=0) # two
shifts[2] = np.append(shifts[0][1:, :], shifts[0][-1:, :], axis=0) # three
shifts[3] = np.append(shifts[0][:1, :], shifts[0][:-1, :], axis=0) # four
#plt.imshow(gl)
#plt.show()
#plt.imshow(shifts[0])
#plt.show()
# make glcm
glcm = np.zeros((4, self.nbit, self.nbit, h, w), dtype=np.uint8)
for n, shift in enumerate(shifts):
for i in range(self.nbit):
for j in range(self.nbit):
mask = ((gl==i) & (shift==j))
glcm[n, i, j, mask] = 1
if conv:
kernel = np.ones((self.ks, self.ks), dtype=np.uint8)
for i in range(self.nbit):
for j in range(self.nbit):
glcm[n, i, j] = cv2.filter2D(glcm[n, i, j], -1, kernel)
glcm = glcm.astype(np.float32)
if scale:
matrix = self.calculate_matrix(img)
#matrix = glcm.sum((3, 4))
#print('SHAPE OF THE SCIKIT IMAGE MATRIX:', matrix.shape)
glcm = matrix[:, :, :, None, None] * glcm
#for direction in range(4):
# matrix[direction] = self.normalize(matrix[direction], scale=1)
glcm = np.moveaxis(glcm, 0, -1)
return glcm
def get_means(self, img, glcm):
h,w = img.shape
mean_i = np.zeros((h,w), dtype=np.float32)
for i in range(self.nbit):
for j in range(self.nbit):
mean_i += glcm[i,j] * i / (self.nbit)**2
mean_j = np.zeros((h,w), dtype=np.float32)
for j in range(self.nbit):
for i in range(self.nbit):
mean_j += glcm[i,j] * j / (self.nbit)**2
return mean_i, mean_j
def get_stds(self, img, glcm):
h,w = img.shape
mean_i, mean_j = self.get_means(img, glcm)
std_i = np.zeros((h,w), dtype=np.float32)
for i in range(self.nbit):
for j in range(self.nbit):
std_i += (glcm[i,j] * i - mean_i)**2
std_i = np.sqrt(std_i)
std_j = np.zeros((h,w), dtype=np.float32)
for j in range(self.nbit):
for i in range(self.nbit):
std_j += (glcm[i,j] * j - mean_j)**2
std_j = np.sqrt(std_j)
return mean_i, mean_j, std_i, std_j
def get_max(self, glcm):
max_ = np.max(glcm, axis=(0,1))
return(max_)
def feature_maps(self, glcm, features):
glcm = normalize(glcm, scale=2)
#h, w = glcm.shape[-3], glcm.shape[-2]
#glcm *= 16
#print('GLCM:', glcm.min(), glcm.max())
'''
for q in range(4):
count = 1
for o in range(8):
for p in range(8):
plt.xticks([])
plt.yticks([])
plt.subplot(8, 8, count)
test = glcm[o, p, :, :, q]
plt.imshow(test, vmax=25)
count+=1
plt.show()
'''
eps = np.spacing(1)
bitVector = np.arange(0,self.nbit,1)
i, j = np.meshgrid(bitVector, bitVector, indexing='ij', sparse=True)
iAddj = i + j
iSubj = np.abs(i-j)
ux = i[:, :, None, None, None] * glcm
uy = j[:, :, None, None, None] * glcm
#print('UX, UY:', ux.shape, uy.shape, ux.min(), ux.max())
'''
for q in range(4):
count = 1
for o in range(8):
for p in range(8):
plt.xticks([])
plt.yticks([])
plt.subplot(8, 8, count)
test = ux[o, p, :, :, q]
plt.imshow(test, vmax=25)
count+=1
plt.show()
'''
px = np.sum(glcm, 1)
px = px[:, None, :, :, :]
py = np.sum(glcm, 0)
py = py[None, :, :, :, :]
#for m in range(4):
# #plt.subplot(2,2,m+1)
# plt.title(str(ux[:, :, m].min()) + ' ' + str(ux [:, :, m].max()))
# plt.imshow(ux[:, :, m])
# plt.show()
ux = np.sum((i[:, :, None, None, None] * glcm), (0, 1))
ux = normalize(ux, scale=self.nbit)
uy = np.sum((j[:, :, None, None, None] * glcm), (0, 1))
uy = normalize(uy, scale=self.nbit)
'''
print()
print('GLCM stuff:')
print(glcm.min(), glcm.max())
print()
print('IJ Stuff:')
print(i[:, :, None, None, None].shape)
print(j[:, :, None, None, None].shape)
print()
print('U stuff:')
print(ux.shape)
print(uy.shape)
for n in range(4):
plt.title('ux')
plt.imshow(ux[:, :, n])
plt.show()
'''
kValuesSum = np.arange(0, (self.nbit * 2)-1, dtype='float')
#kValuesSum = np.arange(2, (self.nbit * 2) + 1, dtype='float')
kDiagIntensity = np.array([iAddj == k for k in kValuesSum])
GLCMDiagIntensity = np.array([kDiagIntensity[int(k)][:, :, None, None, None] * glcm for k in kValuesSum])
pxAddy = np.sum(GLCMDiagIntensity, (1, 2))
kValuesDiff = np.arange(0, self.nbit, dtype='float')
#kValuesDiff = np.arange(0, self.nbit, dtype='float')
kDiagContrast = np.array([iSubj == k for k in kValuesDiff])
GLCMDiagIntensity = np.array([kDiagContrast[int(k)][:, :, None, None, None] * glcm for k in kValuesDiff])
pxSuby = np.sum(GLCMDiagIntensity, (1, 2))
HXY = (-1) * np.sum((glcm * np.log2(glcm + eps)), (0, 1))
features_dict = {}
if 'Autocorrelation' in features:
ac = np.sum(glcm * (i * j)[:, :, None, None, None], (0, 1))
features_dict['Autocorrelation'] = np.nanmean(ac, -1)
if 'ClusterProminence' in features:
cp = np.sum((glcm * (((i + j)[:, :, None, None, None] - ux - uy) ** 4)), (0, 1))
features_dict['ClusterProminence'] = np.nanmean(cp, -1)
if 'ClusterShade' in features:
cs = np.sum((glcm * (((i + j)[:, :, None, None, None] - ux - uy) ** 3)), (0, 1))
features_dict['ClusterShade'] = np.nanmean(cs, -1)
if 'ClusterTendency' in features:
ct = np.sum((glcm * (((i + j)[:, :, None, None, None] - ux - uy) ** 2)), (0, 1))
features_dict['ClusterTendency'] = np.nanmean(ct, -1)
if 'Contrast' in features:
cont = np.sum((glcm * ((np.abs(i - j))[:, :, None, None, None] ** 2)), (0, 1))
features_dict['Contrast'] = np.nanmean(cont, -1)
if 'Correlation' in features:
# shape = (Nv, 1, 1, angles)
sigx = np.sum(glcm * ((i[:, :, None, None, None] - ux) ** 2), (0, 1), keepdims=True) ** 0.5
# shape = (Nv, 1, 1, angles)
sigy = np.sum(glcm * ((j[:, :, None, None, None] - uy) ** 2), (0, 1), keepdims=True) ** 0.5
corm = np.sum(glcm * (i[:, :, None, None, None] - ux) * (j[:, :, None, None, None] - uy), (0, 1), keepdims=True)
corr = corm / (sigx * sigy + eps)
corr[sigx * sigy == 0] = 1 # Set elements that would be divided by 0 to 1.
features_dict['Correlation'] = np.nanmean(corr, (0, 1, -1))
if 'DifferenceAverage' in features:
features_dict['DifferenceAverage'] = np.sum((kValuesDiff[:, None, None, None] * pxSuby), (0, -1))
if 'DifferenceEntropy' in features:
features_dict['DifferenceEntropy'] = (-1) * np.sum((pxSuby * np.log2(pxSuby + eps)), (0, -1))
if 'DifferenceVariance' in features:
diffavg = np.sum((kValuesDiff[:, None, None, None] * pxSuby), 0, keepdims=True)
diffvar = np.sum((pxSuby * ((kValuesDiff[:, None, None, None] - diffavg) ** 2)), (0, -1))
features_dict['DifferenceVariance'] = diffvar
if 'Energy' in features:
sum_squares = np.sum((glcm ** 2), (0, 1))
features_dict['Energy'] = np.nanmean(sum_squares, -1)
if 'Entropy' in features:
features_dict['Entropy'] = np.sum(HXY, -1)
if 'Id' in features:
features_dict['Id'] = np.sum(pxSuby / (1 + kValuesDiff[:, None, None, None]), (0, -1))
if 'Idm' in features:
features_dict['Idm'] = np.sum(pxSuby / (1 + (kValuesDiff[:, None, None, None] ** 2)), (0, -1))
if 'Idmn' in features:
features_dict['Idmn'] = np.sum(pxSuby / (1 + ((kValuesDiff[:, None, None, None] ** 2) / (self.nbit ** 2))), (0,-1))
if 'Idn' in features:
features_dict['Idn'] = np.sum(pxSuby / (1 + (kValuesDiff[:, None, None, None] / self.nbit)), (0, -1))
if 'Imc1' in features:
# entropy of px # shape = (Nv, angles)
HX = (-1) * np.sum((px * np.log2(px + eps)), (0, 1))
# entropy of py # shape = (Nv, angles)
HY = (-1) * np.sum((py * np.log2(py + eps)), (0, 1))
# shape = (Nv, angles)
HXY1 = (-1) * np.sum((glcm * np.log2(px * py + eps)), (0, 1))
div = np.fmax(HX, HY)
imc1 = HXY - HXY1
imc1[div != 0] /= div[div != 0]
imc1[div == 0] = 0 # Set elements that would be divided by 0 to 0
features_dict['Imc1'] = np.nanmean(imc1, -1)
#print('IMC1:', features_dict['Imc1'].shape)
if 'Imc2' in features:
# shape = (Nv, angles)
HXY2 = (-1) * np.sum(((px * py) * np.log2(px * py + eps)), (0, 1))
imc2 = (1 - np.e ** (-2 * (HXY2 - HXY)))
min = imc2.min()
imc2 += np.abs(min)
#print(imc2.min(), imc2.max())
imc2 = imc2 ** 0.5
imc2[HXY2 == HXY] = 0
features_dict['Imc2'] = np.nanmean(imc2, -1)
if 'InverseVariance' in features:
features_dict['InverseVariance'] = np.sum(pxSuby[1:, :, :, :] / kValuesDiff[1:, None, None, None] ** 2, (0, -1)) # Skip k = 0 (division by 0)
if 'JointAverage' in features:
features_dict['JointAverage'] = ux.mean(-1)
if 'MCC' in features:
# Calculate Q (shape (i, i, d)). To prevent division by 0, add epsilon (such a division can occur when in a ROI
# along a certain angle, voxels with gray level i do not have neighbors
nom = glcm[:, :, :, :, :] * glcm[:, :, :, :, :]
den = px[:, 0, :, :, :] * py[:, 0, :, :, :]
den = np.expand_dims(den, 1)
Q = (nom / (den + eps)) # sum over k (4th axis --> index 3)
for gl in range(1, glcm.shape[1]):
Q += ((glcm[:, None, gl, :, :] * glcm[None, :, gl, :, :]) / # slice: v, i, j, k, d
(px[:, None, 0, :, :] * py[None, :, gl, :, :] + eps)) # sum over k (4th axis --> index 3)
#print('Q not Anon', Q.shape)
# calculation of eigenvalues if performed on last 2 dimensions, therefore, move the angles dimension (d) forward
Q_eigenValue = np.linalg.eigvals(Q.transpose((2, 3, 4, 0, 1)))
Q_eigenValue.sort() # sorts along last axis --> eigenvalues, low to high
if Q_eigenValue.shape[3] < 2:
return 1 # flat region
#print(Q_eigenValue.shape)
MCC = np.sqrt(Q_eigenValue[:, :, :,-2]) # 2nd highest eigenvalue
#print(MCC.shape)
features_dict['MCC'] = np.nanmean(MCC, 2).real
if 'MaximumProbability' in features:
maxprob = np.amax(glcm, (0, 1))
features_dict['MaximumProbability'] = np.nanmean(maxprob, -1)
if 'SumAverage' in features:
sumavg = np.sum((kValuesSum[:, None, None, None] * pxAddy), 0)
features_dict['SumAverage'] = np.nanmean(sumavg, -1)
if 'SumEntropy' in features:
sumentr = (-1) * np.sum((pxAddy * np.log2(pxAddy + eps)), 0)
features_dict['SumEntropy'] = np.nanmean(sumentr, -1)
if 'SumSquares' in features:
ix = (i[:, :, None, None, None] - ux) ** 2
ss = np.sum(glcm * ix, (0, 1))
features_dict['SumSquares'] = np.nanmean(ss, -1)
return features_dict
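# Minimal usage sketch for the class above (the 2-D arrays and the 'candidate'
# key are hypothetical):
# msmfe = MSMFE(ref_image, imgs={'candidate': candidate_image})
# error_df = msmfe.get_error()        # per-feature mean squared error vs. the reference
# maps = msmfe.get_feature_maps()     # dict of per-image GLCM feature maps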
#if __name__ == '__main__':
def stringerate(number, length):
number = str(number)
strlen = len(number)
zeros = "0" * (length - strlen)
return zeros + number
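# e.g. stringerate(7, 4) returns "0007".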
errors = []
for i in range(50):
id = stringerate(i+1, 4)
path = r'H:\Data\W\W_DATA_SET_W{}_tru_one.npy'.format(id)
print(path)
ref = np.load(path)
imgs = {}
imgs['PixelMiner'] = np.load(r'H:\Data\W\W_DATA_SET_W0001_PixelCNN.npy')
imgs['Window Sinc'] =
|
np.load(r'H:\Data\W\W_DATA_SET_W0001_CosineWindowedSinc.npy')
|
numpy.load
|
'''
6 April 2020
Python file for generating anomalies in sample conductivity distributions.
by <NAME> and <NAME>
in collaboration with <NAME> and <NAME>
from Solid State Physics Group
at the University of Manchester
'''
import numpy as np
# numpy.random is needed here: the calls below use numpy-style keyword
# arguments (size=, low=, high=, loc=, scale=) that random.SystemRandom
# does not support.
import numpy.random as rand
import numpy.linalg as la
def multivariateGaussian(x, mu, sigma, normalised=False):
    if normalised:
        # normalisation constant of a 2-D Gaussian: 2*pi*sqrt(det(sigma))
        denominator = 2 * np.pi * np.sqrt(np.linalg.det(sigma))
    else:
        denominator = 1.
x_centred = x - mu
#path = np.einsum_path('ij, jk, ki->i', x_centred, np.linalg.inv(sigma), x_centred.T, optimize='optimal')[0]
numerator = np.exp(-0.5 * np.einsum('ij, jk, ki->i', x_centred, np.linalg.inv(sigma), x_centred.T, optimize='optimal'))
return numerator / denominator
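# Sanity check (illustrative values): a standard 2-D Gaussian evaluated at its
# mean has normalised density 1/(2*pi) ~= 0.159:
# multivariateGaussian(np.zeros((1, 2)), np.zeros(2), np.eye(2), normalised=True)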
def generateContinuousConductivity(a, centre, number_of_gaussians, mu, sigma, npix, weightGauss=None, pts=None, tri=None):
    # array to store permittivity in different square
if centre is None:
centre=[0, 0]
if (number_of_gaussians) == 0:
if pts is not None and tri is not None:
return np.ones((npix, npix)), np.ones(tri.shape[0])
else:
return np.ones((npix, npix)), None
    # assumed that background permittivity is 1 (and therefore difference with uniform will be 0)
permSquares = np.zeros((int(npix), int(npix)), dtype='f4')
if pts is not None and tri is not None:
permTri = np.zeros(tri.shape[0], dtype='f4')
# initialises an array to store the coordinates of centres of squares (pixels)
centresSquares = np.empty((npix, npix, 2), dtype='f4')
# initialising the j vector to prepare for tiling
j = np.arange(npix)
# tiling j to itself npix times (makes array shape (npix, npix))
j = np.tile(j, (npix, 1))
# i and j are transposes of each other
i = j
j = j.T
# assigning values to C_sq
centresSquares[i, j, :] = np.transpose([a / 2 * ((2 * i + 1) / npix - 1) + centre[0], a / 2 * ((2 * j + 1) / npix - 1) + centre[1]])
if pts is not None and tri is not None:
centresTriangles = np.mean(pts[tri], axis=1)
centresSquares = centresSquares.reshape((npix * npix, 2))
if weightGauss is None:
weightGauss = rand.uniform(size=(number_of_gaussians,), low=0., high=0.1)
for i in range(number_of_gaussians):
if type(weightGauss) is np.ndarray:
weight = weightGauss[i]
elif type(weightGauss) is float:
weight = weightGauss
else:
raise TypeError("weight is not float or array of floats")
permSquares[:] += (weight/number_of_gaussians) * multivariateGaussian(centresSquares, mu[i], sigma[i]).reshape(npix, npix)
if pts is not None and tri is not None:
permTri[:] += (weight/number_of_gaussians) * multivariateGaussian(centresTriangles, mu[i], sigma[i])
if pts is not None and tri is not None:
if (np.abs(permSquares) < 5e-2).any():
a = np.random.randint(low = 4, high = 14) * 0.1
permSquares += a
permTri += a
'''
fig, ax = plt.subplots(figsize=(6, 4))
im = ax.imshow(np.real(permSquares) - np.ones((npix, npix)), interpolation='none', cmap=plt.cm.viridis, origin='lower', extent=[-1,1,-1,1])
fig.colorbar(im)
ax.axis('equal')
#plt.show
'''
if pts is not None and tri is not None:
return permSquares, permTri
else:
return permSquares, None
def randomiseGaussianParams(a, centre, npix):
if centre is None:
centre = [0, 0]
# randomise parameters of gaussians
number_of_gaussians = rand.randint(low=0, high=5)
if (number_of_gaussians) == 0:
return 0, 0, 0
mu = np.empty(shape=(number_of_gaussians, 2))
mu[:, 0] = rand.normal(size=(number_of_gaussians), loc=0, scale=0.5)
mu[:, 1] = rand.normal(size=(number_of_gaussians), loc=0, scale=0.5)
sigma = np.empty(shape=(number_of_gaussians, 2, 2))
sigma[:, 0, 0] = rand.uniform(size=(number_of_gaussians), low = 0.2, high = 5.)
sigma[:, 1, 1] = rand.uniform(size=(number_of_gaussians), low = 0.2, high = 5.)
sigma[:, 1, 0] = rand.uniform(size=(number_of_gaussians), low = -np.sqrt(sigma[:,0,0]*sigma[:,1,1]), high = np.sqrt(sigma[:,0,0]*sigma[:,1,1]))
sigma[:, 0, 1] = sigma[:, 1, 0]
return number_of_gaussians, mu, sigma
def randomiseGaussianParam(a=2., centre=None, npix=64):
if centre is None:
centre = [0, 0]
# randomise parameters of gaussians
mu = np.empty(shape=(1, 2))
mu[:, 0] = rand.uniform(-1, 1)
mu[:, 1] = rand.uniform(-1, 1)
#mu[:, 0] = rand.normal(size=(number_of_gaussians), loc=0, scale=0.5)
#mu[:, 1] = rand.normal(size=(number_of_gaussians), loc=0, scale=0.5)
sigma = np.empty(shape=(1, 2, 2))
sigma[:, 0, 0] = rand.uniform(0.08, 0.8)
sigma[:, 1, 1] = rand.uniform(0.08, 0.8)
sigma[:, 1, 0] = rand.uniform(-np.sqrt(sigma[:,0,0]*sigma[:,1,1]), np.sqrt(sigma[:,0,0]*sigma[:,1,1]))
sigma[:, 0, 1] = sigma[:, 1, 0]
return mu, sigma
def triangle_area(x, y):
'''
    function that computes the area of a triangle given the 2d coordinates of its vertices
takes:
x - array storing the x-coordinates of all vertices [3, 1] float
y - array storing the y-coordinates of all vertices [3, 1] float
returns:
area of the triangle
'''
return 0.5 * np.absolute(x[0] * (y[1] - y[2]) + x[1] * (y[2] - y[0]) + x[2] * (y[0] - y[1]))
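# Quick check: the right triangle with vertices (0,0), (4,0), (0,3) gives
# triangle_area(np.array([0, 4, 0]), np.array([0, 0, 3])) == 6.0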
def generate_examplary_output(a, npix, anomaly, centre=None):
'''
a function that generates true conductivity map to be used in training of U-net
takes:
a - side of square - float
npix - number of pixels along each axis - int
    anomaly - list of dictionaries, one per anomaly, holding its characteristics
centre - centre of coordinate system - array shape (2,)
returns:
true conductivity distribution - array shape (npix, npix)
'''
if centre is None:
centre = [0, 0]
    # array to store permittivity in different square
    # assumed that background permittivity is 1 (and therefore difference with uniform will be 0)
perm_sq = np.ones((npix, npix), dtype='f4')
# initialises an array to store the coordinates of centres of squares (pixels)
C_sq = np.empty((npix, npix, 2), dtype='f4')
# initialising the j vector to prepare for tiling
j = np.arange(npix)
# tiling j to itself npix times (makes array shape (npix, npix))
j = np.tile(j, (npix, 1))
# i and j are transposes of each other
i = j
j = j.T
# assigning values to C_sq
C_sq[i, j, :] = np.transpose([a / 2 * ((2 * i + 1) / npix - 1) + centre[0], a / 2 * ((2 * j + 1) / npix - 1) + centre[1]])
# return an empty array if there are no anomalies generated
if anomaly is None:
return perm_sq - 1
# putting anomalies on map one by one
for l in range(len(anomaly)):
if anomaly[l]['name'] == 'ellipse':
# check what squares have their centres inside the ellipse and set permittivity values
x = anomaly[l]['x']
y = anomaly[l]['y']
a_ = anomaly[l]['a']
b = anomaly[l]['b']
angle = anomaly[l]['angle']
# equation for a rotated ellipse in 2d cartesians
indices = np.sum(np.power([(np.cos(angle)*(C_sq[:, :, 0] - x) - np.sin(angle) * (C_sq[:, : , 1] - y))/a_,
(np.sin(angle)*(C_sq[:, :, 0] - x) + np.cos(angle) * (C_sq[:, : , 1] - y))/b], 2),
axis=0) < 1
# setting relative permittivity values
perm_sq[indices] = anomaly[l]['perm']
# check what squares are crossed by the line element and set their permittivity values to zero
elif anomaly[l]['name'] == 'line':
x = anomaly[l]['x'] # of centre
y = anomaly[l]['y']
theta = anomaly[l]['angle_line']
length = anomaly[l]['len']
# coordinates of endpoints of the line
p_start = np.array([x + (length * np.cos(theta))/2, y + (length * np.sin(theta))/2])
p_end = np.array([x - (length * np.cos(theta))/2, y - (length * np.sin(theta))/2])
# find min and max x and y for any coordinates, so we have lower left and upper right corners of rectangle, whose diagonal is our line
x_min_max = np.sort([x + (length * np.cos(theta))/2, x - (length * np.cos(theta))/2])
y_min_max = np.sort([y + (length * np.sin(theta))/2, y - (length * np.sin(theta))/2])
# checking whether pixel is in that rectangle by setting a limit on x and y of its centre
if abs(y_min_max[0] - y_min_max[1]) < a / (npix/4):
# the loop increases the allowed distances from the line if line is very close to horizontal
index_1 = (C_sq[:,:,0] > x_min_max[0]) * (C_sq[:,:,0] < x_min_max[1]) * (C_sq[:,:,1] > y_min_max[0] - a / (npix * np.sqrt(2))) * (C_sq[:,:,1] < y_min_max[1] + a / (npix * np.sqrt(2)))
elif abs(x_min_max[0] - x_min_max[1]) < a / (npix/4):
# the loop increases the allowed distances from the line if line is very close to vertical
index_1 = (C_sq[:,:,0] > x_min_max[0] - a / (npix/4)) * (C_sq[:,:,0] < x_min_max[1] + a / (npix/4)) * (C_sq[:,:,1] > y_min_max[0]) * (C_sq[:,:,1] < y_min_max[1])
else:
index_1 = (C_sq[:,:,0] > x_min_max[0]) * (C_sq[:,:,0] < x_min_max[1]) * (C_sq[:,:,1] > y_min_max[0]) * (C_sq[:,:,1] < y_min_max[1])
# checking whether distance from the centre to the line is smaller than the diagonal of the square
indices = (np.absolute(np.cross(p_end - p_start,
np.array([p_start[0] - C_sq[:, :, 0], p_start[1] - C_sq[:, :, 1]]).T)
/ la.norm(p_end - p_start))
< a / (npix * np.sqrt(2)))
indices =
|
np.transpose(indices)
|
numpy.transpose
|
# -*- coding: utf-8 -*-
# Copyright 2018-2022 the orix developers
#
# This file is part of orix.
#
# orix is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# orix is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with orix. If not, see <http://www.gnu.org/licenses/>.
from typing import List, Tuple
import numpy as np
from scipy.spatial import cKDTree
def _get_start_and_end_index(
number_of_steps: int,
include_start: bool,
include_end: bool,
positive_and_negative: bool,
) -> Tuple[int, int]:
if positive_and_negative:
start = -number_of_steps
else:
start = 0
if not include_start:
start = start + 1
end = number_of_steps
if include_end:
end = end + 1
return start, end
def _number_of_equidistant_steps(resolution: float, length: float) -> int:
maximum_grid_spacing = np.tan(np.deg2rad(resolution))
return int(np.ceil(length / maximum_grid_spacing))
def _sample_length_equidistant(
number_of_steps: int,
length: float,
include_start: bool = True,
include_end: bool = False,
positive_and_negative: bool = True,
) -> np.ndarray:
start_index, end_index = _get_start_and_end_index(
number_of_steps,
include_start,
include_end,
positive_and_negative,
)
grid_spacing = length / number_of_steps
grid_on_edge = np.arange(start_index, end_index) * grid_spacing
return grid_on_edge
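# Worked example (illustrative numbers): for resolution = 10 degrees and a
# half-edge length of 1, the maximum spacing is tan(10 deg) ~= 0.176, so
# _number_of_equidistant_steps(10, 1) = ceil(1/0.176) = 6 and the default grid
# runs over np.arange(-6, 6) / 6, i.e. -1.0 up to (but excluding) +1.0.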
def _number_of_equiangular_steps(resolution: float, length: float) -> int:
total_angle = np.arctan(length)
return int(np.ceil(total_angle / np.deg2rad(resolution)))
def _sample_length_equiangular(
number_of_steps: int,
length: float,
include_start: bool = True,
include_end: bool = False,
positive_and_negative: bool = True,
) -> np.ndarray:
total_angle = np.arctan(length)
start_index, end_index = _get_start_and_end_index(
number_of_steps,
include_start,
include_end,
positive_and_negative,
)
linear_grid = np.arange(start_index, end_index)
angular_increment = total_angle / number_of_steps
grid_on_edge = np.tan(linear_grid * angular_increment)
return grid_on_edge
def _edge_grid_normalized_cube(resolution: float) -> np.ndarray:
center_of_face_to_edge = 1.0
number_of_steps = _number_of_equidistant_steps(resolution, center_of_face_to_edge)
return _sample_length_equidistant(number_of_steps, center_of_face_to_edge)
def _edge_grid_spherified_edge_cube(resolution: float) -> np.ndarray:
center_of_face_to_edge = 1.0
number_of_steps = _number_of_equiangular_steps(resolution, center_of_face_to_edge)
return _sample_length_equiangular(number_of_steps, center_of_face_to_edge)
def _edge_grid_spherified_corner_cube(resolution: float) -> np.ndarray:
center_of_face_to_corner = np.sqrt(2)
number_of_steps = _number_of_equiangular_steps(resolution, center_of_face_to_corner)
grid_on_diagonal = _sample_length_equiangular(
number_of_steps, center_of_face_to_corner
)
return grid_on_diagonal / center_of_face_to_corner
def _compose_from_faces(
corners: np.ndarray,
faces: List[Tuple[int, int, int]],
n: int,
) -> np.ndarray:
"""Refine a grid starting from a platonic solid; adapted from meshzoo.
Parameters
----------
corners
Coordinates of vertices for starting shape. Shape of the array should
be (N, 3).
faces
Each tuple in the list corresponds to the vertex indices making
up a triangular face of the mesh.
n
Number of times the mesh is refined.
Returns
-------
vertices
The coordinates of the refined mesh vertices, an array of shape (N, 3).
References
----------
:cite:`meshzoo`
"""
# create corner nodes
vertices = [corners]
# create edges
edges = set()
for face in faces:
edges.add(tuple(sorted([face[0], face[1]])))
edges.add(tuple(sorted([face[1], face[2]])))
edges.add(tuple(sorted([face[2], face[0]])))
# create edge nodes:
t = np.linspace(1 / n, 1.0, n - 1, endpoint=False)
for edge in edges:
i0, i1 = edge
new_vertices = np.outer(1 - t, corners[i0]) + np.outer(t, corners[i1])
vertices.append(new_vertices)
for face in faces:
face_corner_indices = face
bary = (
np.hstack(
[[np.full(n - i - 1, i), np.arange(1, n - i)] for i in range(1, n)]
)
/ n
)
bary = np.array([1.0 - bary[0] - bary[1], bary[1], bary[0]])
corner_verts = np.array([corners[i] for i in face_corner_indices])
vertices_cart =
|
np.dot(corner_verts.T, bary)
|
numpy.dot
|
import numpy as np
# http://www.netlib.org/templates/matlab/
nx = 8
A = np.zeros((nx*nx,nx*nx))
def inDomain(i,j):
if i < 0 or j < 0 or i >= nx or j >= nx:
return 0
return 1
div = np.zeros((nx,nx))
b = np.zeros((nx * nx))
div[3,1] = 1
div[4,1] = 1
for i in range(nx):
for j in range(nx):
idx = j * nx + i
b[idx] = div[i,j]
for k in range(nx*nx):
A[k,k] = 4
i = int(k % nx)
j = int(k / nx)
if inDomain(i-1, j):
A[k,k-1] = -1
if inDomain(i+1, j):
A[k,k+1] = -1
if inDomain(i, j-1):
A[k,k-nx] = -1
if inDomain(i, j+1):
A[k,k+nx] = -1
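# At this point A holds the standard 5-point finite-difference Laplacian on the
# nx-by-nx grid (4 on the diagonal, -1 for each in-domain neighbour, i.e. a
# Dirichlet problem); the rest of the script presumably follows the
# conjugate-gradient template from the netlib reference above, with p as the
# search direction and res as the residual.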
x = np.zeros((nx * nx))
p = np.zeros((nx * nx)) # search direction
res = b - np.dot(A,x) # residual
bnrm2 =
|
np.linalg.norm(b)
|
numpy.linalg.norm
|
import os
import h5py
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore
from .area_mask import MaskAssemble
from .find_center import find_center
from .pyqtgraph_mod import LineROI
from .file_reader import read_raw_file
pg.setConfigOptions(imageAxisOrder='row-major')
class SimpleMask(object):
def __init__(self, pg_hdl, infobar):
self.data_raw = None
self.shape = None
self.qmap = None
self.mask = None
self.mask_kernel = None
self.saxs_lin = None
self.saxs_log = None
self.saxs_log_min = None
self.plot_log = True
self.new_partition = None
self.meta = None
self.hdl = pg_hdl
self.infobar = infobar
self.extent = None
self.hdl.scene.sigMouseMoved.connect(self.show_location)
self.bad_pixel_set = set()
self.qrings = []
self.idx_map = {
0: "scattering",
1: "scattering * mask",
2: "mask",
3: "dqmap_partition",
4: "sqmap_partition",
5: "preview"
}
def is_ready(self):
if self.meta is None or self.data_raw is None:
return False
else:
return True
def find_center(self):
if self.saxs_lin is None:
return
mask = self.mask
# center = (self.meta['bcy'], self.meta['bcx'])
center = find_center(self.saxs_lin, mask=mask, center_guess=None,
scale='log')
return center
def mask_evaluate(self, target, **kwargs):
msg = self.mask_kernel.evaluate(target, **kwargs)
# preview the mask
mask = self.mask_kernel.get_one_mask(target)
self.data_raw[5][:, :] = mask
return msg
def mask_apply(self, target):
mask = self.mask_kernel.get_one_mask(target)
self.mask_kernel.enable(target)
self.mask = np.logical_and(self.mask, mask)
self.data_raw[1] = self.saxs_log * self.mask
self.data_raw[2] = self.mask
if target == "mask_qring":
self.qrings = self.mask_kernel.workers[target].get_qrings()
if self.plot_log:
log_min = np.min(self.saxs_log[self.mask > 0])
self.data_raw[1][np.logical_not(self.mask)] = log_min
else:
lin_min = np.min(self.saxs_lin[self.mask > 0])
self.data_raw[1][np.logical_not(self.mask)] = lin_min
def get_pts_with_similar_intensity(self, cen=None, radius=50,
variation=50):
vmin = max(0, int(cen[0] - radius))
vmax = min(self.shape[0], int(cen[0] + radius))
hmin = max(0, int(cen[1] - radius))
hmax = min(self.shape[1], int(cen[1] + radius))
crop = self.saxs_lin[vmin:vmax, hmin:hmax]
val = self.saxs_lin[cen]
idx = np.abs(crop - val) <= variation / 100.0 * val
pos = np.array(np.nonzero(idx))
pos[0] += vmin
pos[1] += hmin
pos = np.roll(pos, shift=1, axis=0)
return pos.T
def save_partition(self, save_fname):
# if no partition is computed yet
if self.new_partition is None:
return
with h5py.File(save_fname, 'w') as hf:
if '/data' in hf:
del hf['/data']
data = hf.create_group('data')
data.create_dataset('mask', data=self.mask)
for key, val in self.new_partition.items():
data.create_dataset(key, data=val)
# directories that remain the same
dt = h5py.vlen_dtype(np.dtype('int32'))
version = data.create_dataset('Version', (1,), dtype=dt)
version[0] = [5]
maps = data.create_group("Maps")
dt = h5py.special_dtype(vlen=str)
map1name = maps.create_dataset('map1name', (1,), dtype=dt)
map1name[0] = 'q'
map2name = maps.create_dataset('map2name', (1,), dtype=dt)
map2name[0] = 'phi'
empty_arr = np.array([])
maps.create_dataset('q', data=self.qmap['q'])
maps.create_dataset('phi', data=self.qmap['phi'])
maps.create_dataset('x', data=empty_arr)
maps.create_dataset('y', data=empty_arr)
for key, val in self.meta.items():
if key in ['datetime', 'energy', 'det_dist', 'pix_dim', 'bcx',
'bcy', 'saxs']:
continue
val = np.array(val)
if val.size == 1:
val = val.reshape(1, 1)
data.create_dataset(key, data=val)
print('partition map is saved')
# generate 2d saxs
def read_data(self, fname=None, **kwargs):
reader = read_raw_file(fname)
saxs = reader.get_scattering(**kwargs)
if saxs is None:
print('cannot read the scattering data from raw data file.')
return
self.meta = reader.load_meta()
# keep same
self.data_raw = np.zeros(shape=(6, *saxs.shape))
        self.mask = np.ones(saxs.shape, dtype=bool)  # np.bool alias is removed in recent numpy
saxs_nonzero = saxs[saxs > 0]
# use percentile instead of min to be robust
self.saxs_lin_min = np.percentile(saxs_nonzero, 1)
self.saxs_log_min = np.log10(self.saxs_lin_min)
self.saxs_lin = saxs.astype(np.float32)
self.min_val = np.min(saxs[saxs > 0])
self.saxs_log = np.log10(saxs + self.min_val)
self.shape = self.data_raw[0].shape
# reset the qrings after data loading
self.qrings = []
self.qmap = self.compute_qmap()
self.mask_kernel = MaskAssemble(self.shape, self.saxs_log)
self.mask_kernel.update_qmap(self.qmap)
self.extent = self.compute_extent()
# self.meta['saxs'] = saxs
self.data_raw[0] = self.saxs_log
self.data_raw[1] = self.saxs_log * self.mask
self.data_raw[2] = self.mask
def compute_qmap(self):
k0 = 2 * np.pi / (12.398 / self.meta['energy'])
v = np.arange(self.shape[0], dtype=np.uint32) - self.meta['bcy']
h = np.arange(self.shape[1], dtype=np.uint32) - self.meta['bcx']
vg, hg = np.meshgrid(v, h, indexing='ij')
r = np.sqrt(vg * vg + hg * hg) * self.meta['pix_dim']
# phi = np.arctan2(vg, hg)
        # to be compatible with matlab xpcs-gui; phi = 0 starts at 6 o'clock
# and it goes clockwise;
phi = np.arctan2(hg, vg)
phi[phi < 0] = phi[phi < 0] + np.pi * 2.0
phi = np.max(phi) - phi # make it clockwise
alpha = np.arctan(r / self.meta['det_dist'])
        # the small-angle form would be qr = np.sin(alpha) * k0; keep the exact expression
        qr = 2 * np.sin(alpha / 2) * k0
qx = qr * np.cos(phi)
qy = qr * np.sin(phi)
phi = np.rad2deg(phi)
# keep phi and q as np.float64 to keep the precision.
qmap = {
'phi': phi,
'alpha': alpha.astype(np.float32),
'q': qr,
'qx': qx.astype(np.float32),
'qy': qy.astype(np.float32)
}
return qmap
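    # Worked example (illustrative numbers): at energy = 12.398 keV the
    # wavelength 12.398 / energy is exactly 1 Angstrom, so k0 = 2*pi ~= 6.28
    # per Angstrom and a pixel at scattering angle alpha maps to
    # q = 2 * sin(alpha / 2) * k0.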
def get_qp_value(self, x, y):
x = int(x)
y = int(y)
shape = self.qmap['q'].shape
if 0 <= x < shape[1] and 0 <= y < shape[0]:
return self.qmap['q'][y, x], self.qmap['phi'][y, x]
else:
return None, None
def compute_extent(self):
k0 = 2 * np.pi / (12.3980 / self.meta['energy'])
x_range = np.array([0, self.shape[1]]) - self.meta['bcx']
y_range = np.array([-self.shape[0], 0]) + self.meta['bcy']
x_range = x_range * self.meta['pix_dim'] / self.meta['det_dist'] * k0
y_range = y_range * self.meta['pix_dim'] / self.meta['det_dist'] * k0
# the extent for matplotlib imshow is:
# self._extent = xmin, xmax, ymin, ymax = extent
# convert to a tuple of 4 elements;
return (*x_range, *y_range)
def show_location(self, pos):
if not self.hdl.scene.itemsBoundingRect().contains(pos) or \
self.shape is None:
return
shape = self.shape
mouse_point = self.hdl.getView().mapSceneToView(pos)
col = int(mouse_point.x())
row = int(mouse_point.y())
if col < 0 or col >= shape[1]:
return
if row < 0 or row >= shape[0]:
return
qx = self.qmap['qx'][row, col]
qy = self.qmap['qy'][row, col]
phi = self.qmap['phi'][row, col]
val = self.data_raw[self.hdl.currentIndex][row, col]
# msg = f'{self.idx_map[self.hdl.currentIndex]}: ' + \
msg = f'[x={col:4d}, y={row:4d}, ' + \
f'qx={qx:.3e}Å⁻¹, qy={qy:.3e}Å⁻¹, phi={phi:.1f}deg], ' + \
f'val={val:.03e}'
self.infobar.clear()
self.infobar.setText(msg)
return None
def show_saxs(self, cmap='jet', log=True, invert=False,
plot_center=True, plot_index=0, **kwargs):
if self.meta is None or self.data_raw is None:
return
# self.hdl.reset_limits()
self.hdl.clear()
# self.data = np.copy(self.data_raw)
# print('show_saxs', np.min(self.data[1]))
center = (self.meta['bcx'], self.meta['bcy'])
self.plot_log = log
if not log:
self.data_raw[0] = self.saxs_lin
else:
self.data_raw[0] = self.saxs_log
# if invert:
# temp = np.max(self.data[0]) - self.data[0]
# self.data[0] = temp
self.hdl.setImage(self.data_raw)
self.hdl.adjust_viewbox()
self.hdl.set_colormap(cmap)
# plot center
if plot_center:
t = pg.ScatterPlotItem()
t.addPoints(x=[center[0]], y=[center[1]], symbol='+', size=15)
self.hdl.add_item(t, label='center')
self.hdl.setCurrentIndex(plot_index)
return
def apply_drawing(self):
if self.meta is None or self.data_raw is None:
return
        ones = np.ones(self.data_raw[0].shape, dtype=bool)
        mask_n = np.zeros_like(ones, dtype=bool)
mask_e = np.zeros_like(mask_n)
mask_i = np.zeros_like(mask_n)
for k, x in self.hdl.roi.items():
if not k.startswith('roi_'):
continue
            mask_temp = np.zeros_like(ones, dtype=bool)
# return slice and transfrom
sl, _ = x.getArraySlice(self.data_raw[1], self.hdl.imageItem)
y = x.getArrayRegion(ones, self.hdl.imageItem)
# sometimes the roi size returned from getArraySlice and
# getArrayRegion are different;
nz_idx = np.nonzero(y)
h_beg = np.min(nz_idx[1])
h_end = np.max(nz_idx[1])
v_beg = np.min(nz_idx[0])
v_end = np.max(nz_idx[0])
sl_v = slice(sl[0].start, sl[0].start + v_end - v_beg + 1)
sl_h = slice(sl[1].start, sl[1].start + h_end - h_beg + 1)
mask_temp[sl_v, sl_h] = y[v_beg:v_end + 1, h_beg: h_end + 1]
if x.sl_mode == 'exclusive':
mask_e[mask_temp] = 1
elif x.sl_mode == 'inclusive':
mask_i[mask_temp] = 1
self.hdl.remove_rois(filter_str='roi_')
if np.sum(mask_i) == 0:
mask_i = 1
mask_p = np.logical_not(mask_e) * mask_i
return mask_p
def add_drawing(self, num_edges=None, radius=60, color='r',
sl_type='Polygon', width=3, sl_mode='exclusive',
second_point=None, label=None, movable=True):
# label: label of roi; default is None, which is for roi-draw
if label is not None and label in self.hdl.roi:
self.hdl.remove_item(label)
# cen = (shape[1] // 2, shape[2] // 2)
cen = (self.meta['bcx'], self.meta['bcy'])
if sl_mode == 'inclusive':
pen = pg.mkPen(color=color, width=width, style=QtCore.Qt.DotLine)
else:
pen = pg.mkPen(color=color, width=width)
handle_pen = pg.mkPen(color=color, width=width)
kwargs = {
'pen': pen,
'removable': True,
'hoverPen': pen,
'handlePen': handle_pen,
'movable': movable
}
if sl_type == 'Ellipse':
new_roi = pg.EllipseROI(cen, [60, 80], **kwargs)
# add scale handle
new_roi.addScaleHandle([0.5, 0], [0.5, 1], )
new_roi.addScaleHandle([0.5, 1], [0.5, 0])
new_roi.addScaleHandle([0, 0.5], [1, 0.5])
new_roi.addScaleHandle([1, 0.5], [0, 0.5])
elif sl_type == 'Circle':
if second_point is not None:
radius = np.sqrt((second_point[1] - cen[1]) ** 2 +
(second_point[0] - cen[0]) ** 2)
new_roi = pg.CircleROI(pos=[cen[0] - radius, cen[1] - radius],
radius=radius,
**kwargs)
elif sl_type == 'Polygon':
if num_edges is None:
                num_edges = np.random.randint(6, 11)  # random_integers is removed in recent numpy; randint's upper bound is exclusive
# add angle offset so that the new rois don't overlap with each
# other
            offset = np.random.randint(0, 360)  # inclusive 0-359
theta = np.linspace(0, np.pi * 2, num_edges + 1) + offset
x = radius * np.cos(theta) + cen[0]
y = radius * np.sin(theta) + cen[1]
pts = np.vstack([x, y]).T
new_roi = pg.PolyLineROI(pts, closed=True, **kwargs)
elif sl_type == 'Rectangle':
new_roi = pg.RectROI(cen, [30, 150], **kwargs)
new_roi.addScaleHandle([0, 0], [1, 1])
# new_roi.addRotateHandle([0, 1], [0.5, 0.5])
elif sl_type == 'Line':
if second_point is None:
return
width = kwargs.pop('width', 1)
new_roi = LineROI(cen, second_point, width,
**kwargs)
else:
raise TypeError('type not implemented. %s' % sl_type)
new_roi.sl_mode = sl_mode
roi_key = self.hdl.add_item(new_roi, label)
new_roi.sigRemoveRequested.connect(lambda: self.remove_roi(roi_key))
return new_roi
def get_qring_values(self):
result = {}
cen = (self.meta['bcx'], self.meta['bcy'])
for key in ['qring_qmin', 'qring_qmax']:
if key in self.hdl.roi:
x = tuple(self.hdl.roi[key].state['size'])[0] / 2.0 + cen[0]
value, _ = self.get_qp_value(x, cen[1])
else:
value = None
result[key] = value
for key in ['qring_pmin', 'qring_pmax']:
if key in self.hdl.roi:
value = self.hdl.roi[key].state['angle']
value = value - 90
value = value - np.floor(value / 360.0) * 360.0
else:
value = None
result[key] = value
return result
def remove_roi(self, roi_key):
self.hdl.remove_item(roi_key)
def get_partition(self, qnum, pnum, qmin=None, qmax=None, pmin=None,
pmax=None, style='linear'):
mask = self.mask
qmap = self.qmap['q'] * mask
pmap_org = self.qmap['phi'] * mask
if qmin is None or qmax is None:
qmin = np.min(self.qmap['q'][mask > 0])
qmax =
|
np.max(self.qmap['q'][mask > 0])
|
numpy.max
|
# -*- coding: utf-8 -*-
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Useful functions."""
import fractions
import numbers
from functools import lru_cache
import numpy as np
from mindquantum.config.config import _GLOBAL_MAT_VALUE
from .type_value_check import (
_check_input_type,
_check_int_type,
_check_value_should_between_close_set,
_check_value_should_not_less,
)
__all__ = ['random_circuit', 'mod', 'normalize', 'random_state']
def random_circuit(n_qubits, gate_num, sd_rate=0.5, ctrl_rate=0.2, seed=None):
"""
Generate a random circuit.
Args:
n_qubits (int): Number of qubits of random circuit.
gate_num (int): Number of gates in random circuit.
sd_rate (float): The rate of single qubit gate and double qubits gates.
ctrl_rate (float): The possibility that a gate has a control qubit.
seed (int): Random seed to generate random circuit.
Examples:
>>> from mindquantum.utils import random_circuit
>>> random_circuit(3, 4, 0.5, 0.5, 100)
q1: ──Z────RX(0.944)────────●────────RX(-0.858)──
│ │ │ │
q2: ──●────────●────────RZ(-2.42)────────●───────
"""
import mindquantum.core.gates as gates
from mindquantum import Circuit
_check_int_type('n_qubits', n_qubits)
_check_int_type('gate_num', gate_num)
_check_input_type('sd_rate', float, sd_rate)
_check_input_type('ctrl_rate', float, ctrl_rate)
if seed is None:
seed = np.random.randint(1, 2**23)
_check_int_type('seed', seed)
_check_value_should_not_less('n_qubits', 1, n_qubits)
_check_value_should_not_less('gate_num', 1, gate_num)
_check_value_should_between_close_set('sd_rate', 0, 1, sd_rate)
_check_value_should_between_close_set('ctrl_rate', 0, 1, ctrl_rate)
_check_value_should_between_close_set('seed', 0, 2**32 - 1, seed)
if n_qubits == 1:
sd_rate = 1
ctrl_rate = 0
single = {
'param': [gates.RX, gates.RY, gates.RZ, gates.PhaseShift],
'non_param': [gates.X, gates.Y, gates.Z, gates.H],
}
double = {'param': [gates.XX, gates.YY, gates.ZZ], 'non_param': [gates.SWAP]}
c = Circuit()
np.random.seed(seed)
qubits = range(n_qubits)
for _ in range(gate_num):
if n_qubits == 1:
q1, q2 = int(qubits[0]), None
else:
q1, q2 = np.random.choice(qubits, 2, replace=False)
q1, q2 = int(q1), int(q2)
if np.random.random() < sd_rate:
if np.random.random() > ctrl_rate:
q2 = None
if np.random.random() < 0.5:
gate = np.random.choice(single['param'])
p = np.random.uniform(-np.pi * 2, np.pi * 2)
c += gate(p).on(q1, q2)
else:
gate = np.random.choice(single['non_param'])
c += gate.on(q1, q2)
else:
if np.random.random() < 0.75:
gate = np.random.choice(double['param'])
p = np.random.uniform(-np.pi * 2, np.pi * 2)
c += gate(p).on([q1, q2])
else:
gate =
|
np.random.choice(double['non_param'])
|
numpy.random.choice
|
import numpy as np
import numba
import scipy.optimize as sopt
import json
sin = np.sin
cos = np.cos
atan2 = np.arctan2
sqrt = np.sqrt
sign = np.sign
exp = np.exp
class onehalf_class:
def __init__(self):
self.t_end = 10.000000
self.Dt = 0.0010000
self.decimation = 10.000000
self.itol = 1e-6
self.Dt_max = 0.001000
self.Dt_min = 0.001000
self.solvern = 5
self.imax = 100
self.N_x = 21
self.N_y = 33
self.N_z = 10
self.N_store = 10000
self.params_list = ['S_base', 'g_01_02', 'b_01_02', 'bs_01_02', 'g_02_03', 'b_02_03', 'bs_02_03', 'U_01_n', 'U_02_n', 'U_03_n', 'S_n_01', 'Omega_b_01', 'H_01', 'T1d0_01', 'T1q0_01', 'X_d_01', 'X_q_01', 'X1d_01', 'X1q_01', 'D_01', 'R_a_01', 'K_delta_01', 'K_sec_01', 'K_a_01', 'K_ai_01', 'T_r_01', 'V_min_01', 'V_max_01', 'K_aw_01', 'Droop_01', 'T_gov_1_01', 'T_gov_2_01', 'T_gov_3_01', 'K_imw_01', 'omega_ref_01', 'T_wo_01', 'T_1_01', 'T_2_01', 'K_stab_01', 'V_lim_01', 'S_n_03', 'Omega_b_03', 'K_p_03', 'T_p_03', 'K_q_03', 'T_q_03', 'X_v_03', 'R_v_03', 'R_s_03', 'C_u_03', 'K_u_0_03', 'K_u_max_03', 'V_u_min_03', 'V_u_max_03', 'R_uc_03', 'K_h_03', 'R_lim_03', 'V_u_lt_03', 'V_u_ht_03', 'Droop_03', 'DB_03', 'T_cur_03', 'R_lim_max_03', 'K_fpfr_03', 'P_f_min_03', 'P_f_max_03', 'K_p_pll_03', 'K_i_pll_03', 'K_speed_03', 'K_p_agc', 'K_i_agc']
self.params_values_list = [100000.0, 64.70588235294117, -258.8235294117647, 0.0, 12.131762250617438, -7.801776366956552, 0.0, 400.0, 400.0, 400.0, 10000000.0, 314.1592653589793, 6.5, 8.0, 0.4, 1.8, 1.7, 0.3, 0.55, 1.0, 0.0025, 0.001, 0.0, 300, 1e-06, 0.02, -10000.0, 5.0, 10, 0.05, 1.0, 2.0, 10.0, 0.01, 1.0, 10.0, 0.1, 0.1, 1.0, 0.1, 20000.0, 314.1592653589793, 0.01, 0.1, 0.1, 0.1, 0.1, 0.01, 0.02, 5.0, 0.005, 0.1, 80, 160, 0.1, 1.0, 0.2, 85, 155, 0.05, 0.001, 10.0, 100.0, 0.0, -1.0, 1.0, 126.0, 3948.0, 1.0, 0.01, 0.01]
self.inputs_ini_list = ['P_01', 'Q_01', 'P_02', 'Q_02', 'P_03', 'Q_03', 'v_ref_01', 'v_pss_01', 'p_c_01', 'p_r_01', 'q_s_ref_03', 'v_u_ref_03', 'omega_ref_03', 'p_gin_0_03', 'p_g_ref_03', 'ramp_p_gin_03']
self.inputs_ini_values_list = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.03, 0.0, 0.778, 0.0, 0.0, 126.0, 1.0, 0.6, 0.4, 0.0]
self.inputs_run_list = ['P_01', 'Q_01', 'P_02', 'Q_02', 'P_03', 'Q_03', 'v_ref_01', 'v_pss_01', 'p_c_01', 'p_r_01', 'q_s_ref_03', 'v_u_ref_03', 'omega_ref_03', 'p_gin_0_03', 'p_g_ref_03', 'ramp_p_gin_03']
self.inputs_run_values_list = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.03, 0.0, 0.778, 0.0, 0.0, 126.0, 1.0, 0.6, 0.4, 0.0]
self.outputs_list = ['V_01', 'V_02', 'V_03', 'p_e_01', 'p_gin_03', 'p_g_ref_03', 'p_l_03', 'soc_03', 'p_fpfr_03', 'p_f_sat_03']
self.x_list = ['delta_01', 'omega_01', 'e1q_01', 'e1d_01', 'v_c_01', 'xi_v_01', 'x_gov_1_01', 'x_gov_2_01', 'xi_imw_01', 'x_wo_01', 'x_lead_01', 'delta_03', 'xi_p_03', 'xi_q_03', 'e_u_03', 'p_ghr_03', 'k_cur_03', 'inc_p_gin_03', 'theta_pll_03', 'xi_pll_03', 'xi_freq']
self.y_run_list = ['V_01', 'theta_01', 'V_02', 'theta_02', 'V_03', 'theta_03', 'i_d_01', 'i_q_01', 'p_g_01', 'q_g_01', 'v_f_01', 'p_m_ref_01', 'p_m_01', 'z_wo_01', 'v_pss_01', 'omega_03', 'e_qv_03', 'i_d_03', 'i_q_03', 'p_s_03', 'q_s_03', 'p_m_03', 'p_t_03', 'p_u_03', 'v_u_03', 'k_u_03', 'k_cur_sat_03', 'p_gou_03', 'p_f_03', 'r_lim_03', 'omega_pll_03', 'omega_coi', 'p_agc']
self.xy_list = self.x_list + self.y_run_list
self.y_ini_list = ['V_01', 'theta_01', 'V_02', 'theta_02', 'V_03', 'theta_03', 'i_d_01', 'i_q_01', 'p_g_01', 'q_g_01', 'v_f_01', 'p_m_ref_01', 'p_m_01', 'z_wo_01', 'v_pss_01', 'omega_03', 'e_qv_03', 'i_d_03', 'i_q_03', 'p_s_03', 'q_s_03', 'p_m_03', 'p_t_03', 'p_u_03', 'v_u_03', 'k_u_03', 'k_cur_sat_03', 'p_gou_03', 'p_f_03', 'r_lim_03', 'omega_pll_03', 'omega_coi', 'p_agc']
self.xy_ini_list = self.x_list + self.y_ini_list
self.t = 0.0
self.it = 0
self.it_store = 0
self.xy_prev = np.zeros((self.N_x+self.N_y,1))
self.initialization_tol = 1e-6
self.N_u = len(self.inputs_run_list)
self.sopt_root_method='hybr'
self.sopt_root_jac=True
self.u_ini_list = self.inputs_ini_list
self.u_ini_values_list = self.inputs_ini_values_list
self.u_run_list = self.inputs_run_list
self.u_run_values_list = self.inputs_run_values_list
self.N_u = len(self.u_run_list)
Fx_ini_rows,Fx_ini_cols,Fy_ini_rows,Fy_ini_cols,Gx_ini_rows,Gx_ini_cols,Gy_ini_rows,Gy_ini_cols = nonzeros()
self.Fx_ini_rows = np.array(Fx_ini_rows)
if len(Fx_ini_rows) == 1:
self.Fx_ini_rows = np.array([[Fx_ini_rows]]).reshape(1,)
self.Fx_ini_cols = np.array([[Fx_ini_cols]]).reshape(1,)
self.Fx_ini_cols = np.array(Fx_ini_cols)
self.Fy_ini_rows = np.array(Fy_ini_rows)
self.Fy_ini_cols = np.array(Fy_ini_cols)
self.Gx_ini_rows = np.array(Gx_ini_rows)
self.Gx_ini_cols = np.array(Gx_ini_cols)
self.Gy_ini_rows = np.array(Gy_ini_rows)
self.Gy_ini_cols = np.array(Gy_ini_cols)
self.yini2urun = list(set(self.inputs_run_list).intersection(set(self.y_ini_list)))
self.uini2yrun = list(set(self.y_run_list).intersection(set(self.inputs_ini_list)))
self.update()
def update(self):
self.N_steps = int(np.ceil(self.t_end/self.Dt))
dt = [
('t_end', np.float64),
('Dt', np.float64),
('decimation', np.float64),
('itol', np.float64),
('Dt_max', np.float64),
('Dt_min', np.float64),
('solvern', np.int64),
('imax', np.int64),
('N_steps', np.int64),
('N_store', np.int64),
('N_x', np.int64),
('N_y', np.int64),
('N_z', np.int64),
('t', np.float64),
('it', np.int64),
('it_store', np.int64),
('idx', np.int64),
('idy', np.int64),
('f', np.float64, (self.N_x,1)),
('x', np.float64, (self.N_x,1)),
('x_0', np.float64, (self.N_x,1)),
('g', np.float64, (self.N_y,1)),
('y_run', np.float64, (self.N_y,1)),
('y_ini', np.float64, (self.N_y,1)),
('u_run', np.float64, (self.N_u,1)),
('y_0', np.float64, (self.N_y,1)),
('h', np.float64, (self.N_z,1)),
('Fx', np.float64, (self.N_x,self.N_x)),
('Fy', np.float64, (self.N_x,self.N_y)),
('Gx', np.float64, (self.N_y,self.N_x)),
('Gy', np.float64, (self.N_y,self.N_y)),
('Fu', np.float64, (self.N_x,self.N_u)),
('Gu', np.float64, (self.N_y,self.N_u)),
('Hx', np.float64, (self.N_z,self.N_x)),
('Hy', np.float64, (self.N_z,self.N_y)),
('Hu', np.float64, (self.N_z,self.N_u)),
('Fx_ini', np.float64, (self.N_x,self.N_x)),
('Fy_ini', np.float64, (self.N_x,self.N_y)),
('Gx_ini', np.float64, (self.N_y,self.N_x)),
('Gy_ini', np.float64, (self.N_y,self.N_y)),
('T', np.float64, (self.N_store+1,1)),
('X', np.float64, (self.N_store+1,self.N_x)),
('Y', np.float64, (self.N_store+1,self.N_y)),
('Z', np.float64, (self.N_store+1,self.N_z)),
('iters', np.float64, (self.N_store+1,1)),
('store', np.int64),
('Fx_ini_rows', np.int64, self.Fx_ini_rows.shape),
('Fx_ini_cols', np.int64, self.Fx_ini_cols.shape),
('Fy_ini_rows', np.int64, self.Fy_ini_rows.shape),
('Fy_ini_cols', np.int64, self.Fy_ini_cols.shape),
('Gx_ini_rows', np.int64, self.Gx_ini_rows.shape),
('Gx_ini_cols', np.int64, self.Gx_ini_cols.shape),
('Gy_ini_rows', np.int64, self.Gy_ini_rows.shape),
('Gy_ini_cols', np.int64, self.Gy_ini_cols.shape),
('Ac_ini', np.float64, ((self.N_x+self.N_y,self.N_x+self.N_y))),
('fg', np.float64, ((self.N_x+self.N_y,1))),
]
values = [
self.t_end,
self.Dt,
self.decimation,
self.itol,
self.Dt_max,
self.Dt_min,
self.solvern,
self.imax,
self.N_steps,
self.N_store,
self.N_x,
self.N_y,
self.N_z,
self.t,
self.it,
self.it_store,
0, # idx
0, # idy
np.zeros((self.N_x,1)), # f
np.zeros((self.N_x,1)), # x
np.zeros((self.N_x,1)), # x_0
np.zeros((self.N_y,1)), # g
np.zeros((self.N_y,1)), # y_run
np.zeros((self.N_y,1)), # y_ini
np.zeros((self.N_u,1)), # u_run
np.zeros((self.N_y,1)), # y_0
np.zeros((self.N_z,1)), # h
np.zeros((self.N_x,self.N_x)), # Fx
np.zeros((self.N_x,self.N_y)), # Fy
np.zeros((self.N_y,self.N_x)), # Gx
np.zeros((self.N_y,self.N_y)), # Gy
np.zeros((self.N_x,self.N_u)), # Fu
np.zeros((self.N_y,self.N_u)), # Gu
np.zeros((self.N_z,self.N_x)), # Hx
np.zeros((self.N_z,self.N_y)), # Hy
np.zeros((self.N_z,self.N_u)), # Hu
np.zeros((self.N_x,self.N_x)), # Fx_ini
np.zeros((self.N_x,self.N_y)), # Fy_ini
np.zeros((self.N_y,self.N_x)), # Gx_ini
np.zeros((self.N_y,self.N_y)), # Gy_ini
np.zeros((self.N_store+1,1)), # T
np.zeros((self.N_store+1,self.N_x)), # X
np.zeros((self.N_store+1,self.N_y)), # Y
np.zeros((self.N_store+1,self.N_z)), # Z
np.zeros((self.N_store+1,1)), # iters
1,
self.Fx_ini_rows,
self.Fx_ini_cols,
self.Fy_ini_rows,
self.Fy_ini_cols,
self.Gx_ini_rows,
self.Gx_ini_cols,
self.Gy_ini_rows,
self.Gy_ini_cols,
np.zeros((self.N_x+self.N_y,self.N_x+self.N_y)),
np.zeros((self.N_x+self.N_y,1)),
]
dt += [(item,np.float64) for item in self.params_list]
values += [item for item in self.params_values_list]
for item_id,item_val in zip(self.inputs_ini_list,self.inputs_ini_values_list):
if item_id in self.inputs_run_list: continue
dt += [(item_id,np.float64)]
values += [item_val]
dt += [(item,np.float64) for item in self.inputs_run_list]
values += [item for item in self.inputs_run_values_list]
self.struct = np.rec.array([tuple(values)], dtype=np.dtype(dt))
xy0 = np.zeros((self.N_x+self.N_y,))
self.ini_dae_jacobian_nn(xy0)
self.run_dae_jacobian_nn(xy0)
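# Load parameters and inputs either from a JSON file (path given as a string)
# or from an already-parsed dictionary, updating both the record array and the
# cached parameter/input value lists.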
def load_params(self,data_input):
if type(data_input) == str:
json_file = data_input
self.json_file = json_file
self.json_data = open(json_file).read().replace("'",'"')
data = json.loads(self.json_data)
elif type(data_input) == dict:
data = data_input
self.data = data
for item in self.data:
self.struct[0][item] = self.data[item]
if item in self.params_list:
self.params_values_list[self.params_list.index(item)] = self.data[item]
elif item in self.inputs_ini_list:
self.inputs_ini_values_list[self.inputs_ini_list.index(item)] = self.data[item]
elif item in self.inputs_run_list:
self.inputs_run_values_list[self.inputs_run_list.index(item)] = self.data[item]
else:
print(f'parameter or input {item} not found')
def save_params(self,file_name = 'parameters.json'):
params_dict = {}
for item in self.params_list:
params_dict.update({item:self.get_value(item)})
params_dict_str = json.dumps(params_dict, indent=4)
with open(file_name,'w') as fobj:
fobj.write(params_dict_str)
def save_inputs_ini(self,file_name = 'inputs_ini.json'):
inputs_ini_dict = {}
for item in self.inputs_ini_list:
inputs_ini_dict.update({item:self.get_value(item)})
inputs_ini_dict_str = json.dumps(inputs_ini_dict, indent=4)
with open(file_name,'w') as fobj:
fobj.write(inputs_ini_dict_str)
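# Residuals of the initialization problem: x is the stacked vector
# [states, initialization algebraic states]; the result is the stacked vector [f, g].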
def ini_problem(self,x):
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_ini[:,0] = x[self.N_x:(self.N_x+self.N_y)]
if self.compile:
ini(self.struct,2)
ini(self.struct,3)
else:
ini.py_func(self.struct,2)
ini.py_func(self.struct,3)
fg = np.vstack((self.struct[0].f,self.struct[0].g))[:,0]
return fg
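# Residuals of the running (simulation) problem, evaluated at the current
# simulation time with the run-time algebraic states.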
def run_problem(self,x):
t = self.struct[0].t
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_run[:,0] = x[self.N_x:(self.N_x+self.N_y)]
if self.compile:
run(t,self.struct,2)
run(t,self.struct,3)
run(t,self.struct,10)
run(t,self.struct,11)
run(t,self.struct,12)
run(t,self.struct,13)
else:
run.py_func(t,self.struct,2)
run.py_func(t,self.struct,3)
run.py_func(t,self.struct,10)
run.py_func(t,self.struct,11)
run.py_func(t,self.struct,12)
run.py_func(t,self.struct,13)
fg = np.vstack((self.struct[0].f,self.struct[0].g))[:,0]
return fg
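# Full DAE Jacobian at the run point:
# A_c = [[Fx, Fy],
#        [Gx, Gy]]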
def run_dae_jacobian(self,x):
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_run[:,0] = x[self.N_x:(self.N_x+self.N_y)]
run(0.0,self.struct,10)
run(0.0,self.struct,11)
run(0.0,self.struct,12)
run(0.0,self.struct,13)
A_c = np.block([[self.struct[0].Fx,self.struct[0].Fy],
[self.struct[0].Gx,self.struct[0].Gy]])
return A_c
def run_dae_jacobian_nn(self,x):
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_run[:,0] = x[self.N_x:(self.N_x+self.N_y)]
run_nn(0.0,self.struct,10)
run_nn(0.0,self.struct,11)
run_nn(0.0,self.struct,12)
run_nn(0.0,self.struct,13)
def eval_jacobians(self):
run(0.0,self.struct,10)
run(0.0,self.struct,11)
run(0.0,self.struct,12)
return 1
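# Full DAE Jacobian at the initialization point, built from the *_ini blocks.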
def ini_dae_jacobian(self,x):
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_ini[:,0] = x[self.N_x:(self.N_x+self.N_y)]
if self.compile:
ini(self.struct,10)
ini(self.struct,11)
else:
ini.py_func(self.struct,10)
ini.py_func(self.struct,11)
A_c = np.block([[self.struct[0].Fx_ini,self.struct[0].Fy_ini],
[self.struct[0].Gx_ini,self.struct[0].Gy_ini]])
return A_c
def ini_dae_jacobian_nn(self,x):
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_ini[:,0] = x[self.N_x:(self.N_x+self.N_y)]
ini_nn(self.struct,10)
ini_nn(self.struct,11)
def f_ode(self,x):
self.struct[0].x[:,0] = x
run(self.struct,1)
return self.struct[0].f[:,0]
def f_odeint(self,x,t):
self.struct[0].x[:,0] = x
run(self.struct,1)
return self.struct[0].f[:,0]
def f_ivp(self,t,x):
self.struct[0].x[:,0] = x
run(self.struct,1)
return self.struct[0].f[:,0]
def Fx_ode(self,x):
self.struct[0].x[:,0] = x
run(self.struct,10)
return self.struct[0].Fx
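# Reduced state matrix of the linearized DAE, obtained by eliminating the
# algebraic states: A = Fx - Fy @ inv(Gy) @ Gx (computed with a solve, not an
# explicit inverse).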
def eval_A(self):
Fx = self.struct[0].Fx
Fy = self.struct[0].Fy
Gx = self.struct[0].Gx
Gy = self.struct[0].Gy
A = Fx - Fy @ np.linalg.solve(Gy,Gx)
self.A = A
return A
def eval_A_ini(self):
Fx = self.struct[0].Fx_ini
Fy = self.struct[0].Fy_ini
Gx = self.struct[0].Gx_ini
Gy = self.struct[0].Gy_ini
A = Fx - Fy @ np.linalg.solve(Gy,Gx)
return A
def reset(self):
for param,param_value in zip(self.params_list,self.params_values_list):
self.struct[0][param] = param_value
for input_name,input_value in zip(self.inputs_ini_list,self.inputs_ini_values_list):
self.struct[0][input_name] = input_value
for input_name,input_value in zip(self.inputs_run_list,self.inputs_run_values_list):
self.struct[0][input_name] = input_value
def simulate(self,events,xy0=0):
# initialize both the ini and the run system
self.initialize(events,xy0=xy0)
# simulation run
for event in events:
# make all the desired changes
self.run([event])
# post process
T,X,Y,Z = self.post()
return T,X,Y,Z
def run(self,events):
# simulation run
for event in events:
# make all the desired changes
for item in event:
self.struct[0][item] = event[item]
daesolver(self.struct) # run until next event
return 1
def rtrun(self,events):
# simulation run
for event in events:
# make all the desired changes
for item in event:
self.struct[0][item] = event[item]
self.struct[0].it_store = self.struct[0].N_store-1
daesolver(self.struct) # run until next event
return 1
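# Trim the storage buffers to the number of stored steps and expose them as
# self.T, self.X, self.Y, self.Z (plus the iteration counts).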
def post(self):
# post process result
T = self.struct[0]['T'][:self.struct[0].it_store]
X = self.struct[0]['X'][:self.struct[0].it_store,:]
Y = self.struct[0]['Y'][:self.struct[0].it_store,:]
Z = self.struct[0]['Z'][:self.struct[0].it_store,:]
iters = self.struct[0]['iters'][:self.struct[0].it_store,:]
self.T = T
self.X = X
self.Y = Y
self.Z = Z
self.iters = iters
return T,X,Y,Z
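# Persist the current state and algebraic-state values so they can later be
# reused as the initial guess via load_0() or xy0='prev'.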
def save_0(self,file_name = 'xy_0.json'):
xy_0_dict = {}
for item in self.x_list:
xy_0_dict.update({item:self.get_value(item)})
for item in self.y_ini_list:
xy_0_dict.update({item:self.get_value(item)})
xy_0_str = json.dumps(xy_0_dict, indent=4)
with open(file_name,'w') as fobj:
fobj.write(xy_0_str)
def load_0(self,file_name = 'xy_0.json'):
with open(file_name) as fobj:
xy_0_str = fobj.read()
xy_0_dict = json.loads(xy_0_str)
for item in xy_0_dict:
if item in self.x_list:
self.xy_prev[self.x_list.index(item)] = xy_0_dict[item]
if item in self.y_ini_list:
self.xy_prev[self.y_ini_list.index(item)+self.N_x] = xy_0_dict[item]
def initialize(self,events=[{}],xy0=0,compile=True):
'''
Initialize both the ini and the run systems and solve the initialization
problem.
Parameters
----------
events : list of dict
Each event holds at least 't_end' plus any inputs and parameters that
need to be changed; only the first event is applied here.
xy0 : int, float, str or dict, optional
0 means all states start from zero as initial guess.
Any other number is used as the initial guess for every state.
'prev' reuses the last known initialization result; a file name or a
dictionary of values can also be given.
compile : bool, optional
If True the numba-compiled ini/run functions are used, otherwise the
pure-python versions.
Returns
-------
initialization_ok : bool
True if the initialization problem converged.
'''
self.compile = compile
# simulation parameters
self.struct[0].it = 0 # set time step to zero
self.struct[0].it_store = 0 # set storage to zero
self.struct[0].t = 0.0 # set time to zero
# initialization
it_event = 0
event = events[it_event]
for item in event:
self.struct[0][item] = event[item]
## compute initial conditions using x and y_ini
if type(xy0) == str:
if xy0 == 'prev':
xy0 = self.xy_prev
else:
self.load_0(xy0)
xy0 = self.xy_prev
elif type(xy0) == dict:
with open('xy_0.json','w') as fobj:
fobj.write(json.dumps(xy0))
self.load_0('xy_0.json')
xy0 = self.xy_prev
else:
if xy0 == 0:
xy0 = np.zeros(self.N_x+self.N_y)
elif xy0 == 1:
xy0 = np.ones(self.N_x+self.N_y)
else:
xy0 = xy0*np.ones(self.N_x+self.N_y)
#xy = sopt.fsolve(self.ini_problem,xy0, jac=self.ini_dae_jacobian )
if self.sopt_root_jac:
sol = sopt.root(self.ini_problem, xy0,
jac=self.ini_dae_jacobian,
method=self.sopt_root_method, tol=self.initialization_tol)
else:
sol = sopt.root(self.ini_problem, xy0, method=self.sopt_root_method)
self.initialization_ok = True
if sol.success == False:
print('initialization not found!')
self.initialization_ok = False
T = self.struct[0]['T'][:self.struct[0].it_store]
X = self.struct[0]['X'][:self.struct[0].it_store,:]
Y = self.struct[0]['Y'][:self.struct[0].it_store,:]
Z = self.struct[0]['Z'][:self.struct[0].it_store,:]
iters = self.struct[0]['iters'][:self.struct[0].it_store,:]
if self.initialization_ok:
xy = sol.x
self.xy_prev = xy
self.struct[0].x[:,0] = xy[0:self.N_x]
self.struct[0].y_run[:,0] = xy[self.N_x:]
## y_ini to u_run
for item in self.inputs_run_list:
if item in self.y_ini_list:
self.struct[0][item] = self.struct[0].y_ini[self.y_ini_list.index(item)]
## u_ini to y_run
for item in self.inputs_ini_list:
if item in self.y_run_list:
self.struct[0].y_run[self.y_run_list.index(item)] = self.struct[0][item]
#xy = sopt.fsolve(self.ini_problem,xy0, jac=self.ini_dae_jacobian )
if self.sopt_root_jac:
sol = sopt.root(self.run_problem, xy0,
jac=self.run_dae_jacobian,
method=self.sopt_root_method, tol=self.initialization_tol)
else:
sol = sopt.root(self.run_problem, xy0, method=self.sopt_root_method)
if self.compile:
# evaluate f and g
run(0.0,self.struct,2)
run(0.0,self.struct,3)
# evaluate run jacobians
run(0.0,self.struct,10)
run(0.0,self.struct,11)
run(0.0,self.struct,12)
run(0.0,self.struct,14)
else:
# evaluate f and g
run.py_func(0.0,self.struct,2)
run.py_func(0.0,self.struct,3)
# evaluate run jacobians
run.py_func(0.0,self.struct,10)
run.py_func(0.0,self.struct,11)
run.py_func(0.0,self.struct,12)
run.py_func(0.0,self.struct,14)
# post process result
T = self.struct[0]['T'][:self.struct[0].it_store]
X = self.struct[0]['X'][:self.struct[0].it_store,:]
Y = self.struct[0]['Y'][:self.struct[0].it_store,:]
Z = self.struct[0]['Z'][:self.struct[0].it_store,:]
iters = self.struct[0]['iters'][:self.struct[0].it_store,:]
self.T = T
self.X = X
self.Y = Y
self.Z = Z
self.iters = iters
return self.initialization_ok
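# Minimal usage sketch (the class name below is a placeholder, not part of
# this file): instantiate the generated model, initialize, run and read back
# results.
#
#     model = generated_model()                      # hypothetical class name
#     model.initialize([{'t_end': 1.0}], xy0=1)      # solve the initialization problem
#     model.run([{'t_end': 5.0, 'p_c_01': 0.8}])     # integrate with a changed input
#     T, X, Y, Z = model.post()                      # trimmed time, states, algebraic states, outputs
#     omega = model.get_values('omega_01')           # trajectory of a single state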
def get_value(self,name):
if name in self.inputs_run_list:
value = self.struct[0][name]
if name in self.x_list:
idx = self.x_list.index(name)
value = self.struct[0].x[idx,0]
if name in self.y_run_list:
idy = self.y_run_list.index(name)
value = self.struct[0].y_run[idy,0]
if name in self.params_list:
value = self.struct[0][name]
if name in self.outputs_list:
value = self.struct[0].h[self.outputs_list.index(name),0]
return value
def get_values(self,name):
if name in self.x_list:
values = self.X[:,self.x_list.index(name)]
if name in self.y_run_list:
values = self.Y[:,self.y_run_list.index(name)]
if name in self.outputs_list:
values = self.Z[:,self.outputs_list.index(name)]
return values
def get_mvalue(self,names):
'''
Parameters
----------
names : list
Variable names whose values are returned.
Returns
-------
mvalue : list
Value of each requested variable, in the same order as ``names``.
'''
mvalue = []
for name in names:
mvalue += [self.get_value(name)]
return mvalue
def set_value(self,name_,value):
if name_ in self.inputs_run_list:
self.struct[0][name_] = value
return
elif name_ in self.params_list:
self.struct[0][name_] = value
return
elif name_ in self.inputs_ini_list:
self.struct[0][name_] = value
return
else:
print(f'Input or parameter {name_} not found.')
def set_values(self,dictionary):
for item in dictionary:
self.set_value(item,dictionary[item])
def report_x(self,value_format='5.2f', decimals=2):
for item in self.x_list:
print(f'{item:5s} = {self.get_value(item):5.{decimals}f}')
def report_y(self,value_format='5.2f', decimals=2):
for item in self.y_run_list:
print(f'{item:5s} = {self.get_value(item):5.{decimals}f}')
def report_u(self,value_format='5.2f', decimals=2):
for item in self.inputs_run_list:
print(f'{item:5s} = {self.get_value(item):5.{decimals}f}')
def report_z(self,value_format='5.2f', decimals=2):
for item in self.outputs_list:
print(f'{item:5s} = {self.get_value(item):5.{decimals}f}')
def report_params(self,value_format='5.2f', decimals=2):
for item in self.params_list:
print(f'{item:5s} = {self.get_value(item):5.{decimals}f}')
def get_x(self):
return self.struct[0].x
def ss(self):
ssate(self.struct,self.xy_prev.reshape(len(self.xy_prev),1))
## y_ini to y_run
self.struct[0].y_run = self.struct[0].y_ini
## y_ini to u_run
for item in self.yini2urun:
self.struct[0][item] = self.struct[0].y_ini[self.y_ini_list.index(item)]
## u_ini to y_run
for item in self.uini2yrun:
self.struct[0].y_run[self.y_run_list.index(item)] = self.struct[0][item]
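# Generated initialization function. ``mode`` selects which blocks are
# evaluated: 2 -> differential equations f, 3 -> algebraic equations g and
# outputs h, 10 -> Fx_ini, 11 -> Fy_ini, Gx_ini and Gy_ini.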
@numba.njit(cache=True)
def ini(struct,mode):
# Parameters:
S_base = struct[0].S_base
g_01_02 = struct[0].g_01_02
b_01_02 = struct[0].b_01_02
bs_01_02 = struct[0].bs_01_02
g_02_03 = struct[0].g_02_03
b_02_03 = struct[0].b_02_03
bs_02_03 = struct[0].bs_02_03
U_01_n = struct[0].U_01_n
U_02_n = struct[0].U_02_n
U_03_n = struct[0].U_03_n
S_n_01 = struct[0].S_n_01
Omega_b_01 = struct[0].Omega_b_01
H_01 = struct[0].H_01
T1d0_01 = struct[0].T1d0_01
T1q0_01 = struct[0].T1q0_01
X_d_01 = struct[0].X_d_01
X_q_01 = struct[0].X_q_01
X1d_01 = struct[0].X1d_01
X1q_01 = struct[0].X1q_01
D_01 = struct[0].D_01
R_a_01 = struct[0].R_a_01
K_delta_01 = struct[0].K_delta_01
K_sec_01 = struct[0].K_sec_01
K_a_01 = struct[0].K_a_01
K_ai_01 = struct[0].K_ai_01
T_r_01 = struct[0].T_r_01
V_min_01 = struct[0].V_min_01
V_max_01 = struct[0].V_max_01
K_aw_01 = struct[0].K_aw_01
Droop_01 = struct[0].Droop_01
T_gov_1_01 = struct[0].T_gov_1_01
T_gov_2_01 = struct[0].T_gov_2_01
T_gov_3_01 = struct[0].T_gov_3_01
K_imw_01 = struct[0].K_imw_01
omega_ref_01 = struct[0].omega_ref_01
T_wo_01 = struct[0].T_wo_01
T_1_01 = struct[0].T_1_01
T_2_01 = struct[0].T_2_01
K_stab_01 = struct[0].K_stab_01
V_lim_01 = struct[0].V_lim_01
S_n_03 = struct[0].S_n_03
Omega_b_03 = struct[0].Omega_b_03
K_p_03 = struct[0].K_p_03
T_p_03 = struct[0].T_p_03
K_q_03 = struct[0].K_q_03
T_q_03 = struct[0].T_q_03
X_v_03 = struct[0].X_v_03
R_v_03 = struct[0].R_v_03
R_s_03 = struct[0].R_s_03
C_u_03 = struct[0].C_u_03
K_u_0_03 = struct[0].K_u_0_03
K_u_max_03 = struct[0].K_u_max_03
V_u_min_03 = struct[0].V_u_min_03
V_u_max_03 = struct[0].V_u_max_03
R_uc_03 = struct[0].R_uc_03
K_h_03 = struct[0].K_h_03
R_lim_03 = struct[0].R_lim_03
V_u_lt_03 = struct[0].V_u_lt_03
V_u_ht_03 = struct[0].V_u_ht_03
Droop_03 = struct[0].Droop_03
DB_03 = struct[0].DB_03
T_cur_03 = struct[0].T_cur_03
R_lim_max_03 = struct[0].R_lim_max_03
K_fpfr_03 = struct[0].K_fpfr_03
P_f_min_03 = struct[0].P_f_min_03
P_f_max_03 = struct[0].P_f_max_03
K_p_pll_03 = struct[0].K_p_pll_03
K_i_pll_03 = struct[0].K_i_pll_03
K_speed_03 = struct[0].K_speed_03
K_p_agc = struct[0].K_p_agc
K_i_agc = struct[0].K_i_agc
# Inputs:
P_01 = struct[0].P_01
Q_01 = struct[0].Q_01
P_02 = struct[0].P_02
Q_02 = struct[0].Q_02
P_03 = struct[0].P_03
Q_03 = struct[0].Q_03
v_ref_01 = struct[0].v_ref_01
v_pss_01 = struct[0].v_pss_01
p_c_01 = struct[0].p_c_01
p_r_01 = struct[0].p_r_01
q_s_ref_03 = struct[0].q_s_ref_03
v_u_ref_03 = struct[0].v_u_ref_03
omega_ref_03 = struct[0].omega_ref_03
p_gin_0_03 = struct[0].p_gin_0_03
p_g_ref_03 = struct[0].p_g_ref_03
ramp_p_gin_03 = struct[0].ramp_p_gin_03
# Dynamical states:
delta_01 = struct[0].x[0,0]
omega_01 = struct[0].x[1,0]
e1q_01 = struct[0].x[2,0]
e1d_01 = struct[0].x[3,0]
v_c_01 = struct[0].x[4,0]
xi_v_01 = struct[0].x[5,0]
x_gov_1_01 = struct[0].x[6,0]
x_gov_2_01 = struct[0].x[7,0]
xi_imw_01 = struct[0].x[8,0]
x_wo_01 = struct[0].x[9,0]
x_lead_01 = struct[0].x[10,0]
delta_03 = struct[0].x[11,0]
xi_p_03 = struct[0].x[12,0]
xi_q_03 = struct[0].x[13,0]
e_u_03 = struct[0].x[14,0]
p_ghr_03 = struct[0].x[15,0]
k_cur_03 = struct[0].x[16,0]
inc_p_gin_03 = struct[0].x[17,0]
theta_pll_03 = struct[0].x[18,0]
xi_pll_03 = struct[0].x[19,0]
xi_freq = struct[0].x[20,0]
# Algebraic states:
V_01 = struct[0].y_ini[0,0]
theta_01 = struct[0].y_ini[1,0]
V_02 = struct[0].y_ini[2,0]
theta_02 = struct[0].y_ini[3,0]
V_03 = struct[0].y_ini[4,0]
theta_03 = struct[0].y_ini[5,0]
i_d_01 = struct[0].y_ini[6,0]
i_q_01 = struct[0].y_ini[7,0]
p_g_01 = struct[0].y_ini[8,0]
q_g_01 = struct[0].y_ini[9,0]
v_f_01 = struct[0].y_ini[10,0]
p_m_ref_01 = struct[0].y_ini[11,0]
p_m_01 = struct[0].y_ini[12,0]
z_wo_01 = struct[0].y_ini[13,0]
v_pss_01 = struct[0].y_ini[14,0]
omega_03 = struct[0].y_ini[15,0]
e_qv_03 = struct[0].y_ini[16,0]
i_d_03 = struct[0].y_ini[17,0]
i_q_03 = struct[0].y_ini[18,0]
p_s_03 = struct[0].y_ini[19,0]
q_s_03 = struct[0].y_ini[20,0]
p_m_03 = struct[0].y_ini[21,0]
p_t_03 = struct[0].y_ini[22,0]
p_u_03 = struct[0].y_ini[23,0]
v_u_03 = struct[0].y_ini[24,0]
k_u_03 = struct[0].y_ini[25,0]
k_cur_sat_03 = struct[0].y_ini[26,0]
p_gou_03 = struct[0].y_ini[27,0]
p_f_03 = struct[0].y_ini[28,0]
r_lim_03 = struct[0].y_ini[29,0]
omega_pll_03 = struct[0].y_ini[30,0]
omega_coi = struct[0].y_ini[31,0]
p_agc = struct[0].y_ini[32,0]
# Differential equations:
if mode == 2:
struct[0].f[0,0] = -K_delta_01*delta_01 + Omega_b_01*(omega_01 - omega_coi)
struct[0].f[1,0] = (-D_01*(omega_01 - omega_coi) - i_d_01*(R_a_01*i_d_01 + V_01*sin(delta_01 - theta_01)) - i_q_01*(R_a_01*i_q_01 + V_01*cos(delta_01 - theta_01)) + p_m_01)/(2*H_01)
struct[0].f[2,0] = (-e1q_01 - i_d_01*(-X1d_01 + X_d_01) + v_f_01)/T1d0_01
struct[0].f[3,0] = (-e1d_01 + i_q_01*(-X1q_01 + X_q_01))/T1q0_01
struct[0].f[4,0] = (V_01 - v_c_01)/T_r_01
struct[0].f[5,0] = -K_aw_01*(K_a_01*(-v_c_01 + v_pss_01 + v_ref_01) + K_ai_01*xi_v_01 - v_f_01) - v_c_01 + v_pss_01 + v_ref_01
struct[0].f[6,0] = (p_m_ref_01 - x_gov_1_01)/T_gov_1_01
struct[0].f[7,0] = (x_gov_1_01 - x_gov_2_01)/T_gov_3_01
struct[0].f[8,0] = K_imw_01*(p_c_01 - p_g_01) - 1.0e-6*xi_imw_01
struct[0].f[9,0] = (omega_01 - x_wo_01 - 1.0)/T_wo_01
struct[0].f[10,0] = (-x_lead_01 + z_wo_01)/T_2_01
struct[0].f[11,0] = Omega_b_03*(omega_03 - omega_coi)
struct[0].f[12,0] = p_m_03 - p_s_03
struct[0].f[13,0] = -q_s_03 + q_s_ref_03
struct[0].f[14,0] = S_n_03*(p_gou_03 - p_t_03)/(C_u_03*(v_u_03 + 0.1))
struct[0].f[15,0] = Piecewise(np.array([(-r_lim_03, r_lim_03 < -K_h_03*(-p_ghr_03 + p_gou_03)), (r_lim_03, r_lim_03 < K_h_03*(-p_ghr_03 + p_gou_03)), (K_h_03*(-p_ghr_03 + p_gou_03), True)]))
struct[0].f[16,0] = (-k_cur_03 + p_g_ref_03/(inc_p_gin_03 + p_gin_0_03) + Piecewise(np.array([(P_f_min_03, P_f_min_03 > p_f_03), (P_f_max_03, P_f_max_03 < p_f_03), (p_f_03, True)]))/(inc_p_gin_03 + p_gin_0_03))/T_cur_03
struct[0].f[17,0] = -0.001*inc_p_gin_03 + ramp_p_gin_03
struct[0].f[18,0] = K_i_pll_03*xi_pll_03 + K_p_pll_03*(V_03*sin(theta_03)*cos(theta_pll_03) - V_03*sin(theta_pll_03)*cos(theta_03)) - Omega_b_03*omega_coi
struct[0].f[19,0] = V_03*sin(theta_03)*cos(theta_pll_03) - V_03*sin(theta_pll_03)*cos(theta_03)
struct[0].f[20,0] = 1 - omega_coi
# Algebraic equations:
if mode == 3:
struct[0].g[:,:] = np.ascontiguousarray(struct[0].Gy_ini) @ np.ascontiguousarray(struct[0].y_ini)
struct[0].g[0,0] = -P_01/S_base + V_01**2*g_01_02 + V_01*V_02*(-b_01_02*sin(theta_01 - theta_02) - g_01_02*cos(theta_01 - theta_02)) - S_n_01*p_g_01/S_base
struct[0].g[1,0] = -Q_01/S_base + V_01**2*(-b_01_02 - bs_01_02/2) + V_01*V_02*(b_01_02*cos(theta_01 - theta_02) - g_01_02*sin(theta_01 - theta_02)) - S_n_01*q_g_01/S_base
struct[0].g[2,0] = -P_02/S_base + V_01*V_02*(b_01_02*sin(theta_01 - theta_02) - g_01_02*cos(theta_01 - theta_02)) + V_02**2*(g_01_02 + g_02_03) + V_02*V_03*(-b_02_03*sin(theta_02 - theta_03) - g_02_03*cos(theta_02 - theta_03))
struct[0].g[3,0] = -Q_02/S_base + V_01*V_02*(b_01_02*cos(theta_01 - theta_02) + g_01_02*sin(theta_01 - theta_02)) + V_02**2*(-b_01_02 - b_02_03 - bs_01_02/2 - bs_02_03/2) + V_02*V_03*(b_02_03*cos(theta_02 - theta_03) - g_02_03*sin(theta_02 - theta_03))
struct[0].g[4,0] = -P_03/S_base + V_02*V_03*(b_02_03*sin(theta_02 - theta_03) - g_02_03*cos(theta_02 - theta_03)) + V_03**2*g_02_03 - S_n_03*p_s_03/S_base
struct[0].g[5,0] = -Q_03/S_base + V_02*V_03*(b_02_03*cos(theta_02 - theta_03) + g_02_03*sin(theta_02 - theta_03)) + V_03**2*(-b_02_03 - bs_02_03/2) - S_n_03*q_s_03/S_base
struct[0].g[6,0] = R_a_01*i_q_01 + V_01*cos(delta_01 - theta_01) + X1d_01*i_d_01 - e1q_01
struct[0].g[7,0] = R_a_01*i_d_01 + V_01*sin(delta_01 - theta_01) - X1q_01*i_q_01 - e1d_01
struct[0].g[8,0] = V_01*i_d_01*sin(delta_01 - theta_01) + V_01*i_q_01*cos(delta_01 - theta_01) - p_g_01
struct[0].g[9,0] = V_01*i_d_01*cos(delta_01 - theta_01) - V_01*i_q_01*sin(delta_01 - theta_01) - q_g_01
struct[0].g[10,0] = -v_f_01 + Piecewise(np.array([(V_min_01, V_min_01 > K_a_01*(-v_c_01 + v_pss_01 + v_ref_01) + K_ai_01*xi_v_01), (V_max_01, V_max_01 < K_a_01*(-v_c_01 + v_pss_01 + v_ref_01) + K_ai_01*xi_v_01), (K_a_01*(-v_c_01 + v_pss_01 + v_ref_01) + K_ai_01*xi_v_01, True)]))
struct[0].g[11,0] = K_sec_01*p_agc - p_m_ref_01 + p_r_01 + xi_imw_01 - (omega_01 - omega_ref_01)/Droop_01
struct[0].g[12,0] = T_gov_2_01*(x_gov_1_01 - x_gov_2_01)/T_gov_3_01 - p_m_01 + x_gov_2_01
struct[0].g[13,0] = omega_01 - x_wo_01 - z_wo_01 - 1.0
struct[0].g[14,0] = -v_pss_01 + Piecewise(np.array([(-V_lim_01, V_lim_01 < -K_stab_01*(T_1_01*(-x_lead_01 + z_wo_01)/T_2_01 + x_lead_01)), (V_lim_01, V_lim_01 < K_stab_01*(T_1_01*(-x_lead_01 + z_wo_01)/T_2_01 + x_lead_01)), (K_stab_01*(T_1_01*(-x_lead_01 + z_wo_01)/T_2_01 + x_lead_01), True)]))
struct[0].g[15,0] = K_p_03*(p_m_03 - p_s_03 + xi_p_03/T_p_03) - omega_03
struct[0].g[16,0] = K_q_03*(-q_s_03 + q_s_ref_03 + xi_q_03/T_q_03) - e_qv_03
struct[0].g[17,0] = -R_v_03*i_d_03 - V_03*sin(delta_03 - theta_03) + X_v_03*i_q_03
struct[0].g[18,0] = -R_v_03*i_q_03 - V_03*cos(delta_03 - theta_03) - X_v_03*i_d_03 + e_qv_03
struct[0].g[19,0] = V_03*i_d_03*sin(delta_03 - theta_03) + V_03*i_q_03*cos(delta_03 - theta_03) - p_s_03
struct[0].g[20,0] = V_03*i_d_03*cos(delta_03 - theta_03) - V_03*i_q_03*sin(delta_03 - theta_03) - q_s_03
struct[0].g[21,0] = K_fpfr_03*Piecewise(np.array([(P_f_min_03, P_f_min_03 > p_f_03), (P_f_max_03, P_f_max_03 < p_f_03), (p_f_03, True)])) + p_ghr_03 - p_m_03 + p_s_03 - p_t_03 + p_u_03
struct[0].g[22,0] = i_d_03*(R_s_03*i_d_03 + V_03*sin(delta_03 - theta_03)) + i_q_03*(R_s_03*i_q_03 + V_03*cos(delta_03 - theta_03)) - p_t_03
struct[0].g[23,0] = -p_u_03 - k_u_03*(-v_u_03**2 + v_u_ref_03**2)/V_u_max_03**2
struct[0].g[24,0] = R_uc_03*S_n_03*(p_gou_03 - p_t_03)/(v_u_03 + 0.1) + e_u_03 - v_u_03
struct[0].g[25,0] = -k_u_03 + Piecewise(np.array([(K_u_max_03, V_u_min_03 > v_u_03), (K_u_0_03 + (-K_u_0_03 + K_u_max_03)*(-V_u_lt_03 + v_u_03)/(-V_u_lt_03 + V_u_min_03), V_u_lt_03 > v_u_03), (K_u_0_03 + (-K_u_0_03 + K_u_max_03)*(-V_u_ht_03 + v_u_03)/(-V_u_ht_03 + V_u_max_03), V_u_ht_03 < v_u_03), (K_u_max_03, V_u_max_03 < v_u_03), (K_u_0_03, True)]))
struct[0].g[26,0] = -k_cur_sat_03 + Piecewise(np.array([(0.0001, k_cur_03 < 0.0001), (1, k_cur_03 > 1), (k_cur_03, True)]))
struct[0].g[27,0] = k_cur_sat_03*(inc_p_gin_03 + p_gin_0_03) - p_gou_03
struct[0].g[28,0] = -p_f_03 - Piecewise(np.array([((0.5*DB_03 + K_speed_03*omega_pll_03 + omega_03*(1 - K_speed_03) - omega_ref_03)/Droop_03, 0.5*DB_03 - omega_ref_03 < -K_speed_03*omega_pll_03 - omega_03*(1 - K_speed_03)), ((-0.5*DB_03 + K_speed_03*omega_pll_03 + omega_03*(1 - K_speed_03) - omega_ref_03)/Droop_03, 0.5*DB_03 + omega_ref_03 < K_speed_03*omega_pll_03 + omega_03*(1 - K_speed_03)), (0.0, True)]))
struct[0].g[29,0] = -r_lim_03 + Piecewise(np.array([(R_lim_max_03, (omega_03 > 0.5*DB_03 + omega_ref_03) | (omega_03 < -0.5*DB_03 + omega_ref_03)), (0.0, True)])) + Piecewise(np.array([(R_lim_03 + (-R_lim_03 + R_lim_max_03)*(-V_u_lt_03 + v_u_03)/(-V_u_lt_03 + V_u_min_03), V_u_lt_03 > v_u_03), (R_lim_03 + (-R_lim_03 + R_lim_max_03)*(-V_u_ht_03 + v_u_03)/(-V_u_ht_03 + V_u_max_03), V_u_ht_03 < v_u_03), (R_lim_03, True)]))
struct[0].g[30,0] = -omega_pll_03 + (K_i_pll_03*xi_pll_03 + K_p_pll_03*(V_03*sin(theta_03)*cos(theta_pll_03) - V_03*sin(theta_pll_03)*cos(theta_03)))/Omega_b_03
struct[0].g[31,0] = -omega_coi + (H_01*S_n_01*omega_01 + S_n_03*T_p_03*omega_03/(2*K_p_03))/(H_01*S_n_01 + S_n_03*T_p_03/(2*K_p_03))
struct[0].g[32,0] = K_i_agc*xi_freq + K_p_agc*(1 - omega_coi) - p_agc
# Outputs:
if mode == 3:
struct[0].h[0,0] = V_01
struct[0].h[1,0] = V_02
struct[0].h[2,0] = V_03
struct[0].h[3,0] = i_d_01*(R_a_01*i_d_01 + V_01*sin(delta_01 - theta_01)) + i_q_01*(R_a_01*i_q_01 + V_01*cos(delta_01 - theta_01))
struct[0].h[4,0] = inc_p_gin_03 + p_gin_0_03
struct[0].h[5,0] = p_g_ref_03
struct[0].h[6,0] = -p_s_03 + p_t_03
struct[0].h[7,0] = (-V_u_min_03**2 + e_u_03**2)/(V_u_max_03**2 - V_u_min_03**2)
struct[0].h[8,0] = K_fpfr_03*Piecewise(np.array([(P_f_min_03, P_f_min_03 > p_f_03), (P_f_max_03, P_f_max_03 < p_f_03), (p_f_03, True)]))
struct[0].h[9,0] = Piecewise(np.array([(P_f_min_03, P_f_min_03 > p_f_03), (P_f_max_03, P_f_max_03 < p_f_03), (p_f_03, True)]))
if mode == 10:
struct[0].Fx_ini[0,0] = -K_delta_01
struct[0].Fx_ini[0,1] = Omega_b_01
struct[0].Fx_ini[1,0] = (-V_01*i_d_01*cos(delta_01 - theta_01) + V_01*i_q_01*sin(delta_01 - theta_01))/(2*H_01)
struct[0].Fx_ini[1,1] = -D_01/(2*H_01)
struct[0].Fx_ini[2,2] = -1/T1d0_01
struct[0].Fx_ini[3,3] = -1/T1q0_01
struct[0].Fx_ini[4,4] = -1/T_r_01
struct[0].Fx_ini[5,4] = K_a_01*K_aw_01 - 1
struct[0].Fx_ini[5,5] = -K_ai_01*K_aw_01
struct[0].Fx_ini[6,6] = -1/T_gov_1_01
struct[0].Fx_ini[7,6] = 1/T_gov_3_01
struct[0].Fx_ini[7,7] = -1/T_gov_3_01
struct[0].Fx_ini[9,1] = 1/T_wo_01
struct[0].Fx_ini[9,9] = -1/T_wo_01
struct[0].Fx_ini[10,10] = -1/T_2_01
struct[0].Fx_ini[15,15] = Piecewise(np.array([(0, (r_lim_03 < K_h_03*(-p_ghr_03 + p_gou_03)) | (r_lim_03 < -K_h_03*(-p_ghr_03 + p_gou_03))), (-K_h_03, True)]))
struct[0].Fx_ini[16,16] = -1/T_cur_03
struct[0].Fx_ini[16,17] = (-p_g_ref_03/(inc_p_gin_03 + p_gin_0_03)**2 - Piecewise(np.array([(P_f_min_03, P_f_min_03 > p_f_03), (P_f_max_03, P_f_max_03 < p_f_03), (p_f_03, True)]))/(inc_p_gin_03 + p_gin_0_03)**2)/T_cur_03
struct[0].Fx_ini[18,18] = K_p_pll_03*(-V_03*sin(theta_03)*sin(theta_pll_03) - V_03*cos(theta_03)*cos(theta_pll_03))
struct[0].Fx_ini[18,19] = K_i_pll_03
struct[0].Fx_ini[19,18] = -V_03*sin(theta_03)*sin(theta_pll_03) - V_03*cos(theta_03)*cos(theta_pll_03)
if mode == 11:
struct[0].Fy_ini[0,31] = -Omega_b_01
struct[0].Fy_ini[1,0] = (-i_d_01*sin(delta_01 - theta_01) - i_q_01*cos(delta_01 - theta_01))/(2*H_01)
struct[0].Fy_ini[1,1] = (V_01*i_d_01*cos(delta_01 - theta_01) - V_01*i_q_01*sin(delta_01 - theta_01))/(2*H_01)
struct[0].Fy_ini[1,6] = (-2*R_a_01*i_d_01 - V_01*sin(delta_01 - theta_01))/(2*H_01)
struct[0].Fy_ini[1,7] = (-2*R_a_01*i_q_01 - V_01*cos(delta_01 - theta_01))/(2*H_01)
struct[0].Fy_ini[1,12] = 1/(2*H_01)
struct[0].Fy_ini[1,31] = D_01/(2*H_01)
struct[0].Fy_ini[2,6] = (X1d_01 - X_d_01)/T1d0_01
struct[0].Fy_ini[2,10] = 1/T1d0_01
struct[0].Fy_ini[3,7] = (-X1q_01 + X_q_01)/T1q0_01
struct[0].Fy_ini[4,0] = 1/T_r_01
struct[0].Fy_ini[5,10] = K_aw_01
struct[0].Fy_ini[5,14] = -K_a_01*K_aw_01 + 1
struct[0].Fy_ini[6,11] = 1/T_gov_1_01
struct[0].Fy_ini[8,8] = -K_imw_01
struct[0].Fy_ini[10,13] = 1/T_2_01
struct[0].Fy_ini[11,15] = Omega_b_03
struct[0].Fy_ini[11,31] = -Omega_b_03
struct[0].Fy_ini[12,19] = -1
struct[0].Fy_ini[12,21] = 1
struct[0].Fy_ini[13,20] = -1
struct[0].Fy_ini[14,22] = -S_n_03/(C_u_03*(v_u_03 + 0.1))
struct[0].Fy_ini[14,24] = -S_n_03*(p_gou_03 - p_t_03)/(C_u_03*(v_u_03 + 0.1)**2)
struct[0].Fy_ini[14,27] = S_n_03/(C_u_03*(v_u_03 + 0.1))
struct[0].Fy_ini[15,27] = Piecewise(np.array([(0, (r_lim_03 < K_h_03*(-p_ghr_03 + p_gou_03)) | (r_lim_03 < -K_h_03*(-p_ghr_03 + p_gou_03))), (K_h_03, True)]))
struct[0].Fy_ini[15,29] = Piecewise(np.array([(-1, r_lim_03 < -K_h_03*(-p_ghr_03 + p_gou_03)), (1, r_lim_03 < K_h_03*(-p_ghr_03 + p_gou_03)), (0, True)]))
struct[0].Fy_ini[16,28] = Piecewise(np.array([(0, (P_f_min_03 > p_f_03) | (P_f_max_03 < p_f_03)), (1, True)]))/(T_cur_03*(inc_p_gin_03 + p_gin_0_03))
struct[0].Fy_ini[18,4] = K_p_pll_03*(sin(theta_03)*cos(theta_pll_03) - sin(theta_pll_03)*cos(theta_03))
struct[0].Fy_ini[18,5] = K_p_pll_03*(V_03*sin(theta_03)*sin(theta_pll_03) + V_03*cos(theta_03)*cos(theta_pll_03))
struct[0].Fy_ini[18,31] = -Omega_b_03
struct[0].Fy_ini[19,4] = sin(theta_03)*cos(theta_pll_03) - sin(theta_pll_03)*cos(theta_03)
struct[0].Fy_ini[19,5] = V_03*sin(theta_03)*sin(theta_pll_03) + V_03*cos(theta_03)*cos(theta_pll_03)
struct[0].Fy_ini[20,31] = -1
struct[0].Gx_ini[6,0] = -V_01*sin(delta_01 - theta_01)
struct[0].Gx_ini[6,2] = -1
struct[0].Gx_ini[7,0] = V_01*cos(delta_01 - theta_01)
struct[0].Gx_ini[7,3] = -1
struct[0].Gx_ini[8,0] = V_01*i_d_01*cos(delta_01 - theta_01) - V_01*i_q_01*sin(delta_01 - theta_01)
struct[0].Gx_ini[9,0] = -V_01*i_d_01*sin(delta_01 - theta_01) - V_01*i_q_01*cos(delta_01 - theta_01)
struct[0].Gx_ini[10,4] = Piecewise(np.array([(0, (V_min_01 > K_a_01*(-v_c_01 + v_pss_01 + v_ref_01) + K_ai_01*xi_v_01) | (V_max_01 < K_a_01*(-v_c_01 + v_pss_01 + v_ref_01) + K_ai_01*xi_v_01)), (-K_a_01, True)]))
struct[0].Gx_ini[10,5] = Piecewise(np.array([(0, (V_min_01 > K_a_01*(-v_c_01 + v_pss_01 + v_ref_01) + K_ai_01*xi_v_01) | (V_max_01 < K_a_01*(-v_c_01 + v_pss_01 + v_ref_01) + K_ai_01*xi_v_01)), (K_ai_01, True)]))
struct[0].Gx_ini[11,1] = -1/Droop_01
struct[0].Gx_ini[11,8] = 1
struct[0].Gx_ini[12,6] = T_gov_2_01/T_gov_3_01
struct[0].Gx_ini[12,7] = -T_gov_2_01/T_gov_3_01 + 1
struct[0].Gx_ini[13,1] = 1
struct[0].Gx_ini[13,9] = -1
struct[0].Gx_ini[14,10] = Piecewise(np.array([(0, (V_lim_01 < K_stab_01*(T_1_01*(-x_lead_01 + z_wo_01)/T_2_01 + x_lead_01)) | (V_lim_01 < -K_stab_01*(T_1_01*(-x_lead_01 + z_wo_01)/T_2_01 + x_lead_01))), (K_stab_01*(-T_1_01/T_2_01 + 1), True)]))
struct[0].Gx_ini[15,12] = K_p_03/T_p_03
struct[0].Gx_ini[16,13] = K_q_03/T_q_03
struct[0].Gx_ini[17,11] = -V_03*cos(delta_03 - theta_03)
struct[0].Gx_ini[18,11] = V_03*sin(delta_03 - theta_03)
struct[0].Gx_ini[19,11] = V_03*i_d_03*cos(delta_03 - theta_03) - V_03*i_q_03*sin(delta_03 - theta_03)
struct[0].Gx_ini[20,11] = -V_03*i_d_03*sin(delta_03 - theta_03) - V_03*i_q_03*cos(delta_03 - theta_03)
struct[0].Gx_ini[21,15] = 1
struct[0].Gx_ini[22,11] = V_03*i_d_03*cos(delta_03 - theta_03) - V_03*i_q_03*sin(delta_03 - theta_03)
struct[0].Gx_ini[24,14] = 1
struct[0].Gx_ini[26,16] = Piecewise(np.array([(0, (k_cur_03 > 1) | (k_cur_03 < 0.0001)), (1, True)]))
struct[0].Gx_ini[27,17] = k_cur_sat_03
struct[0].Gx_ini[30,18] = K_p_pll_03*(-V_03*sin(theta_03)*sin(theta_pll_03) - V_03*cos(theta_03)*cos(theta_pll_03))/Omega_b_03
struct[0].Gx_ini[30,19] = K_i_pll_03/Omega_b_03
struct[0].Gx_ini[31,1] = H_01*S_n_01/(H_01*S_n_01 + S_n_03*T_p_03/(2*K_p_03))
struct[0].Gx_ini[32,20] = K_i_agc
struct[0].Gy_ini[0,0] = 2*V_01*g_01_02 + V_02*(-b_01_02*sin(theta_01 - theta_02) - g_01_02*cos(theta_01 - theta_02))
struct[0].Gy_ini[0,1] = V_01*V_02*(-b_01_02*cos(theta_01 - theta_02) + g_01_02*sin(theta_01 - theta_02))
struct[0].Gy_ini[0,2] = V_01*(-b_01_02*sin(theta_01 - theta_02) - g_01_02*cos(theta_01 - theta_02))
struct[0].Gy_ini[0,3] = V_01*V_02*(b_01_02*cos(theta_01 - theta_02) - g_01_02*sin(theta_01 - theta_02))
struct[0].Gy_ini[0,8] = -S_n_01/S_base
struct[0].Gy_ini[1,0] = 2*V_01*(-b_01_02 - bs_01_02/2) + V_02*(b_01_02*cos(theta_01 - theta_02) - g_01_02*sin(theta_01 - theta_02))
struct[0].Gy_ini[1,1] = V_01*V_02*(-b_01_02*sin(theta_01 - theta_02) - g_01_02*cos(theta_01 - theta_02))
struct[0].Gy_ini[1,2] = V_01*(b_01_02*cos(theta_01 - theta_02) - g_01_02*sin(theta_01 - theta_02))
struct[0].Gy_ini[1,3] = V_01*V_02*(b_01_02*sin(theta_01 - theta_02) + g_01_02*cos(theta_01 - theta_02))
struct[0].Gy_ini[1,9] = -S_n_01/S_base
struct[0].Gy_ini[2,0] = V_02*(b_01_02*sin(theta_01 - theta_02) - g_01_02*cos(theta_01 - theta_02))
struct[0].Gy_ini[2,1] = V_01*V_02*(b_01_02*cos(theta_01 - theta_02) + g_01_02*sin(theta_01 - theta_02))
struct[0].Gy_ini[2,2] = V_01*(b_01_02*sin(theta_01 - theta_02) - g_01_02*cos(theta_01 - theta_02)) + 2*V_02*(g_01_02 + g_02_03) + V_03*(-b_02_03*sin(theta_02 - theta_03) - g_02_03*cos(theta_02 - theta_03))
struct[0].Gy_ini[2,3] = V_01*V_02*(-b_01_02*cos(theta_01 - theta_02) - g_01_02*sin(theta_01 - theta_02)) + V_02*V_03*(-b_02_03*cos(theta_02 - theta_03) + g_02_03*sin(theta_02 - theta_03))
struct[0].Gy_ini[2,4] = V_02*(-b_02_03*sin(theta_02 - theta_03) - g_02_03*cos(theta_02 - theta_03))
struct[0].Gy_ini[2,5] = V_02*V_03*(b_02_03*cos(theta_02 - theta_03) - g_02_03*sin(theta_02 - theta_03))
struct[0].Gy_ini[3,0] = V_02*(b_01_02*cos(theta_01 - theta_02) + g_01_02*sin(theta_01 - theta_02))
struct[0].Gy_ini[3,1] = V_01*V_02*(-b_01_02*sin(theta_01 - theta_02) + g_01_02*cos(theta_01 - theta_02))
struct[0].Gy_ini[3,2] = V_01*(b_01_02*cos(theta_01 - theta_02) + g_01_02*sin(theta_01 - theta_02)) + 2*V_02*(-b_01_02 - b_02_03 - bs_01_02/2 - bs_02_03/2) + V_03*(b_02_03*cos(theta_02 - theta_03) - g_02_03*sin(theta_02 - theta_03))
struct[0].Gy_ini[3,3] = V_01*V_02*(b_01_02*sin(theta_01 - theta_02) - g_01_02*cos(theta_01 - theta_02)) + V_02*V_03*(-b_02_03*sin(theta_02 - theta_03) - g_02_03*cos(theta_02 - theta_03))
struct[0].Gy_ini[3,4] = V_02*(b_02_03*cos(theta_02 - theta_03) - g_02_03*sin(theta_02 - theta_03))
struct[0].Gy_ini[3,5] = V_02*V_03*(b_02_03*sin(theta_02 - theta_03) + g_02_03*cos(theta_02 - theta_03))
struct[0].Gy_ini[4,2] = V_03*(b_02_03*sin(theta_02 - theta_03) - g_02_03*cos(theta_02 - theta_03))
struct[0].Gy_ini[4,3] = V_02*V_03*(b_02_03*cos(theta_02 - theta_03) + g_02_03*sin(theta_02 - theta_03))
struct[0].Gy_ini[4,4] = V_02*(b_02_03*sin(theta_02 - theta_03) - g_02_03*cos(theta_02 - theta_03)) + 2*V_03*g_02_03
struct[0].Gy_ini[4,5] = V_02*V_03*(-b_02_03*cos(theta_02 - theta_03) - g_02_03*sin(theta_02 - theta_03))
struct[0].Gy_ini[4,19] = -S_n_03/S_base
struct[0].Gy_ini[5,2] = V_03*(b_02_03*cos(theta_02 - theta_03) + g_02_03*sin(theta_02 - theta_03))
struct[0].Gy_ini[5,3] = V_02*V_03*(-b_02_03*sin(theta_02 - theta_03) + g_02_03*cos(theta_02 - theta_03))
struct[0].Gy_ini[5,4] = V_02*(b_02_03*cos(theta_02 - theta_03) + g_02_03*sin(theta_02 - theta_03)) + 2*V_03*(-b_02_03 - bs_02_03/2)
struct[0].Gy_ini[5,5] = V_02*V_03*(b_02_03*sin(theta_02 - theta_03) - g_02_03*cos(theta_02 - theta_03))
struct[0].Gy_ini[5,20] = -S_n_03/S_base
struct[0].Gy_ini[6,0] = cos(delta_01 - theta_01)
struct[0].Gy_ini[6,1] = V_01*sin(delta_01 - theta_01)
struct[0].Gy_ini[6,6] = X1d_01
struct[0].Gy_ini[6,7] = R_a_01
struct[0].Gy_ini[7,0] = sin(delta_01 - theta_01)
struct[0].Gy_ini[7,1] = -V_01*cos(delta_01 - theta_01)
struct[0].Gy_ini[7,6] = R_a_01
struct[0].Gy_ini[7,7] = -X1q_01
struct[0].Gy_ini[8,0] = i_d_01*sin(delta_01 - theta_01) + i_q_01*cos(delta_01 - theta_01)
struct[0].Gy_ini[8,1] = -V_01*i_d_01*cos(delta_01 - theta_01) + V_01*i_q_01*sin(delta_01 - theta_01)
struct[0].Gy_ini[8,6] = V_01*sin(delta_01 - theta_01)
struct[0].Gy_ini[8,7] = V_01*cos(delta_01 - theta_01)
struct[0].Gy_ini[9,0] = i_d_01*cos(delta_01 - theta_01) - i_q_01*sin(delta_01 - theta_01)
struct[0].Gy_ini[9,1] = V_01*i_d_01*sin(delta_01 - theta_01) + V_01*i_q_01*cos(delta_01 - theta_01)
struct[0].Gy_ini[9,6] = V_01*cos(delta_01 - theta_01)
struct[0].Gy_ini[9,7] = -V_01*sin(delta_01 - theta_01)
struct[0].Gy_ini[10,14] = Piecewise(np.array([(0, (V_min_01 > K_a_01*(-v_c_01 + v_pss_01 + v_ref_01) + K_ai_01*xi_v_01) | (V_max_01 < K_a_01*(-v_c_01 + v_pss_01 + v_ref_01) + K_ai_01*xi_v_01)), (K_a_01, True)]))
struct[0].Gy_ini[11,32] = K_sec_01
struct[0].Gy_ini[14,13] = Piecewise(np.array([(0, (V_lim_01 < K_stab_01*(T_1_01*(-x_lead_01 + z_wo_01)/T_2_01 + x_lead_01)) | (V_lim_01 < -K_stab_01*(T_1_01*(-x_lead_01 + z_wo_01)/T_2_01 + x_lead_01))), (K_stab_01*T_1_01/T_2_01, True)]))
struct[0].Gy_ini[15,19] = -K_p_03
struct[0].Gy_ini[15,21] = K_p_03
struct[0].Gy_ini[16,20] = -K_q_03
struct[0].Gy_ini[17,4] = -sin(delta_03 - theta_03)
struct[0].Gy_ini[17,5] = V_03*cos(delta_03 - theta_03)
struct[0].Gy_ini[17,17] = -R_v_03
struct[0].Gy_ini[17,18] = X_v_03
struct[0].Gy_ini[18,4] = -cos(delta_03 - theta_03)
struct[0].Gy_ini[18,5] = -V_03*sin(delta_03 - theta_03)
struct[0].Gy_ini[18,17] = -X_v_03
struct[0].Gy_ini[18,18] = -R_v_03
struct[0].Gy_ini[19,4] = i_d_03*sin(delta_03 - theta_03) + i_q_03*cos(delta_03 - theta_03)
struct[0].Gy_ini[19,5] = -V_03*i_d_03*cos(delta_03 - theta_03) + V_03*i_q_03*sin(delta_03 - theta_03)
struct[0].Gy_ini[19,17] = V_03*sin(delta_03 - theta_03)
struct[0].Gy_ini[19,18] = V_03*cos(delta_03 - theta_03)
struct[0].Gy_ini[20,4] = i_d_03*cos(delta_03 - theta_03) - i_q_03*sin(delta_03 - theta_03)
struct[0].Gy_ini[20,5] = V_03*i_d_03*sin(delta_03 - theta_03) + V_03*i_q_03*cos(delta_03 - theta_03)
struct[0].Gy_ini[20,17] = V_03*cos(delta_03 - theta_03)
struct[0].Gy_ini[20,18] = -V_03*sin(delta_03 - theta_03)
struct[0].Gy_ini[21,28] = K_fpfr_03*Piecewise(np.array([(0, (P_f_min_03 > p_f_03) | (P_f_max_03 < p_f_03)), (1, True)]))
struct[0].Gy_ini[22,4] = i_d_03*sin(delta_03 - theta_03) + i_q_03*cos(delta_03 - theta_03)
struct[0].Gy_ini[22,5] = -V_03*i_d_03*cos(delta_03 - theta_03) + V_03*i_q_03*sin(delta_03 - theta_03)
struct[0].Gy_ini[22,17] = 2*R_s_03*i_d_03 + V_03*sin(delta_03 - theta_03)
struct[0].Gy_ini[22,18] = 2*R_s_03*i_q_03 + V_03*cos(delta_03 - theta_03)
struct[0].Gy_ini[23,24] = 2*k_u_03*v_u_03/V_u_max_03**2
struct[0].Gy_ini[23,25] = -(-v_u_03**2 + v_u_ref_03**2)/V_u_max_03**2
struct[0].Gy_ini[24,22] = -R_uc_03*S_n_03/(v_u_03 + 0.1)
struct[0].Gy_ini[24,24] = -R_uc_03*S_n_03*(p_gou_03 - p_t_03)/(v_u_03 + 0.1)**2 - 1
struct[0].Gy_ini[24,27] = R_uc_03*S_n_03/(v_u_03 + 0.1)
struct[0].Gy_ini[25,24] = Piecewise(np.array([(0, V_u_min_03 > v_u_03), ((-K_u_0_03 + K_u_max_03)/(-V_u_lt_03 + V_u_min_03), V_u_lt_03 > v_u_03), ((-K_u_0_03 + K_u_max_03)/(-V_u_ht_03 + V_u_max_03), V_u_ht_03 < v_u_03), (0, True)]))
struct[0].Gy_ini[27,26] = inc_p_gin_03 + p_gin_0_03
struct[0].Gy_ini[28,15] = -Piecewise(np.array([((1 - K_speed_03)/Droop_03, (0.5*DB_03 + omega_ref_03 < K_speed_03*omega_pll_03 + omega_03*(1 - K_speed_03)) | (0.5*DB_03 - omega_ref_03 < -K_speed_03*omega_pll_03 - omega_03*(1 - K_speed_03))), (0, True)]))
struct[0].Gy_ini[28,30] = -Piecewise(np.array([(K_speed_03/Droop_03, (0.5*DB_03 + omega_ref_03 < K_speed_03*omega_pll_03 + omega_03*(1 - K_speed_03)) | (0.5*DB_03 - omega_ref_03 < -K_speed_03*omega_pll_03 - omega_03*(1 - K_speed_03))), (0, True)]))
struct[0].Gy_ini[29,24] = Piecewise(np.array([((-R_lim_03 + R_lim_max_03)/(-V_u_lt_03 + V_u_min_03), V_u_lt_03 > v_u_03), ((-R_lim_03 + R_lim_max_03)/(-V_u_ht_03 + V_u_max_03), V_u_ht_03 < v_u_03), (0, True)]))
struct[0].Gy_ini[30,4] = K_p_pll_03*(sin(theta_03)*cos(theta_pll_03) - sin(theta_pll_03)*cos(theta_03))/Omega_b_03
struct[0].Gy_ini[30,5] = K_p_pll_03*(V_03*sin(theta_03)*sin(theta_pll_03) + V_03*cos(theta_03)*cos(theta_pll_03))/Omega_b_03
struct[0].Gy_ini[31,15] = S_n_03*T_p_03/(2*K_p_03*(H_01*S_n_01 + S_n_03*T_p_03/(2*K_p_03)))
struct[0].Gy_ini[32,31] = -K_p_agc
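# Generated run-time function. Same mode convention as ini(): 2 -> f,
# 3 -> g and h, 10 and above -> the run Jacobian blocks (Fx, Fy, Gx, Gy, ...).
# It also refreshes u_run from the input fields of the struct before
# evaluating the equations.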
@numba.njit(cache=True)
def run(t,struct,mode):
# Parameters:
S_base = struct[0].S_base
g_01_02 = struct[0].g_01_02
b_01_02 = struct[0].b_01_02
bs_01_02 = struct[0].bs_01_02
g_02_03 = struct[0].g_02_03
b_02_03 = struct[0].b_02_03
bs_02_03 = struct[0].bs_02_03
U_01_n = struct[0].U_01_n
U_02_n = struct[0].U_02_n
U_03_n = struct[0].U_03_n
S_n_01 = struct[0].S_n_01
Omega_b_01 = struct[0].Omega_b_01
H_01 = struct[0].H_01
T1d0_01 = struct[0].T1d0_01
T1q0_01 = struct[0].T1q0_01
X_d_01 = struct[0].X_d_01
X_q_01 = struct[0].X_q_01
X1d_01 = struct[0].X1d_01
X1q_01 = struct[0].X1q_01
D_01 = struct[0].D_01
R_a_01 = struct[0].R_a_01
K_delta_01 = struct[0].K_delta_01
K_sec_01 = struct[0].K_sec_01
K_a_01 = struct[0].K_a_01
K_ai_01 = struct[0].K_ai_01
T_r_01 = struct[0].T_r_01
V_min_01 = struct[0].V_min_01
V_max_01 = struct[0].V_max_01
K_aw_01 = struct[0].K_aw_01
Droop_01 = struct[0].Droop_01
T_gov_1_01 = struct[0].T_gov_1_01
T_gov_2_01 = struct[0].T_gov_2_01
T_gov_3_01 = struct[0].T_gov_3_01
K_imw_01 = struct[0].K_imw_01
omega_ref_01 = struct[0].omega_ref_01
T_wo_01 = struct[0].T_wo_01
T_1_01 = struct[0].T_1_01
T_2_01 = struct[0].T_2_01
K_stab_01 = struct[0].K_stab_01
V_lim_01 = struct[0].V_lim_01
S_n_03 = struct[0].S_n_03
Omega_b_03 = struct[0].Omega_b_03
K_p_03 = struct[0].K_p_03
T_p_03 = struct[0].T_p_03
K_q_03 = struct[0].K_q_03
T_q_03 = struct[0].T_q_03
X_v_03 = struct[0].X_v_03
R_v_03 = struct[0].R_v_03
R_s_03 = struct[0].R_s_03
C_u_03 = struct[0].C_u_03
K_u_0_03 = struct[0].K_u_0_03
K_u_max_03 = struct[0].K_u_max_03
V_u_min_03 = struct[0].V_u_min_03
V_u_max_03 = struct[0].V_u_max_03
R_uc_03 = struct[0].R_uc_03
K_h_03 = struct[0].K_h_03
R_lim_03 = struct[0].R_lim_03
V_u_lt_03 = struct[0].V_u_lt_03
V_u_ht_03 = struct[0].V_u_ht_03
Droop_03 = struct[0].Droop_03
DB_03 = struct[0].DB_03
T_cur_03 = struct[0].T_cur_03
R_lim_max_03 = struct[0].R_lim_max_03
K_fpfr_03 = struct[0].K_fpfr_03
P_f_min_03 = struct[0].P_f_min_03
P_f_max_03 = struct[0].P_f_max_03
K_p_pll_03 = struct[0].K_p_pll_03
K_i_pll_03 = struct[0].K_i_pll_03
K_speed_03 = struct[0].K_speed_03
K_p_agc = struct[0].K_p_agc
K_i_agc = struct[0].K_i_agc
# Inputs:
P_01 = struct[0].P_01
Q_01 = struct[0].Q_01
P_02 = struct[0].P_02
Q_02 = struct[0].Q_02
P_03 = struct[0].P_03
Q_03 = struct[0].Q_03
v_ref_01 = struct[0].v_ref_01
v_pss_01 = struct[0].v_pss_01
p_c_01 = struct[0].p_c_01
p_r_01 = struct[0].p_r_01
q_s_ref_03 = struct[0].q_s_ref_03
v_u_ref_03 = struct[0].v_u_ref_03
omega_ref_03 = struct[0].omega_ref_03
p_gin_0_03 = struct[0].p_gin_0_03
p_g_ref_03 = struct[0].p_g_ref_03
ramp_p_gin_03 = struct[0].ramp_p_gin_03
# Dynamical states:
delta_01 = struct[0].x[0,0]
omega_01 = struct[0].x[1,0]
e1q_01 = struct[0].x[2,0]
e1d_01 = struct[0].x[3,0]
v_c_01 = struct[0].x[4,0]
xi_v_01 = struct[0].x[5,0]
x_gov_1_01 = struct[0].x[6,0]
x_gov_2_01 = struct[0].x[7,0]
xi_imw_01 = struct[0].x[8,0]
x_wo_01 = struct[0].x[9,0]
x_lead_01 = struct[0].x[10,0]
delta_03 = struct[0].x[11,0]
xi_p_03 = struct[0].x[12,0]
xi_q_03 = struct[0].x[13,0]
e_u_03 = struct[0].x[14,0]
p_ghr_03 = struct[0].x[15,0]
k_cur_03 = struct[0].x[16,0]
inc_p_gin_03 = struct[0].x[17,0]
theta_pll_03 = struct[0].x[18,0]
xi_pll_03 = struct[0].x[19,0]
xi_freq = struct[0].x[20,0]
# Algebraic states:
V_01 = struct[0].y_run[0,0]
theta_01 = struct[0].y_run[1,0]
V_02 = struct[0].y_run[2,0]
theta_02 = struct[0].y_run[3,0]
V_03 = struct[0].y_run[4,0]
theta_03 = struct[0].y_run[5,0]
i_d_01 = struct[0].y_run[6,0]
i_q_01 = struct[0].y_run[7,0]
p_g_01 = struct[0].y_run[8,0]
q_g_01 = struct[0].y_run[9,0]
v_f_01 = struct[0].y_run[10,0]
p_m_ref_01 = struct[0].y_run[11,0]
p_m_01 = struct[0].y_run[12,0]
z_wo_01 = struct[0].y_run[13,0]
v_pss_01 = struct[0].y_run[14,0]
omega_03 = struct[0].y_run[15,0]
e_qv_03 = struct[0].y_run[16,0]
i_d_03 = struct[0].y_run[17,0]
i_q_03 = struct[0].y_run[18,0]
p_s_03 = struct[0].y_run[19,0]
q_s_03 = struct[0].y_run[20,0]
p_m_03 = struct[0].y_run[21,0]
p_t_03 = struct[0].y_run[22,0]
p_u_03 = struct[0].y_run[23,0]
v_u_03 = struct[0].y_run[24,0]
k_u_03 = struct[0].y_run[25,0]
k_cur_sat_03 = struct[0].y_run[26,0]
p_gou_03 = struct[0].y_run[27,0]
p_f_03 = struct[0].y_run[28,0]
r_lim_03 = struct[0].y_run[29,0]
omega_pll_03 = struct[0].y_run[30,0]
omega_coi = struct[0].y_run[31,0]
p_agc = struct[0].y_run[32,0]
struct[0].u_run[0,0] = P_01
struct[0].u_run[1,0] = Q_01
struct[0].u_run[2,0] = P_02
struct[0].u_run[3,0] = Q_02
struct[0].u_run[4,0] = P_03
struct[0].u_run[5,0] = Q_03
struct[0].u_run[6,0] = v_ref_01
struct[0].u_run[7,0] = v_pss_01
struct[0].u_run[8,0] = p_c_01
struct[0].u_run[9,0] = p_r_01
struct[0].u_run[10,0] = q_s_ref_03
struct[0].u_run[11,0] = v_u_ref_03
struct[0].u_run[12,0] = omega_ref_03
struct[0].u_run[13,0] = p_gin_0_03
struct[0].u_run[14,0] = p_g_ref_03
struct[0].u_run[15,0] = ramp_p_gin_03
# Differential equations:
if mode == 2:
struct[0].f[0,0] = -K_delta_01*delta_01 + Omega_b_01*(omega_01 - omega_coi)
struct[0].f[1,0] = (-D_01*(omega_01 - omega_coi) - i_d_01*(R_a_01*i_d_01 + V_01*sin(delta_01 - theta_01)) - i_q_01*(R_a_01*i_q_01 + V_01*cos(delta_01 - theta_01)) + p_m_01)/(2*H_01)
struct[0].f[2,0] = (-e1q_01 - i_d_01*(-X1d_01 + X_d_01) + v_f_01)/T1d0_01
struct[0].f[3,0] = (-e1d_01 + i_q_01*(-X1q_01 + X_q_01))/T1q0_01
struct[0].f[4,0] = (V_01 - v_c_01)/T_r_01
struct[0].f[5,0] = -K_aw_01*(K_a_01*(-v_c_01 + v_pss_01 + v_ref_01) + K_ai_01*xi_v_01 - v_f_01) - v_c_01 + v_pss_01 + v_ref_01
struct[0].f[6,0] = (p_m_ref_01 - x_gov_1_01)/T_gov_1_01
struct[0].f[7,0] = (x_gov_1_01 - x_gov_2_01)/T_gov_3_01
struct[0].f[8,0] = K_imw_01*(p_c_01 - p_g_01) - 1.0e-6*xi_imw_01
struct[0].f[9,0] = (omega_01 - x_wo_01 - 1.0)/T_wo_01
struct[0].f[10,0] = (-x_lead_01 + z_wo_01)/T_2_01
struct[0].f[11,0] = Omega_b_03*(omega_03 - omega_coi)
struct[0].f[12,0] = p_m_03 - p_s_03
struct[0].f[13,0] = -q_s_03 + q_s_ref_03
struct[0].f[14,0] = S_n_03*(p_gou_03 - p_t_03)/(C_u_03*(v_u_03 + 0.1))
struct[0].f[15,0] = Piecewise(np.array([(-r_lim_03, r_lim_03 < -K_h_03*(-p_ghr_03 + p_gou_03)), (r_lim_03, r_lim_03 < K_h_03*(-p_ghr_03 + p_gou_03)), (K_h_03*(-p_ghr_03 + p_gou_03), True)]))
struct[0].f[16,0] = (-k_cur_03 + p_g_ref_03/(inc_p_gin_03 + p_gin_0_03) + Piecewise(np.array([(P_f_min_03, P_f_min_03 > p_f_03), (P_f_max_03, P_f_max_03 < p_f_03), (p_f_03, True)]))/(inc_p_gin_03 + p_gin_0_03))/T_cur_03
struct[0].f[17,0] = -0.001*inc_p_gin_03 + ramp_p_gin_03
struct[0].f[18,0] = K_i_pll_03*xi_pll_03 + K_p_pll_03*(V_03*sin(theta_03)*cos(theta_pll_03) - V_03*sin(theta_pll_03)*cos(theta_03)) - Omega_b_03*omega_coi
struct[0].f[19,0] = V_03*sin(theta_03)*cos(theta_pll_03) - V_03*sin(theta_pll_03)*cos(theta_03)
struct[0].f[20,0] = 1 - omega_coi
# Algebraic equations:
if mode == 3:
struct[0].g[:,:] = np.ascontiguousarray(struct[0].Gy) @ np.ascontiguousarray(struct[0].y_run) + np.ascontiguousarray(struct[0].Gu) @ np.ascontiguousarray(struct[0].u_run)
struct[0].g[0,0] = -P_01/S_base + V_01**2*g_01_02 + V_01*V_02*(-b_01_02*sin(theta_01 - theta_02) - g_01_02*cos(theta_01 - theta_02)) - S_n_01*p_g_01/S_base
struct[0].g[1,0] = -Q_01/S_base + V_01**2*(-b_01_02 - bs_01_02/2) + V_01*V_02*(b_01_02*cos(theta_01 - theta_02) - g_01_02*sin(theta_01 - theta_02)) - S_n_01*q_g_01/S_base
struct[0].g[2,0] = -P_02/S_base + V_01*V_02*(b_01_02*sin(theta_01 - theta_02) - g_01_02*cos(theta_01 - theta_02)) + V_02**2*(g_01_02 + g_02_03) + V_02*V_03*(-b_02_03*sin(theta_02 - theta_03) - g_02_03*cos(theta_02 - theta_03))
struct[0].g[3,0] = -Q_02/S_base + V_01*V_02*(b_01_02*cos(theta_01 - theta_02) + g_01_02*sin(theta_01 - theta_02)) + V_02**2*(-b_01_02 - b_02_03 - bs_01_02/2 - bs_02_03/2) + V_02*V_03*(b_02_03*cos(theta_02 - theta_03) - g_02_03*sin(theta_02 - theta_03))
struct[0].g[4,0] = -P_03/S_base + V_02*V_03*(b_02_03*sin(theta_02 - theta_03) - g_02_03*cos(theta_02 - theta_03)) + V_03**2*g_02_03 - S_n_03*p_s_03/S_base
struct[0].g[5,0] = -Q_03/S_base + V_02*V_03*(b_02_03*cos(theta_02 - theta_03) + g_02_03*sin(theta_02 - theta_03)) + V_03**2*(-b_02_03 - bs_02_03/2) - S_n_03*q_s_03/S_base
struct[0].g[6,0] = R_a_01*i_q_01 + V_01*cos(delta_01 - theta_01) + X1d_01*i_d_01 - e1q_01
struct[0].g[7,0] = R_a_01*i_d_01 + V_01*sin(delta_01 - theta_01) - X1q_01*i_q_01 - e1d_01
struct[0].g[8,0] = V_01*i_d_01*sin(delta_01 - theta_01) + V_01*i_q_01*cos(delta_01 - theta_01) - p_g_01
struct[0].g[9,0] = V_01*i_d_01*cos(delta_01 - theta_01) - V_01*i_q_01*sin(delta_01 - theta_01) - q_g_01
struct[0].g[10,0] = -v_f_01 + Piecewise(np.array([(V_min_01, V_min_01 > K_a_01*(-v_c_01 + v_pss_01 + v_ref_01) + K_ai_01*xi_v_01), (V_max_01, V_max_01 < K_a_01*(-v_c_01 + v_pss_01 + v_ref_01) + K_ai_01*xi_v_01), (K_a_01*(-v_c_01 + v_pss_01 + v_ref_01) + K_ai_01*xi_v_01, True)]))
struct[0].g[11,0] = K_sec_01*p_agc - p_m_ref_01 + p_r_01 + xi_imw_01 - (omega_01 - omega_ref_01)/Droop_01
struct[0].g[12,0] = T_gov_2_01*(x_gov_1_01 - x_gov_2_01)/T_gov_3_01 - p_m_01 + x_gov_2_01
struct[0].g[13,0] = omega_01 - x_wo_01 - z_wo_01 - 1.0
struct[0].g[14,0] = -v_pss_01 + Piecewise(np.array([(-V_lim_01, V_lim_01 < -K_stab_01*(T_1_01*(-x_lead_01 + z_wo_01)/T_2_01 + x_lead_01)), (V_lim_01, V_lim_01 < K_stab_01*(T_1_01*(-x_lead_01 + z_wo_01)/T_2_01 + x_lead_01)), (K_stab_01*(T_1_01*(-x_lead_01 + z_wo_01)/T_2_01 + x_lead_01), True)]))
struct[0].g[15,0] = K_p_03*(p_m_03 - p_s_03 + xi_p_03/T_p_03) - omega_03
struct[0].g[16,0] = K_q_03*(-q_s_03 + q_s_ref_03 + xi_q_03/T_q_03) - e_qv_03
struct[0].g[17,0] = -R_v_03*i_d_03 - V_03*sin(delta_03 - theta_03) + X_v_03*i_q_03
struct[0].g[18,0] = -R_v_03*i_q_03 - V_03*cos(delta_03 - theta_03) - X_v_03*i_d_03 + e_qv_03
struct[0].g[19,0] = V_03*i_d_03*sin(delta_03 - theta_03) + V_03*i_q_03*cos(delta_03 - theta_03) - p_s_03
struct[0].g[20,0] = V_03*i_d_03*cos(delta_03 - theta_03) - V_03*i_q_03*sin(delta_03 - theta_03) - q_s_03
struct[0].g[21,0] = K_fpfr_03*Piecewise(np.array([(P_f_min_03, P_f_min_03 > p_f_03), (P_f_max_03, P_f_max_03 < p_f_03), (p_f_03, True)])) + p_ghr_03 - p_m_03 + p_s_03 - p_t_03 + p_u_03
struct[0].g[22,0] = i_d_03*(R_s_03*i_d_03 + V_03*sin(delta_03 - theta_03)) + i_q_03*(R_s_03*i_q_03 + V_03*cos(delta_03 - theta_03)) - p_t_03
struct[0].g[23,0] = -p_u_03 - k_u_03*(-v_u_03**2 + v_u_ref_03**2)/V_u_max_03**2
struct[0].g[24,0] = R_uc_03*S_n_03*(p_gou_03 - p_t_03)/(v_u_03 + 0.1) + e_u_03 - v_u_03
struct[0].g[25,0] = -k_u_03 + Piecewise(np.array([(K_u_max_03, V_u_min_03 > v_u_03), (K_u_0_03 + (-K_u_0_03 + K_u_max_03)*(-V_u_lt_03 + v_u_03)/(-V_u_lt_03 + V_u_min_03), V_u_lt_03 > v_u_03), (K_u_0_03 + (-K_u_0_03 + K_u_max_03)*(-V_u_ht_03 + v_u_03)/(-V_u_ht_03 + V_u_max_03), V_u_ht_03 < v_u_03), (K_u_max_03, V_u_max_03 < v_u_03), (K_u_0_03, True)]))
struct[0].g[26,0] = -k_cur_sat_03 + Piecewise(np.array([(0.0001, k_cur_03 < 0.0001), (1, k_cur_03 > 1), (k_cur_03, True)]))
struct[0].g[27,0] = k_cur_sat_03*(inc_p_gin_03 + p_gin_0_03) - p_gou_03
struct[0].g[28,0] = -p_f_03 - Piecewise(np.array([((0.5*DB_03 + K_speed_03*omega_pll_03 + omega_03*(1 - K_speed_03) - omega_ref_03)/Droop_03, 0.5*DB_03 - omega_ref_03 < -K_speed_03*omega_pll_03 - omega_03*(1 - K_speed_03)), ((-0.5*DB_03 + K_speed_03*omega_pll_03 + omega_03*(1 - K_speed_03) - omega_ref_03)/Droop_03, 0.5*DB_03 + omega_ref_03 < K_speed_03*omega_pll_03 + omega_03*(1 - K_speed_03)), (0.0, True)]))
struct[0].g[29,0] = -r_lim_03 + Piecewise(np.array([(R_lim_max_03, (omega_03 > 0.5*DB_03 + omega_ref_03) | (omega_03 < -0.5*DB_03 + omega_ref_03)), (0.0, True)])) + Piecewise(np.array([(R_lim_03 + (-R_lim_03 + R_lim_max_03)*(-V_u_lt_03 + v_u_03)/(-V_u_lt_03 + V_u_min_03), V_u_lt_03 > v_u_03), (R_lim_03 + (-R_lim_03 + R_lim_max_03)*(-V_u_ht_03 + v_u_03)/(-V_u_ht_03 + V_u_max_03), V_u_ht_03 < v_u_03), (R_lim_03, True)]))
struct[0].g[30,0] = -omega_pll_03 + (K_i_pll_03*xi_pll_03 + K_p_pll_03*(V_03*sin(theta_03)*cos(theta_pll_03) - V_03*sin(theta_pll_03)*cos(theta_03)))/Omega_b_03
struct[0].g[31,0] = -omega_coi + (H_01*S_n_01*omega_01 + S_n_03*T_p_03*omega_03/(2*K_p_03))/(H_01*S_n_01 + S_n_03*T_p_03/(2*K_p_03))
struct[0].g[32,0] = K_i_agc*xi_freq + K_p_agc*(1 - omega_coi) - p_agc
# Outputs:
if mode == 3:
struct[0].h[0,0] = V_01
struct[0].h[1,0] = V_02
struct[0].h[2,0] = V_03
struct[0].h[3,0] = i_d_01*(R_a_01*i_d_01 + V_01*sin(delta_01 - theta_01)) + i_q_01*(R_a_01*i_q_01 + V_01*cos(delta_01 - theta_01))
struct[0].h[4,0] = inc_p_gin_03 + p_gin_0_03
struct[0].h[5,0] = p_g_ref_03
struct[0].h[6,0] = -p_s_03 + p_t_03
struct[0].h[7,0] = (-V_u_min_03**2 + e_u_03**2)/(V_u_max_03**2 - V_u_min_03**2)
struct[0].h[8,0] = K_fpfr_03*Piecewise(np.array([(P_f_min_03, P_f_min_03 > p_f_03), (P_f_max_03, P_f_max_03 < p_f_03), (p_f_03, True)]))
struct[0].h[9,0] = Piecewise(np.array([(P_f_min_03, P_f_min_03 > p_f_03), (P_f_max_03, P_f_max_03 < p_f_03), (p_f_03, True)]))
if mode == 10:
struct[0].Fx[0,0] = -K_delta_01
struct[0].Fx[0,1] = Omega_b_01
struct[0].Fx[1,0] = (-V_01*i_d_01*cos(delta_01 - theta_01) + V_01*i_q_01*sin(delta_01 - theta_01))/(2*H_01)
struct[0].Fx[1,1] = -D_01/(2*H_01)
struct[0].Fx[2,2] = -1/T1d0_01
struct[0].Fx[3,3] = -1/T1q0_01
struct[0].Fx[4,4] = -1/T_r_01
struct[0].Fx[5,4] = K_a_01*K_aw_01 - 1
struct[0].Fx[5,5] = -K_ai_01*K_aw_01
struct[0].Fx[6,6] = -1/T_gov_1_01
struct[0].Fx[7,6] = 1/T_gov_3_01
struct[0].Fx[7,7] = -1/T_gov_3_01
struct[0].Fx[9,1] = 1/T_wo_01
struct[0].Fx[9,9] = -1/T_wo_01
struct[0].Fx[10,10] = -1/T_2_01
struct[0].Fx[15,15] = Piecewise(np.array([(0, (r_lim_03 < K_h_03*(-p_ghr_03 + p_gou_03)) | (r_lim_03 < -K_h_03*(-p_ghr_03 + p_gou_03))), (-K_h_03, True)]))
struct[0].Fx[16,16] = -1/T_cur_03
struct[0].Fx[16,17] = (-p_g_ref_03/(inc_p_gin_03 + p_gin_0_03)**2 - Piecewise(np.array([(P_f_min_03, P_f_min_03 > p_f_03), (P_f_max_03, P_f_max_03 < p_f_03), (p_f_03, True)]))/(inc_p_gin_03 + p_gin_0_03)**2)/T_cur_03
struct[0].Fx[18,18] = K_p_pll_03*(-V_03*sin(theta_03)*sin(theta_pll_03) - V_03*cos(theta_03)*cos(theta_pll_03))
struct[0].Fx[18,19] = K_i_pll_03
struct[0].Fx[19,18] = -V_03*sin(theta_03)*sin(theta_pll_03) - V_03*cos(theta_03)*cos(theta_pll_03)
if mode == 11:
struct[0].Fy[0,31] = -Omega_b_01
struct[0].Fy[1,0] = (-i_d_01*sin(delta_01 - theta_01) - i_q_01*cos(delta_01 - theta_01))/(2*H_01)
struct[0].Fy[1,1] = (V_01*i_d_01*cos(delta_01 - theta_01) - V_01*i_q_01*sin(delta_01 - theta_01))/(2*H_01)
struct[0].Fy[1,6] = (-2*R_a_01*i_d_01 - V_01*sin(delta_01 - theta_01))/(2*H_01)
struct[0].Fy[1,7] = (-2*R_a_01*i_q_01 - V_01*cos(delta_01 - theta_01))/(2*H_01)
struct[0].Fy[1,12] = 1/(2*H_01)
struct[0].Fy[1,31] = D_01/(2*H_01)
struct[0].Fy[2,6] = (X1d_01 - X_d_01)/T1d0_01
struct[0].Fy[2,10] = 1/T1d0_01
struct[0].Fy[3,7] = (-X1q_01 + X_q_01)/T1q0_01
struct[0].Fy[4,0] = 1/T_r_01
struct[0].Fy[5,10] = K_aw_01
struct[0].Fy[5,14] = -K_a_01*K_aw_01 + 1
struct[0].Fy[6,11] = 1/T_gov_1_01
struct[0].Fy[8,8] = -K_imw_01
struct[0].Fy[10,13] = 1/T_2_01
struct[0].Fy[11,15] = Omega_b_03
struct[0].Fy[11,31] = -Omega_b_03
struct[0].Fy[12,19] = -1
struct[0].Fy[12,21] = 1
struct[0].Fy[13,20] = -1
struct[0].Fy[14,22] = -S_n_03/(C_u_03*(v_u_03 + 0.1))
struct[0].Fy[14,24] = -S_n_03*(p_gou_03 - p_t_03)/(C_u_03*(v_u_03 + 0.1)**2)
struct[0].Fy[14,27] = S_n_03/(C_u_03*(v_u_03 + 0.1))
struct[0].Fy[15,27] = Piecewise(np.array([(0, (r_lim_03 < K_h_03*(-p_ghr_03 + p_gou_03)) | (r_lim_03 < -K_h_03*(-p_ghr_03 + p_gou_03))), (K_h_03, True)]))
struct[0].Fy[15,29] = Piecewise(np.array([(-1, r_lim_03 < -K_h_03*(-p_ghr_03 + p_gou_03)), (1, r_lim_03 < K_h_03*(-p_ghr_03 + p_gou_03)), (0, True)]))
struct[0].Fy[16,28] = Piecewise(np.array([(0, (P_f_min_03 > p_f_03) | (P_f_max_03 < p_f_03)), (1, True)]))/(T_cur_03*(inc_p_gin_03 + p_gin_0_03))
struct[0].Fy[18,4] = K_p_pll_03*(sin(theta_03)*cos(theta_pll_03) - sin(theta_pll_03)*cos(theta_03))
struct[0].Fy[18,5] = K_p_pll_03*(V_03*sin(theta_03)*sin(theta_pll_03) + V_03*cos(theta_03)*cos(theta_pll_03))
struct[0].Fy[18,31] = -Omega_b_03
struct[0].Fy[19,4] = sin(theta_03)*cos(theta_pll_03) - sin(theta_pll_03)*cos(theta_03)
struct[0].Fy[19,5] = V_03*sin(theta_03)*sin(theta_pll_03) + V_03*cos(theta_03)*cos(theta_pll_03)
struct[0].Fy[20,31] = -1
struct[0].Gx[6,0] = -V_01*sin(delta_01 - theta_01)
struct[0].Gx[6,2] = -1
struct[0].Gx[7,0] = V_01*cos(delta_01 - theta_01)
struct[0].Gx[7,3] = -1
struct[0].Gx[8,0] = V_01*i_d_01*cos(delta_01 - theta_01) - V_01*i_q_01*sin(delta_01 - theta_01)
struct[0].Gx[9,0] = -V_01*i_d_01*sin(delta_01 - theta_01) - V_01*i_q_01*cos(delta_01 - theta_01)
struct[0].Gx[10,4] = Piecewise(np.array([(0, (V_min_01 > K_a_01*(-v_c_01 + v_pss_01 + v_ref_01) + K_ai_01*xi_v_01) | (V_max_01 < K_a_01*(-v_c_01 + v_pss_01 + v_ref_01) + K_ai_01*xi_v_01)), (-K_a_01, True)]))
struct[0].Gx[10,5] = Piecewise(np.array([(0, (V_min_01 > K_a_01*(-v_c_01 + v_pss_01 + v_ref_01) + K_ai_01*xi_v_01) | (V_max_01 < K_a_01*(-v_c_01 + v_pss_01 + v_ref_01) + K_ai_01*xi_v_01)), (K_ai_01, True)]))
struct[0].Gx[11,1] = -1/Droop_01
struct[0].Gx[11,8] = 1
struct[0].Gx[12,6] = T_gov_2_01/T_gov_3_01
struct[0].Gx[12,7] = -T_gov_2_01/T_gov_3_01 + 1
struct[0].Gx[13,1] = 1
struct[0].Gx[13,9] = -1
struct[0].Gx[14,10] = Piecewise(np.array([(0, (V_lim_01 < K_stab_01*(T_1_01*(-x_lead_01 + z_wo_01)/T_2_01 + x_lead_01)) | (V_lim_01 < -K_stab_01*(T_1_01*(-x_lead_01 + z_wo_01)/T_2_01 + x_lead_01))), (K_stab_01*(-T_1_01/T_2_01 + 1), True)]))
struct[0].Gx[15,12] = K_p_03/T_p_03
struct[0].Gx[16,13] = K_q_03/T_q_03
struct[0].Gx[17,11] = -V_03*cos(delta_03 - theta_03)
struct[0].Gx[18,11] = V_03*sin(delta_03 - theta_03)
struct[0].Gx[19,11] = V_03*i_d_03*cos(delta_03 - theta_03) - V_03*i_q_03*sin(delta_03 - theta_03)
struct[0].Gx[20,11] = -V_03*i_d_03*sin(delta_03 - theta_03) - V_03*i_q_03*cos(delta_03 - theta_03)
struct[0].Gx[21,15] = 1
struct[0].Gx[22,11] = V_03*i_d_03*cos(delta_03 - theta_03) - V_03*i_q_03*sin(delta_03 - theta_03)
struct[0].Gx[24,14] = 1
struct[0].Gx[26,16] = Piecewise(np.array([(0, (k_cur_03 > 1) | (k_cur_03 < 0.0001)), (1, True)]))
struct[0].Gx[27,17] = k_cur_sat_03
struct[0].Gx[30,18] = K_p_pll_03*(-V_03*sin(theta_03)*sin(theta_pll_03) - V_03*cos(theta_03)*cos(theta_pll_03))/Omega_b_03
struct[0].Gx[30,19] = K_i_pll_03/Omega_b_03
struct[0].Gx[31,1] = H_01*S_n_01/(H_01*S_n_01 + S_n_03*T_p_03/(2*K_p_03))
struct[0].Gx[32,20] = K_i_agc
struct[0].Gy[0,0] = 2*V_01*g_01_02 + V_02*(-b_01_02*sin(theta_01 - theta_02) - g_01_02*cos(theta_01 - theta_02))
struct[0].Gy[0,1] = V_01*V_02*(-b_01_02*cos(theta_01 - theta_02) + g_01_02*sin(theta_01 - theta_02))
struct[0].Gy[0,2] = V_01*(-b_01_02*sin(theta_01 - theta_02) - g_01_02*cos(theta_01 - theta_02))
struct[0].Gy[0,3] = V_01*V_02*(b_01_02*cos(theta_01 - theta_02) - g_01_02*sin(theta_01 - theta_02))
struct[0].Gy[0,8] = -S_n_01/S_base
struct[0].Gy[1,0] = 2*V_01*(-b_01_02 - bs_01_02/2) + V_02*(b_01_02*cos(theta_01 - theta_02) - g_01_02*sin(theta_01 - theta_02))
struct[0].Gy[1,1] = V_01*V_02*(-b_01_02*sin(theta_01 - theta_02) - g_01_02*cos(theta_01 - theta_02))
struct[0].Gy[1,2] = V_01*(b_01_02*cos(theta_01 - theta_02) - g_01_02*sin(theta_01 - theta_02))
struct[0].Gy[1,3] = V_01*V_02*(b_01_02*sin(theta_01 - theta_02) + g_01_02*cos(theta_01 - theta_02))
struct[0].Gy[1,9] = -S_n_01/S_base
struct[0].Gy[2,0] = V_02*(b_01_02*sin(theta_01 - theta_02) - g_01_02*cos(theta_01 - theta_02))
struct[0].Gy[2,1] = V_01*V_02*(b_01_02*cos(theta_01 - theta_02) + g_01_02*sin(theta_01 - theta_02))
struct[0].Gy[2,2] = V_01*(b_01_02*sin(theta_01 - theta_02) - g_01_02*cos(theta_01 - theta_02)) + 2*V_02*(g_01_02 + g_02_03) + V_03*(-b_02_03*sin(theta_02 - theta_03) - g_02_03*cos(theta_02 - theta_03))
struct[0].Gy[2,3] = V_01*V_02*(-b_01_02*cos(theta_01 - theta_02) - g_01_02*sin(theta_01 - theta_02)) + V_02*V_03*(-b_02_03*cos(theta_02 - theta_03) + g_02_03*sin(theta_02 - theta_03))
struct[0].Gy[2,4] = V_02*(-b_02_03*sin(theta_02 - theta_03) - g_02_03*cos(theta_02 - theta_03))
struct[0].Gy[2,5] = V_02*V_03*(b_02_03*cos(theta_02 - theta_03) - g_02_03*sin(theta_02 - theta_03))
struct[0].Gy[3,0] = V_02*(b_01_02*cos(theta_01 - theta_02) + g_01_02*sin(theta_01 - theta_02))
struct[0].Gy[3,1] = V_01*V_02*(-b_01_02*sin(theta_01 - theta_02) + g_01_02*cos(theta_01 - theta_02))
struct[0].Gy[3,2] = V_01*(b_01_02*cos(theta_01 - theta_02) + g_01_02*sin(theta_01 - theta_02)) + 2*V_02*(-b_01_02 - b_02_03 - bs_01_02/2 - bs_02_03/2) + V_03*(b_02_03*cos(theta_02 - theta_03) - g_02_03*sin(theta_02 - theta_03))
struct[0].Gy[3,3] = V_01*V_02*(b_01_02*sin(theta_01 - theta_02) - g_01_02*cos(theta_01 - theta_02)) + V_02*V_03*(-b_02_03*sin(theta_02 - theta_03) - g_02_03*cos(theta_02 - theta_03))
struct[0].Gy[3,4] = V_02*(b_02_03*cos(theta_02 - theta_03) - g_02_03*sin(theta_02 - theta_03))
struct[0].Gy[3,5] = V_02*V_03*(b_02_03*sin(theta_02 - theta_03) + g_02_03*cos(theta_02 - theta_03))
struct[0].Gy[4,2] = V_03*(b_02_03*sin(theta_02 - theta_03) - g_02_03*cos(theta_02 - theta_03))
struct[0].Gy[4,3] = V_02*V_03*(b_02_03*cos(theta_02 - theta_03) + g_02_03*sin(theta_02 - theta_03))
struct[0].Gy[4,4] = V_02*(b_02_03*sin(theta_02 - theta_03) - g_02_03*cos(theta_02 - theta_03)) + 2*V_03*g_02_03
struct[0].Gy[4,5] = V_02*V_03*(-b_02_03*cos(theta_02 - theta_03) - g_02_03*sin(theta_02 - theta_03))
struct[0].Gy[4,19] = -S_n_03/S_base
struct[0].Gy[5,2] = V_03*(b_02_03*cos(theta_02 - theta_03) + g_02_03*sin(theta_02 - theta_03))
struct[0].Gy[5,3] = V_02*V_03*(-b_02_03*sin(theta_02 - theta_03) + g_02_03*cos(theta_02 - theta_03))
struct[0].Gy[5,4] = V_02*(b_02_03*cos(theta_02 - theta_03) + g_02_03*sin(theta_02 - theta_03)) + 2*V_03*(-b_02_03 - bs_02_03/2)
struct[0].Gy[5,5] = V_02*V_03*(b_02_03*sin(theta_02 - theta_03) - g_02_03*cos(theta_02 - theta_03))
struct[0].Gy[5,20] = -S_n_03/S_base
struct[0].Gy[6,0] = cos(delta_01 - theta_01)
struct[0].Gy[6,1] = V_01*sin(delta_01 - theta_01)
struct[0].Gy[6,6] = X1d_01
struct[0].Gy[6,7] = R_a_01
struct[0].Gy[7,0] = sin(delta_01 - theta_01)
struct[0].Gy[7,1] = -V_01*cos(delta_01 - theta_01)
struct[0].Gy[7,6] = R_a_01
struct[0].Gy[7,7] = -X1q_01
struct[0].Gy[8,0] = i_d_01*sin(delta_01 - theta_01) + i_q_01*cos(delta_01 - theta_01)
struct[0].Gy[8,1] = -V_01*i_d_01*cos(delta_01 - theta_01) + V_01*i_q_01*sin(delta_01 - theta_01)
struct[0].Gy[8,6] = V_01*sin(delta_01 - theta_01)
struct[0].Gy[8,7] = V_01*cos(delta_01 - theta_01)
struct[0].Gy[9,0] = i_d_01*cos(delta_01 - theta_01) - i_q_01*sin(delta_01 - theta_01)
struct[0].Gy[9,1] = V_01*i_d_01*sin(delta_01 - theta_01) + V_01*i_q_01*cos(delta_01 - theta_01)
struct[0].Gy[9,6] = V_01*cos(delta_01 - theta_01)
struct[0].Gy[9,7] = -V_01*sin(delta_01 - theta_01)
struct[0].Gy[10,14] = Piecewise(np.array([(0, (V_min_01 > K_a_01*(-v_c_01 + v_pss_01 + v_ref_01) + K_ai_01*xi_v_01) | (V_max_01 < K_a_01*(-v_c_01 + v_pss_01 + v_ref_01) + K_ai_01*xi_v_01)), (K_a_01, True)]))
struct[0].Gy[11,32] = K_sec_01
struct[0].Gy[14,13] = Piecewise(np.array([(0, (V_lim_01 < K_stab_01*(T_1_01*(-x_lead_01 + z_wo_01)/T_2_01 + x_lead_01)) | (V_lim_01 < -K_stab_01*(T_1_01*(-x_lead_01 + z_wo_01)/T_2_01 + x_lead_01))), (K_stab_01*T_1_01/T_2_01, True)]))
struct[0].Gy[15,19] = -K_p_03
struct[0].Gy[15,21] = K_p_03
struct[0].Gy[16,20] = -K_q_03
struct[0].Gy[17,4] = -sin(delta_03 - theta_03)
struct[0].Gy[17,5] = V_03*cos(delta_03 - theta_03)
struct[0].Gy[17,17] = -R_v_03
struct[0].Gy[17,18] = X_v_03
struct[0].Gy[18,4] = -cos(delta_03 - theta_03)
struct[0].Gy[18,5] = -V_03*sin(delta_03 - theta_03)
struct[0].Gy[18,17] = -X_v_03
struct[0].Gy[18,18] = -R_v_03
struct[0].Gy[19,4] = i_d_03*sin(delta_03 - theta_03) + i_q_03*cos(delta_03 - theta_03)
struct[0].Gy[19,5] = -V_03*i_d_03*cos(delta_03 - theta_03) + V_03*i_q_03*sin(delta_03 - theta_03)
struct[0].Gy[19,17] = V_03*sin(delta_03 - theta_03)
struct[0].Gy[19,18] = V_03*cos(delta_03 - theta_03)
struct[0].Gy[20,4] = i_d_03*cos(delta_03 - theta_03) - i_q_03*sin(delta_03 - theta_03)
struct[0].Gy[20,5] = V_03*i_d_03*sin(delta_03 - theta_03) + V_03*i_q_03*cos(delta_03 - theta_03)
struct[0].Gy[20,17] = V_03*cos(delta_03 - theta_03)
struct[0].Gy[20,18] = -V_03*sin(delta_03 - theta_03)
struct[0].Gy[21,28] = K_fpfr_03*Piecewise(
|
np.array([(0, (P_f_min_03 > p_f_03) | (P_f_max_03 < p_f_03)), (1, True)])
|
numpy.array
|
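The assignments above populate the four Jacobian blocks of a semi-explicit DAE model (x' = f(x, y), 0 = g(x, y)): Fx = df/dx, Fy = df/dy, Gx = dg/dx and Gy = dg/dy. Assuming that interpretation, a steady-state Newton iteration would assemble and solve the bordered system sketched below; the function and argument names are illustrative placeholders, not objects from the generated module.
import numpy as np
def newton_step(x, y, f, g, Fx, Fy, Gx, Gy):
    # residual and full Jacobian of the stacked system [f; g] = 0
    res = np.concatenate([f, g])
    jac = np.block([[Fx, Fy], [Gx, Gy]])
    step = np.linalg.solve(jac, -res)
    nx = len(x)
    return x + step[:nx], y + step[nx:]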
## Compute and plot curvature of Skafta cauldron
## 26 Mar 2019 EHU
import numpy as np
import scipy.misc as scp
import scipy.signal as signal
from scipy import interpolate
from scipy.ndimage import gaussian_filter, distance_transform_edt
import mpl_toolkits.basemap.pyproj as pyproj
from osgeo import gdal
from netCDF4 import Dataset
#import shapefile
#import datetime
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import LogNorm, SymLogNorm
#from scipy import interpolate
#from scipy.ndimage import gaussian_filter
from cauldron_funcs import *
skafta_region_path = 'Documents/6. MIT/Skaftar collapse/data/arcticDEM/'
nc_20121015_path = skafta_region_path + 'subset_nc/SETSM_WV02_20121015_skaftar_east_ll.nc'
nc_20151010_path = skafta_region_path + 'subset_nc/SETSM_WV02_20151010_skaftar_east_ll.nc'
lon_2012, lat_2012, se_2012 = read_ArcticDEM_nc(nc_20121015_path)
SE_2012 = np.ma.masked_where(se_2012==0, se_2012)
lon_2015, lat_2015, se_2015 = read_ArcticDEM_nc(nc_20151010_path)
SE_2015 = np.ma.masked_where(se_2015==0, se_2015)
SE_2012_restricted = SE_2012[1000:3500, 6000:10000] #slicing to restrict to only the area of Eastern Skafta (speeds up computation)
SE_2015_restricted = SE_2015[1000:3500, 6000:10000]
#SE_2012_rest_fild = SE_2012_restricted.filled(fill_value=SE_2012_restricted.mean())
#SE_2015_rest_fild = SE_2015_restricted.filled(fill_value=SE_2015_restricted.mean())
SE_2012_rest_fild = NearestFill(SE_2012_restricted, mask=SE_2012_restricted.mask)
SE_2015_rest_fild = NearestFill(SE_2015_restricted, mask=SE_2015_restricted.mask)
lon_restricted = lon_2015[6000:10000]
lat_restricted = lat_2015[1000:3500]
SE_2012_western = SE_2012[1800:3200, 2250:5000]
SE_2015_western = SE_2015[1800:3200, 2250:5000]
lon_western = lon_2015[2250:5000]
lat_western = lat_2015[1800:3200]
## Compute and plot curvature of cauldron surface
def savgol2d ( z, window_size, order, derivative=None): #based on SciPy Cookbook entry on savgol
"""
"""
# number of terms in the polynomial expression
n_terms = ( order + 1 ) * ( order + 2) / 2.0
if window_size % 2 == 0:
raise ValueError('window_size must be odd')
if window_size**2 < n_terms:
raise ValueError('order is too high for the window size')
half_size = window_size // 2
# exponents of the polynomial.
# p(x,y) = a0 + a1*x + a2*y + a3*x^2 + a4*y^2 + a5*x*y + ...
# this line gives a list of two item tuple. Each tuple contains
# the exponents of the k-th term. First element of tuple is for x
# second element for y.
# Ex. exps = [(0,0), (1,0), (0,1), (2,0), (1,1), (0,2), ...]
exps = [ (k-n, n) for k in range(order+1) for n in range(k+1) ]
# coordinates of points
ind = np.arange(-half_size, half_size+1, dtype=np.float64)
dx = np.repeat( ind, window_size )
dy = np.tile( ind, [window_size, 1]).reshape(window_size**2, )
# build matrix of system of equation
A = np.empty( (window_size**2, len(exps)) )
for i, exp in enumerate( exps ):
A[:,i] = (dx**exp[0]) * (dy**exp[1])
# pad input array with appropriate values at the four borders
new_shape = z.shape[0] + 2*half_size, z.shape[1] + 2*half_size
Z = np.zeros( (new_shape) )
# top band
band = z[0, :]
Z[:half_size, half_size:-half_size] = band - np.abs( np.flipud( z[1:half_size+1, :] ) - band )
# bottom band
band = z[-1, :]
Z[-half_size:, half_size:-half_size] = band + np.abs( np.flipud( z[-half_size-1:-1, :] ) -band )
# left band
band = np.tile( z[:,0].reshape(-1,1), [1,half_size])
Z[half_size:-half_size, :half_size] = band - np.abs( np.fliplr( z[:, 1:half_size+1] ) - band )
# right band
band = np.tile( z[:,-1].reshape(-1,1), [1,half_size] )
Z[half_size:-half_size, -half_size:] = band + np.abs( np.fliplr( z[:, -half_size-1:-1] ) - band )
# central band
Z[half_size:-half_size, half_size:-half_size] = z
# top left corner
band = z[0,0]
Z[:half_size,:half_size] = band - np.abs( np.flipud(np.fliplr(z[1:half_size+1,1:half_size+1]) ) - band )
# bottom right corner
band = z[-1,-1]
Z[-half_size:,-half_size:] = band + np.abs( np.flipud(np.fliplr(z[-half_size-1:-1,-half_size-1:-1]) ) - band )
# top right corner
band = Z[half_size,-half_size:]
Z[:half_size,-half_size:] = band - np.abs( np.flipud(Z[half_size+1:2*half_size+1,-half_size:]) - band )
# bottom left corner
band = Z[-half_size:,half_size].reshape(-1,1)
Z[-half_size:,:half_size] = band - np.abs( np.fliplr(Z[-half_size:, half_size+1:2*half_size+1]) - band )
# solve system and convolve
if derivative is None:
m = np.linalg.pinv(A)[0].reshape((window_size, -1))
return signal.fftconvolve(Z, m, mode='valid')
elif derivative == 'col':
c = np.linalg.pinv(A)[1].reshape((window_size, -1))
return signal.fftconvolve(Z, -c, mode='valid')
elif derivative == 'row':
r = np.linalg.pinv(A)[2].reshape((window_size, -1))
return signal.fftconvolve(Z, -r, mode='valid')
elif derivative == 'gradient':
c = np.linalg.pinv(A)[1].reshape((window_size, -1))
r = np.linalg.pinv(A)[2].reshape((window_size, -1))
return signal.fftconvolve(Z, -r, mode='valid'), signal.fftconvolve(Z, -c, mode='valid')
elif derivative == 'curvature':
c = np.linalg.pinv(A)[1].reshape((window_size, -1))
r = np.linalg.pinv(A)[2].reshape((window_size, -1))
gradx, grady = signal.fftconvolve(Z, -r, mode='valid'), signal.fftconvolve(Z, -c, mode='valid')
gradmag = [[np.linalg.norm((gradx[i,j], grady[i,j])) for j in range(np.shape(gradx)[1])] for i in range(
|
np.shape(gradx)
|
numpy.shape
|
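A minimal usage sketch for the 2-D Savitzky-Golay filter savgol2d defined above: smooth a noisy synthetic surface and also request its gradient. The surface and window parameters are illustrative assumptions, not values from the original script.
import numpy as np
import scipy.signal as signal  # savgol2d relies on signal.fftconvolve
yy, xx = np.mgrid[0:60, 0:60]
noisy = np.sin(xx / 12.0) * np.cos(yy / 15.0) + 0.05 * np.random.randn(60, 60)
smoothed = savgol2d(noisy, window_size=7, order=2)                        # fitted surface
grad_row, grad_col = savgol2d(noisy, window_size=7, order=2, derivative='gradient')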
from itertools import product
from numbers import Number
from numpy import cos, sin, pi, sqrt
import numpy as np
from .utils import check_random_state
COORD_MAP = {'x':
|
np.array([1, 0, 0])
|
numpy.array
|
# Posterior inference.
import pyro
import pyro.distributions as dist
import torch
import numpy as np
import scipy.sparse as sp
import cellbender.remove_background.consts as consts
from typing import Tuple, List, Dict, Optional
from abc import ABC, abstractmethod
import logging
class Posterior(ABC):
"""Base class Posterior handles posterior count inference.
Args:
dataset_obj: Dataset object.
vi_model: Trained RemoveBackgroundPyroModel.
counts_dtype: Data type of posterior count matrix. Can be one of
[np.uint32, np.float32]
float_threshold: For floating point count matrices, counts below
this threshold will be set to zero, for the purposes of constructing
a sparse matrix. Unused if counts_dtype is np.uint32
Properties:
mean: Posterior count mean, as a sparse matrix.
"""
def __init__(self,
dataset_obj: 'SingleCellRNACountsDataset', # Dataset
vi_model: 'RemoveBackgroundPyroModel',
counts_dtype: np.dtype = np.uint32,
float_threshold: float = 0.5):
self.dataset_obj = dataset_obj
self.vi_model = vi_model
self.use_cuda = vi_model.use_cuda
self.analyzed_gene_inds = dataset_obj.analyzed_gene_inds
self.count_matrix_shape = dataset_obj.data['matrix'].shape
self.barcode_inds = np.arange(0, self.count_matrix_shape[0])
self.dtype = counts_dtype
self.float_threshold = float_threshold
self._mean = None
self._latents = None
super(Posterior, self).__init__()
@abstractmethod
def _get_mean(self):
"""Obtain mean posterior counts and store in self._mean"""
pass
@property
def mean(self) -> sp.csc_matrix:
if self._mean is None:
self._get_mean()
return self._mean
@property
def latents(self) -> sp.csc_matrix:
if self._latents is None:
self._get_latents()
return self._latents
@property
def variance(self):
raise NotImplementedError("Posterior count variance not implemented.")
@torch.no_grad()
def _get_latents(self):
"""Calculate the encoded latent variables."""
data_loader = self.dataset_obj.get_dataloader(use_cuda=self.use_cuda,
analyzed_bcs_only=True,
batch_size=500,
shuffle=False)
z = np.zeros((len(data_loader), self.vi_model.encoder['z'].output_dim))
d = np.zeros(len(data_loader))
p = np.zeros(len(data_loader))
epsilon = np.zeros(len(data_loader))
for i, data in enumerate(data_loader):
if 'chi_ambient' in pyro.get_param_store().keys():
chi_ambient = pyro.param('chi_ambient').detach()
else:
chi_ambient = None
enc = self.vi_model.encoder.forward(x=data, chi_ambient=chi_ambient)
ind = i * data_loader.batch_size
z[ind:(ind + data.shape[0]), :] = enc['z']['loc'].detach().cpu().numpy()
phi_loc = pyro.param('phi_loc')
phi_scale = pyro.param('phi_scale')
d[ind:(ind + data.shape[0])] = \
dist.LogNormal(loc=enc['d_loc'],
scale=pyro.param('d_cell_scale')).mean.detach().cpu().numpy()
p[ind:(ind + data.shape[0])] = enc['p_y'].sigmoid().detach().cpu().numpy()
epsilon[ind:(ind + data.shape[0])] = dist.Gamma(enc['epsilon'] * self.vi_model.epsilon_prior,
self.vi_model.epsilon_prior).mean.detach().cpu().numpy()
self._latents = {'z': z, 'd': d, 'p': p,
'phi_loc_scale': [phi_loc.item(), phi_scale.item()],
'epsilon': epsilon}
@torch.no_grad()
def _param_map_estimates(self,
data: torch.Tensor,
chi_ambient: torch.Tensor) -> Dict[str, torch.Tensor]:
"""Calculate MAP estimates of mu, the mean of the true count matrix, and
lambda, the rate parameter of the Poisson background counts.
Args:
data: Dense tensor minibatch of cell by gene count data.
chi_ambient: Point estimate of inferred ambient gene expression.
Returns:
mu_map: Dense tensor of Negative Binomial means for true counts.
lambda_map: Dense tensor of Poisson rate params for noise counts.
alpha_map: Dense tensor of Dirichlet concentration params that
inform the overdispersion of the Negative Binomial.
"""
# Encode latents.
enc = self.vi_model.encoder.forward(x=data,
chi_ambient=chi_ambient)
z_map = enc['z']['loc']
chi_map = self.vi_model.decoder.forward(z_map)
phi_loc = pyro.param('phi_loc')
phi_scale = pyro.param('phi_scale')
phi_conc = phi_loc.pow(2) / phi_scale.pow(2)
phi_rate = phi_loc / phi_scale.pow(2)
alpha_map = 1. / dist.Gamma(phi_conc, phi_rate).mean
y = (enc['p_y'] > 0).float()
d_empty = dist.LogNormal(loc=pyro.param('d_empty_loc'),
scale=pyro.param('d_empty_scale')).mean
d_cell = dist.LogNormal(loc=enc['d_loc'],
scale=pyro.param('d_cell_scale')).mean
epsilon = dist.Gamma(enc['epsilon'] * self.vi_model.epsilon_prior,
self.vi_model.epsilon_prior).mean
if self.vi_model.include_rho:
rho = pyro.param("rho_alpha") / (pyro.param("rho_alpha")
+ pyro.param("rho_beta"))
else:
rho = None
# Calculate MAP estimates of mu and lambda.
mu_map = self.vi_model.calculate_mu(epsilon=epsilon,
d_cell=d_cell,
chi=chi_map,
y=y,
rho=rho)
lambda_map = self.vi_model.calculate_lambda(epsilon=epsilon,
chi_ambient=chi_ambient,
d_empty=d_empty,
y=y,
d_cell=d_cell,
rho=rho,
chi_bar=self.vi_model.avg_gene_expression)
return {'mu': mu_map, 'lam': lambda_map, 'alpha': alpha_map}
def dense_to_sparse(self,
chunk_dense_counts: np.ndarray) -> Tuple[List, List, List]:
"""Distill a batch of dense counts into sparse format.
Barcode numbering is relative to the tensor passed in.
"""
# TODO: speed up by keeping it a torch tensor as long as possible
if chunk_dense_counts.dtype != np.int32:
if self.dtype == np.uint32:
# Turn the floating point count estimates into integers.
decimal_values, _ = np.modf(chunk_dense_counts) # Stuff after decimal.
roundoff_counts = np.random.binomial(1, p=decimal_values) # Bernoulli.
chunk_dense_counts = np.floor(chunk_dense_counts).astype(dtype=int)
chunk_dense_counts += roundoff_counts
elif self.dtype == np.float32:
# Truncate counts at a threshold value.
chunk_dense_counts = (chunk_dense_counts *
(chunk_dense_counts > self.float_threshold))
else:
raise NotImplementedError(f"Count matrix dtype {self.dtype} is not "
f"supported. Choose from [np.uint32, "
f"np.float32]")
# Find all the nonzero counts in this dense matrix chunk.
nonzero_barcode_inds_this_chunk, nonzero_genes_trimmed = \
np.nonzero(chunk_dense_counts)
nonzero_counts = \
chunk_dense_counts[nonzero_barcode_inds_this_chunk,
nonzero_genes_trimmed].flatten(order='C')
# Get the original gene index from gene index in the trimmed dataset.
nonzero_genes = self.analyzed_gene_inds[nonzero_genes_trimmed]
return nonzero_barcode_inds_this_chunk, nonzero_genes, nonzero_counts
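The integer branch above rounds fractional posterior counts stochastically: the fractional part of each estimate becomes the probability of rounding up, so expected totals are preserved. A standalone sketch of that trick on toy values, independent of the Posterior class:
import numpy as np
estimates = np.array([0.2, 1.7, 2.5])                  # fractional posterior counts
frac, _ = np.modf(estimates)                           # fractional parts: 0.2, 0.7, 0.5
round_up = np.random.binomial(1, p=frac)               # one Bernoulli draw per entry
counts = np.floor(estimates).astype(int) + round_up    # integers with the right expectation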
class ImputedPosterior(Posterior):
"""Posterior count inference using imputation to infer cell mean (d * chi).
Args:
dataset_obj: Dataset object.
vi_model: Trained RemoveBackgroundPyroModel.
guide: Variational posterior pyro guide function, optional. Only
specify if the required guide function is not vi_model.guide.
encoder: Encoder that provides encodings of data.
counts_dtype: Data type of posterior count matrix. Can be one of
[np.uint32, np.float32]
float_threshold: For floating point count matrices, counts below
this threshold will be set to zero, for the purposes of constructing
a sparse matrix. Unused if counts_dtype is np.uint32
Properties:
mean: Posterior count mean, as a sparse matrix.
encodings: Encoded latent variables, one per barcode in the dataset.
"""
def __init__(self,
dataset_obj: 'SingleCellRNACountsDataset', # Dataset
vi_model: 'RemoveBackgroundPyroModel', # Trained variational inference model
guide=None,
encoder=None, #: Union[CompositeEncoder, None] = None,
counts_dtype: np.dtype = np.uint32,
float_threshold: float = 0.5):
self.vi_model = vi_model
self.use_cuda = vi_model.use_cuda
self.guide = guide if guide is not None else vi_model.encoder
self.encoder = encoder if encoder is not None else vi_model.encoder
self._encodings = None
self._mean = None
super(ImputedPosterior, self).__init__(dataset_obj=dataset_obj,
vi_model=vi_model,
counts_dtype=counts_dtype,
float_threshold=float_threshold)
@torch.no_grad()
def _get_mean(self):
"""Send dataset through a guide that returns mean posterior counts.
Keep track of only what is necessary to distill a sparse count matrix.
"""
data_loader = self.dataset_obj.get_dataloader(use_cuda=self.use_cuda,
analyzed_bcs_only=False,
batch_size=500,
shuffle=False)
barcodes = []
genes = []
counts = []
ind = 0
for data in data_loader:
# Get return values from guide.
dense_counts_torch = self._param_map_estimates(data=data,
chi_ambient=pyro.param("chi_ambient"))
dense_counts = dense_counts_torch.detach().cpu().numpy()
bcs_i_chunk, genes_i, counts_i = self.dense_to_sparse(dense_counts)
# Translate chunk barcode inds to overall inds.
bcs_i = self.barcode_inds[bcs_i_chunk + ind]
# Add sparse matrix values to lists.
barcodes.append(bcs_i)
genes.append(genes_i)
counts.append(counts_i)
# Increment barcode index counter.
ind += data.shape[0] # Same as data_loader.batch_size
# Convert the lists to numpy arrays.
counts = np.array(np.concatenate(tuple(counts)), dtype=self.dtype)
barcodes = np.array(np.concatenate(tuple(barcodes)), dtype=np.uint32)
genes = np.array(np.concatenate(tuple(genes)), dtype=np.uint32)
# Put the counts into a sparse csc_matrix.
self._mean = sp.csc_matrix((counts, (barcodes, genes)),
shape=self.count_matrix_shape)
class ProbPosterior(Posterior):
"""Posterior count inference using a noise count probability distribution.
Args:
dataset_obj: Dataset object.
vi_model: Trained model: RemoveBackgroundPyroModel
fpr: Desired false positive rate for construction of the final regularized
posterior on true counts. False positives are true counts that are
(incorrectly) removed from the dataset.
float_threshold: For floating point count matrices, counts below
this threshold will be set to zero, for the purposes of constructing
a sparse matrix. Unused if counts_dtype is np.uint32
Properties:
mean: Posterior count mean, as a sparse matrix.
encodings: Encoded latent variables, one per barcode in the dataset.
"""
def __init__(self,
dataset_obj: 'SingleCellRNACountsDataset',
vi_model: 'RemoveBackgroundPyroModel',
fpr: float = 0.01,
float_threshold: float = 0.5):
self.vi_model = vi_model
self.use_cuda = vi_model.use_cuda
self.fpr = fpr
self.lambda_multiplier = None
self._encodings = None
self._mean = None
self.random =
|
np.random.RandomState(seed=1234)
|
numpy.random.RandomState
|
import logging
import random
from collections import defaultdict
from copy import deepcopy
from functools import partial
from multiprocessing import Pool
from typing import Any, Dict, Iterator, List, Optional, Sequence, Tuple
import numpy as np
from transformers import PreTrainedTokenizer
from benchmarker.data.data_converter import DataConverter, NotEnoughSalientSpans
from benchmarker.data.model.feature import Feature
from benchmarker.data.model.span import Span
from benchmarker.data.reader.common import DataInstance
from benchmarker.data.slicer import LongPageStrategy
from benchmarker.data.utils import single_line_spans
logger = logging.getLogger(__name__)
def noise_span_to_unique_sentinel(tokens, noise_mask, tokenizer):
"""Partially copied from original text-to-text-transfer-transformer repo.
Replace each run of consecutive noise tokens with a different sentinel.
The idea here is to be able to align the dropped spans in the inputs
with the markers in the targets.
We want to generate training examples like
"We hold X to be Y that Z" -> "X these truths Y self evident Z that"
Sentinels assigned in decreasing order within the sequence starting at
vocabulary.size - 1. That is, we appropriate the last tokens in the
vocabulary for additional use as sentinels.
:param tokens: a 1d integer Tensor
:param noise_mask: a boolean Tensor with the same shape as tokens
:param tokenizer: a t5 tokenizer
:return: a Tensor with the same shape and dtype as tokens
"""
vocab_size = tokenizer.vocab_size
prev_token_is_noise = np.pad(noise_mask[:-1], [[1, 0]])
first_noise_tokens = np.logical_and(noise_mask, np.logical_not(prev_token_is_noise))
subsequent_noise_tokens = np.logical_and(noise_mask, prev_token_is_noise)
sentinel_ids = vocab_size - np.cumsum(first_noise_tokens)
sentinel_tokens = tokenizer.convert_ids_to_tokens(sentinel_ids)
tokens = np.where(first_noise_tokens, sentinel_tokens, tokens)
return tokens[np.logical_not(subsequent_noise_tokens)]
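A toy run of noise_span_to_unique_sentinel with a stand-in tokenizer (an assumption for illustration only; a real T5 tokenizer would map the sentinel ids to <extra_id_*> strings):
import numpy as np
class _ToyTokenizer:
    vocab_size = 100
    def convert_ids_to_tokens(self, ids):
        return np.asarray(ids)  # stand-in: keep the sentinel ids as-is
tokens = np.arange(10, 20)                                       # ten token ids
mask = np.array([0, 1, 1, 0, 0, 1, 0, 0, 0, 0], dtype=bool)      # two noise runs
out = noise_span_to_unique_sentinel(tokens, mask, _ToyTokenizer())
# -> [10, 99, 13, 14, 98, 16, 17, 18, 19]: each masked run collapses to one sentinel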
def convert_ranges_to_noise_mask(noise_spans_ranges, length):
single_line_span_starts = noise_spans_ranges.flatten()
span_start_indicator = np.zeros(length)
span_start_indicator[single_line_span_starts[:-1]] = 1
span_num = np.cumsum(span_start_indicator)
is_noise = np.equal(span_num % 2, 1)
return is_noise[:length]
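For example (illustrative values), spans [[2, 4], [7, 10]] over a length-10 sequence mark positions 2-3 and 7-9 as noise:
import numpy as np
spans = np.array([[2, 4], [7, 10]])
mask = convert_ranges_to_noise_mask(spans, length=10)
# -> [False, False, True, True, False, False, False, True, True, True]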
def random_spans_noise_mask(length, seg_data, noise_density=0.2, mean_noise_span_length=3.5):
"""Partially coppied from text-to-text-transfer-transformer git repo
:param length: number of tokens
:param seg_data: dictionary with segment/visual data
:param noise_density: what fraction of the tokens to select as noise
:param mean_noise_span_length: average length of a noise span; the actual
average span length ends up lower due to span truncation to one-line spans
"""
num_noise_tokens = int(np.round(length * noise_density))
# avoid degeneracy by ensuring positive numbers of noise and nonnoise tokens.
num_noise_tokens = np.minimum(
|
np.maximum(num_noise_tokens, 1)
|
numpy.maximum
|
import numpy as np
from multiagent.core import World, Agent, Landmark
from multiagent.scenario import BaseScenario
import webcolors
import random
class Scenario(BaseScenario):
def make_world(self):
world = World()
# set any world properties first
# world.dim_c = 2
self.num_predator = 4
self.num_prey = 3
self.penalty_of_existence = 0.01
print("NUMBER OF PREDATORS:",self.num_predator)
print("NUMBER OF PREYS:",self.num_prey)
world.collaborative = True
# add predator
world.agents = [Agent() for i in range(self.num_predator)]
for i, predator in enumerate(world.agents):
predator.name = 'predator %d' % i
predator.collide = False
predator.silent = True
predator.size = 0.1 #was 0.15
# add prey
for i in range(self.num_prey):
world.agents.append(Agent())
for i, prey in enumerate(world.agents[self.num_predator:]):
prey.name = 'prey %d' % i
prey.collide = True
prey.silent = True
prey.size = 0.05 #was 0.15
prey.captured = 0
# make initial conditions
self.reset_world(world)
return world
def check_collision_before_spawning(self,agent,agent_list):
for other_agent in agent_list:
if agent.name == other_agent.name:
continue
delta_pos = agent.state.p_pos - other_agent.state.p_pos
dist = np.sqrt(np.sum(np.square(delta_pos)))
dist_min = (agent.size + other_agent.size) * 1.5
if dist < dist_min:
return True
return False
def reset_world(self, world):
agent_list = []
for i in range(self.num_predator+self.num_prey):
if "predator" in world.agents[i].name:
rgb = np.random.uniform(-1,1,3)
world.agents[i].color = rgb
else:
world.agents[i].color = np.array([0.0,0.0,0.0])
world.agents[i].state.p_pos = np.random.uniform(-1, +1, world.dim_p)
while self.check_collision_before_spawning(world.agents[i], agent_list):
world.agents[i].state.p_pos = np.random.uniform(-1, +1, world.dim_p)
agent_list.append(world.agents[i])
world.agents[i].state.p_vel = np.zeros(world.dim_p)
world.agents[i].state.c = np.zeros(world.dim_c)
world.agents[i].prevDistance = 0.0
def benchmark_data(self, agent, world):
rew = 0
collisions = 0
occupied_landmarks = 0
min_dists = 0
for l in world.landmarks:
dists = [np.sqrt(np.sum(np.square(a.state.p_pos - l.state.p_pos))) for a in world.agents]
min_dists += min(dists)
rew -= min(dists)
if min(dists) < 0.1:
occupied_landmarks += 1
if agent.collide:
for a in world.agents:
if self.is_collision(a, agent):
rew -= 1
collisions += 1
return (rew, collisions, min_dists, occupied_landmarks)
def is_collision(self, agent1, agent2):
if agent1.name == agent2.name:
return False
delta_pos = agent1.state.p_pos - agent2.state.p_pos
dist = np.sqrt(np.sum(np.square(delta_pos)))
dist_min = (agent1.size + agent2.size)
return dist < dist_min
def reward(self, agent, world):
if "prey" in agent.name:
return None
rew = 0
for a in world.agents[self.num_predator:]:
if self.is_collision(a, agent):
# if you get to the prey you get something
rew += 0.1
for predator in world.agents[:self.num_predator]:
if predator.name == agent.name:
continue
if self.is_collision(a,predator):
# if two predators get to the prey, they get a higher reward
rew += 1.0
rew -= self.penalty_of_existence
return rew
def observation(self, agent, world):
if agent.state.p_pos[0]>1:
agent.state.p_pos[0] = 1
elif agent.state.p_pos[0]<-1:
agent.state.p_pos[0] = -1
if agent.state.p_pos[1]>1: # clamp the y coordinate independently of x
agent.state.p_pos[1] = 1
elif agent.state.p_pos[1]<-1:
agent.state.p_pos[1] = -1
if "predator" in agent.name:
curr_agent_index = world.agents.index(agent)
current_predator_critic = [agent.state.p_pos,agent.state.p_vel]
current_predator_actor = [agent.state.p_pos,agent.state.p_vel]
return np.concatenate(current_predator_critic),
|
np.concatenate(current_predator_actor)
|
numpy.concatenate
|
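Both the spawning check and the reward logic above reduce to a circle-overlap test: two agents collide when the distance between their positions drops below the sum of their radii (with a 1.5x margin at spawn time). A standalone sketch with toy positions and sizes:
import numpy as np
p1, p2 = np.array([0.0, 0.0]), np.array([0.10, 0.05])
r1, r2 = 0.1, 0.05
dist = np.sqrt(np.sum(np.square(p1 - p2)))
collides = dist < (r1 + r2)                 # test used in is_collision / reward
safe_to_spawn = dist >= 1.5 * (r1 + r2)     # margin used in check_collision_before_spawning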
from galry import *
import numpy as np
from matplotlib.colors import hsv_to_rgb
def colormap(x):
"""Colorize a 2D grayscale array.
Arguments:
* x:an NxM array with values in [0,1]
Returns:
* y: an NxMx3 array with a rainbow color palette.
"""
x = np.clip(x, 0., 1.)
# initial and final gradient colors, here rainbow gradient
col0 = np.array([.67, .91, .65]).reshape((1, 1, -1))
col1 = np.array([0., 1., 1.]).reshape((1, 1, -1))
col0 =
|
np.tile(col0, x.shape + (1,))
|
numpy.tile
|
import os
import time
import math
import numpy as np
import os.path as osp
from baselines import logger
from collections import deque
from baselines.common import explained_variance
import pylsl
import pickle
from train_ppo.ppo_model import Model
from train_ppo.trainer_helper import run_dijkstra, judge_action_1D, get_simulated_feedback, get_feedback_from_LSL
from train_ppo.BMM import train_bmm_model
class Runner(object):
def __init__(self, *, env, model, nsteps, gamma, lam,
judge_action=None,
use_rich_reward=False,
use_multiple_starts=False,
use_feedback=False,
use_real_feedback=False,
only_use_hr_until=1000,
trans_to_rl_in=1000,
init_rl_importance=0.2):
self.env = env
self.model = model
nenv = 1
self.obs = np.zeros((nenv,) + env.observation_space.shape, dtype=model.train_model.X.dtype.name)
self.obs_sensor = np.zeros((nenv,) + env.sensor_space.shape, dtype=model.train_model.X.dtype.name)
obs_all = self.env.reset()
self.obs_sensor[:] = obs_all['nonviz_sensor']
self.obs[:] = obs_all['obs']
self.gamma = gamma
self.lam = lam
self.nsteps = nsteps
self.states = model.initial_state
self.dones = False
self.judge_action = judge_action
self.use_rich_reward = use_rich_reward
self.use_multiple_starts = use_multiple_starts
self.use_feedback = use_feedback
self.use_real_feedback = use_real_feedback
self.only_use_hr_until = only_use_hr_until
self.trans_to_rl_in = trans_to_rl_in
self.init_rl_importance = init_rl_importance
self.rl_importance = 0
self.num_step = 0
def run(self):
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [], [], [], [], [], []
mb_states = self.states
epinfos = []
mb_action_idxs, mb_cors = [], []
mb_sparse_rew, mb_rich_rew = [], []
for _ in range(self.nsteps):
self.num_step += 1
self.env.env.action_idx = self.num_step
if self.use_feedback:
# transfer from hr policy to rl policy
# between steps [only_use_hr_until, only_use_hr_until + trans_to_rl_in]
# rl_importance = (self.num_step - self.only_use_hr_until) / self.trans_to_rl_in
if self.dones:
hr_num_step = self.only_use_hr_until + self.trans_to_rl_in
if self.num_step <= self.only_use_hr_until:
self.rl_importance = 0
elif self.num_step <= hr_num_step:
self.env.env.episode_max_len = 120
self.rl_importance = np.random.uniform(0, 1) < \
(self.num_step - self.only_use_hr_until) / self.trans_to_rl_in * \
(1 - self.init_rl_importance) + self.init_rl_importance
else:
self.rl_importance = 1
self.rl_importance = np.clip(self.rl_importance, 0, 1)
# print('rl importance: {}'.format(self.rl_importance))
else:
self.rl_importance = 1
if self.env.env.config['offline']:
good_actions, bad_actions = [], []
for action in range(self.env.action_space.n):
if self.judge_action(self.obs_sensor[0], action):
good_actions.append(action)
else:
bad_actions.append(action)
if not bad_actions or not good_actions:
actions = np.random.choice(self.env.action_space.n)
else:
actions = np.random.choice(np.random.choice([good_actions, bad_actions]))
_, values, self.states, neglogpacs = self.model.step(self.obs)
else:
actions, values, self.states, neglogpacs = self.model.step(self.obs, self.rl_importance)
if self.use_feedback and self.use_real_feedback and self.num_step < self.only_use_hr_until:
if np.random.uniform() < 0.1 + 0.1 * (1 - self.num_step / self.only_use_hr_until):
actions = 0
mb_obs.append(self.obs.copy())
mb_actions.append([actions])
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append([self.dones])
mb_action_idxs.append(self.num_step)
mb_cors.append(self.obs_sensor.copy())
if self.dones:
obs_all = self.env.reset()
if self.use_multiple_starts:
obs_all = self.env.env.random_reset()
self.obs_sensor[:] = obs_all['nonviz_sensor']
self.obs[:] = obs_all['obs']
rewards = sparse_rewards = rich_rewards = 0
self.dones = False
else:
obs_all, rewards_dict, self.dones, infos = self.env.step(actions)
sparse_rewards = rewards_dict["sparse"]
rich_rewards = rewards_dict["rich"]
self.obs_sensor[:] = obs_all['nonviz_sensor']
self.obs[:] = obs_all['obs']
if infos['episode'] is not None:
maybeepinfo = infos['episode']
if maybeepinfo:
epinfos.append(maybeepinfo)
rewards = rich_rewards if self.use_rich_reward else sparse_rewards
mb_rewards.append([rewards])
mb_sparse_rew.append([sparse_rewards])
mb_rich_rew.append([rich_rewards])
# batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=bool)
mb_cors = np.asarray(mb_cors, dtype=self.obs_sensor.dtype)
mb_sparse_rew = np.asarray(mb_sparse_rew, dtype=np.float32)
mb_rich_rew = np.asarray(mb_rich_rew, dtype=np.float32)
return (*map(sf01, (mb_obs, mb_rewards, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_cors, mb_sparse_rew, mb_rich_rew)),
mb_states, mb_action_idxs, epinfos)
def calculate_returns(self, mb_rewards, mb_dones, mb_values):
# discount/bootstrap off value fn
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
nsteps = len(mb_rewards)
last_values = self.model.value(self.obs, self.states, self.dones)
for t in reversed(range(nsteps)):
if t == nsteps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t + 1]
nextvalues = mb_values[t + 1]
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
return mb_returns
# obs, returns, masks, actions, values, neglogpacs, states = runner.run()
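calculate_returns above implements Generalized Advantage Estimation: delta_t = r_t + gamma * V_{t+1} * (1 - done) - V_t is accumulated backwards with factor gamma * lam, and returns = advantages + values. A self-contained numeric sketch of the same recursion on a toy three-step rollout (all numbers are illustrative):
import numpy as np
gamma, lam = 0.99, 0.95
rewards = np.array([1.0, 0.0, 1.0])
values = np.array([0.5, 0.4, 0.3])
dones = np.array([0.0, 0.0, 0.0])        # no episode boundary inside the rollout
last_value, last_done = 0.2, 0.0         # bootstrap value/done after the last step
advs = np.zeros_like(rewards)
lastgaelam = 0.0
for t in reversed(range(len(rewards))):
    if t == len(rewards) - 1:
        nextnonterminal, nextvalues = 1.0 - last_done, last_value
    else:
        nextnonterminal, nextvalues = 1.0 - dones[t + 1], values[t + 1]
    delta = rewards[t] + gamma * nextvalues * nextnonterminal - values[t]
    advs[t] = lastgaelam = delta + gamma * lam * nextnonterminal * lastgaelam
returns = advs + values                  # what the PPO update consumes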
def sf01(arr):
# swap and then flatten axes 0 and 1
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
def constfn(val):
def f(_):
return val
return f
def learn(*, policy, env, raw_env,
use_2D_env=True,
use_other_room=False,
use_rich_reward=False,
use_multiple_starts=False,
use_feedback=True,
use_real_feedback=False,
only_use_hr_until=1000,
trans_to_rl_in=1000,
nsteps=8,
total_timesteps=1000,
ppo_lr=2e-4, cliprange=0.2, ent_coef=.1, vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
ppo_noptepochs=4, ppo_batch_size=32, ppo_minibatch_size=8, init_rl_importance=0.2,
feedback_lr=1e-3, min_feedback_buffer_size=32,
feedback_noptepochs=4, feedback_batch_size=16, feedback_minibatch_size=8,
feedback_training_prop=0.7,
feedback_training_new_prop=0.4,
feedback_use_mixup=False,
hf_loss_type="CCE", hf_loss_param=None,
good_feedback_acc=0.7,
bad_feedback_acc=0.7,
log_interval=10, save_interval=0, reload_name=None, base_path=None):
if isinstance(ppo_lr, float):
ppo_lr = constfn(ppo_lr)
else:
assert callable(ppo_lr)
if isinstance(cliprange, float):
cliprange = constfn(cliprange)
else:
assert callable(cliprange)
total_timesteps = int(total_timesteps)
assert ppo_batch_size % nsteps == 0
ob_space = env.observation_space
ac_space = env.action_space
nenvs = 1
nbatch = nenvs * nsteps
if hf_loss_type == 0:
hf_loss_param = None
make_model = lambda: Model(policy=policy, ob_space=ob_space, ac_space=ac_space,
nbatch_act=nenvs, nbatch_train=ppo_minibatch_size, nbatch_feedback=feedback_minibatch_size,
nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm,
hf_loss_type=hf_loss_type,
hf_loss_param=hf_loss_param)
if save_interval and logger.get_dir():
import cloudpickle
if not base_path:
base_path = os.path.dirname(os.path.abspath(__file__))
if not os.path.isdir(osp.join(base_path, "models")):
os.mkdir(osp.join(base_path, "models"))
with open(osp.join(base_path, "models", 'make_model.pkl'), 'wb') as fh:
fh.write(cloudpickle.dumps(make_model))
if use_real_feedback:
print("looking for an EEG_Pred stream...", end="", flush=True)
feedback_LSL_stream = pylsl.StreamInlet(pylsl.resolve_stream('type', 'EEG_Pred')[0])
print(" done")
model = make_model()
if reload_name:
model.load(reload_name)
target_position = raw_env.robot.get_target_position()
if use_2D_env:
judge_action, *_ = run_dijkstra(raw_env, target_position, use_other_room=use_other_room)
else:
judge_action = judge_action_1D(raw_env, target_position)
runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam,
judge_action=judge_action,
use_rich_reward=use_rich_reward,
use_multiple_starts=use_multiple_starts,
use_feedback=use_feedback,
use_real_feedback=use_real_feedback,
only_use_hr_until=only_use_hr_until,
trans_to_rl_in=trans_to_rl_in,
init_rl_importance=init_rl_importance)
epinfobuf = deque(maxlen=100)
nupdates = total_timesteps // nbatch
state_action_buffer = deque(maxlen=100)
action_idx_buffer = deque(maxlen=100)
feedback_buffer_train = {}
feedback_buffer_train_true = {}
feedback_buffer_valid = {}
feedback_bmms = {}
for a in range(ac_space.n):
feedback_buffer_train[a], feedback_buffer_train_true[a], feedback_buffer_valid[a] = [], [], []
feedback_bmms[a] = 0
performance = {"feedback": [], "sparse_reward": [], "rich_reward": [],
"train_acc": [], "train_true_acc": [], "valid_acc": []}
epi_test_num = [0 for _ in range(ac_space.n)]
ppo_obs, ppo_rewards, ppo_masks, ppo_actions, ppo_values, ppo_neglogpacs = [], [], [], [], [], []
for update in range(1, nupdates + 1):
tstart = time.time()
frac = 1.0 - (update - 1.0) / nupdates
ppo_lrnow = ppo_lr(frac)
cliprangenow = cliprange(frac)
obs, rewards, masks, actions, values, neglogpacs, cors, sparse_rew, rich_rew, _, action_idxs, epinfos = runner.run()
epinfobuf.extend(epinfos)
performance["sparse_reward"].extend(sparse_rew)
performance["rich_reward"].extend(rich_rew)
mblossvals = []
state_action_buffer.extend([[s, a] for s, a in zip(obs, actions)])
action_idx_buffer.extend(action_idxs)
if use_feedback:
if use_real_feedback:
action_idxs, feedbacks, correct_feedbacks = get_feedback_from_LSL(feedback_LSL_stream)
print("Received feedback from LSL", feedbacks)
else:
action_idxs, feedbacks, correct_feedbacks = \
get_simulated_feedback(cors if use_2D_env else obs, actions, action_idxs, judge_action,
good_feedback_acc, bad_feedback_acc)
performance["feedback"].extend(correct_feedbacks)
# add feedbacks into feedback replay buffer
if len(feedbacks):
for a_idx, fb, cfb in zip(action_idxs, feedbacks, correct_feedbacks):
s, a = state_action_buffer[action_idx_buffer.index(a_idx)]
epi_test_num[a] += 1 - feedback_training_prop
# s, fb, cfb = np.ones(13), 1, 1
if epi_test_num[a] > 1:
feedback_buffer_valid[a].append([s, cfb])
epi_test_num[a] -= 1
else:
feedback_buffer_train[a].append([s, fb])
feedback_buffer_train_true[a].append([s, cfb])
# train PPO
if runner.num_step >= only_use_hr_until:
ppo_obs.extend(obs)
ppo_rewards.extend(rewards)
ppo_masks.extend(masks)
ppo_actions.extend(actions)
ppo_values.extend(values)
ppo_neglogpacs.extend(neglogpacs)
if len(ppo_obs) == ppo_batch_size:
ppo_obs = np.asarray(ppo_obs)
ppo_rewards = np.asarray(ppo_rewards)
ppo_masks = np.asarray(ppo_masks)
ppo_actions = np.asarray(ppo_actions)
ppo_values =
|
np.asarray(ppo_values)
|
numpy.asarray
|
import scipy
"""
General methods for computing property statistics from a list of values
"""
import numpy as np
from scipy import stats
from six import string_types
class PropertyStats(object):
"""This class contains statistical operations that are commonly employed
when computing features.
The primary way for interacting with this class is to call the
``calc_stat`` function, which takes the name of the statistic you would
like to compute and the weights/values of data to be assessed. For example,
computing the mean of a list looks like::
x = [1, 2, 3]
PropertyStats.calc_stat(x, 'mean') # Result is 2
PropertyStats.calc_stat(x, 'mean', weights=[0, 0, 1]) # Result is 3
Some of the statistics functions take options (e.g., Holder means). You can
pass them to the statistics functions by adding them after the name and
two colons. For example, the 0th Holder mean would be::
PropertyStats.calc_stat(x, 'holder_mean::0')
You can, of course, call the statistical functions directly. All take at
least two arguments. The first is the data being assessed and the second,
optional, argument is the weights.
"""
@staticmethod
def calc_stat(data_lst, stat, weights=None):
"""
Compute a property statistic
Args:
data_lst (list of floats): list of values
stat (str) - Name of property to be compute. If there are arguments to the statistics function, these
should be added after the name and separated by two colons. For example, the 2nd Holder mean would
be "holder_mean::2"
weights (list of floats): (Optional) weights for each element in data_lst
Returns:
float - Desired statistic
"""
statistics = stat.split("::")
return getattr(PropertyStats, statistics[0])(data_lst, weights, *statistics[1:])
@staticmethod
def minimum(data_lst, weights=None):
"""Minimum value in a list
Args:
data_lst (list of floats): List of values to be assessed
weights: (ignored)
Returns:
minimum value
"""
return min(data_lst) if not np.any(np.isnan(data_lst)) else float("nan")
@staticmethod
def maximum(data_lst, weights=None):
"""Maximum value in a list
Args:
data_lst (list of floats): List of values to be assessed
weights: (ignored)
Returns:
maximum value
"""
return max(data_lst) if not np.any(np.isnan(data_lst)) else float("nan")
@staticmethod
def range(data_lst, weights=None):
"""Range of a list
Args:
data_lst (list of floats): List of values to be assessed
weights: (ignored)
Returns:
range
"""
return (max(data_lst) - min(data_lst)) if not np.any(np.isnan(data_lst)) else float("nan")
@staticmethod
def mean(data_lst, weights=None):
"""Arithmetic mean of list
Args:
data_lst (list of floats): List of values to be assessed
weights (list of floats): Weights for each value
Returns:
mean value
"""
return np.average(data_lst, weights=weights)
@staticmethod
def inverse_mean(data_lst, weights=None):
"""Mean of the inverse of each entry
Args:
data_lst (list of floats): List of values to be assessed
weights (list of floats): Weights for each value
Returns:
inverse mean
"""
return PropertyStats.mean([1.0 / x for x in data_lst], weights=weights)
@staticmethod
def avg_dev(data_lst, weights=None):
"""Mean absolute deviation of list of element data.
This is computed by first calculating the mean of the list,
and then computing the average absolute difference between each value
and the mean.
Args:
data_lst (list of floats): List of values to be assessed
weights (list of floats): Weights for each value
Returns:
mean absolute deviation
"""
mean = PropertyStats.mean(data_lst, weights)
return np.average(np.abs(np.subtract(data_lst, mean)), weights=weights)
@staticmethod
def std_dev(data_lst, weights=None):
"""Standard deviation of a list of element data
Args:
data_lst (list of floats): List of values to be assessed
weights (list of floats): Weights for each value
Returns:
standard deviation
"""
# Special case: Only one entry
if len(data_lst) == 1:
# This prevents numerical issues in the weighted std_dev
return 0
if weights is None:
return np.std(data_lst)
else:
beta = np.sum(weights) / (np.sum(weights) ** 2 - np.sum(np.power(weights, 2)))
dev = np.power(np.subtract(data_lst, PropertyStats.mean(data_lst, weights=weights)), 2)
return np.sqrt(beta * np.dot(dev, weights))
@staticmethod
def skewness(data_lst, weights=None):
"""Skewness of a list of data
Args:
data_lst (list of floats): List of values to be assessed
weights (list of floats): Weights for each value
Returns:
skewness
"""
# Special case: Only one entry
if len(data_lst) == 1:
# This prevents numerical issues in the weighted skewness
return 0
if weights is None:
return stats.skew(data_lst)
else:
# Compute the mean
mean = PropertyStats.mean(data_lst, weights)
# Compute the second and 3rd moments of the difference from the mean
total_weight = np.sum(weights)
diff = np.subtract(data_lst, mean)
u3 = np.dot(weights, np.power(diff, 3)) / total_weight
u2 = np.dot(weights, np.power(diff, 2)) / total_weight
if np.isclose(u3, 0):
return 0
return u3 / u2 ** 1.5
@staticmethod
def kurtosis(data_lst, weights=None):
"""Kurtosis of a list of data
Args:
data_lst (list of floats): List of values to be assessed
weights (list of floats): Weights for each value
Returns:
kurtosis
"""
# Special case: Only one entry
if len(data_lst) == 1:
# This prevents numerical issues in the weighted kurtosis
return 0
if weights is None:
return stats.kurtosis(data_lst, fisher=False)
else:
# Compute the mean
mean = PropertyStats.mean(data_lst, weights)
# Compute the second and 4th moments of the difference from the mean
total_weight = np.sum(weights)
diff_sq = np.power(np.subtract(data_lst, mean), 2)
u4 = np.dot(weights, np.power(diff_sq, 2))
u2 = np.dot(weights, diff_sq)
if np.isclose(u4, 0):
return 0
return u4 / u2 ** 2 * total_weight
@staticmethod
def geom_std_dev(data_lst, weights=None):
"""
Geometric standard deviation
Args:
data_lst (list of floats): List of values to be assessed
weights (list of floats): Weights for each value
Returns:
geometric standard deviation
"""
# Make fake weights, if none are provided
if weights is None:
weights = np.ones_like(data_lst)
# Compute the geometric std dev
mean = PropertyStats.holder_mean(data_lst, weights, 0)
beta = np.sum(weights) / (np.sum(weights) ** 2 - np.sum(np.power(weights, 2)))
dev = np.log(
|
np.true_divide(data_lst, mean)
|
numpy.true_divide
|
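A short usage sketch for the PropertyStats helper defined above, including the "::" convention for passing an argument to a statistic. Values are illustrative; holder_mean is defined further down in the original module and is not shown here.
x = [1.0, 2.0, 4.0]
PropertyStats.calc_stat(x, 'mean')                        # arithmetic mean, 7/3
PropertyStats.calc_stat(x, 'mean', weights=[0, 0, 1])     # weighted mean, 4.0
PropertyStats.calc_stat(x, 'std_dev', weights=[1, 1, 2])  # frequency-weighted standard deviation
PropertyStats.calc_stat(x, 'holder_mean::2')              # power-2 (quadratic) Holder mean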
"""
Script classifier_speech_features_application.py
For more information on this file see: README.md
Authors: <NAME>, <NAME>, <NAME>
Date: March 10th, 2021
License: 3-clause BSD (see README in https://github.com/gumaga/-audiotechnik_gruppe_3_finale_abgabe-)
"""
import soundfile as sf
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
import os
import pickle
import compressor # module containing compressor class
import filterbank # module containing filterbank class
import classifier_speech_features as csf
from pathlib import Path
# object for using functions to load data and extract features
classifier_functions = csf.classifier_speech_features()
# load pretrained classifier
directory = os.path.dirname(os.path.abspath(__file__)) # directory containing this file
data_path = os.path.join(directory, "classifier_file.obj")
data_path_shelve = os.path.join(directory, "classifier_file_shelve")
with open(data_path, 'rb') as pfile:
classifier = pickle.load(pfile)
# load audio file which is going to be classified and compressed
test_files = classifier_functions.load_data('test_files', pretrained_mode=True)
desired_feature = 'mfcc'
feature_variables = classifier_functions.feature_extraction(test_files, desired_feature, return_testrun_values=True)
# unpack touple for better readability
features, signal, fs, signal_length, block_length, forward_length = feature_variables
features = features[0] # get first array out of list, to avoid errors.
# No need for multiple arrays because we only want to read one file
# classify loaded audio file
Y_predict = classifier.predict(features)
# To determine where the compressor should act, a lead signal is created:
# if speech was predicted in 3 out of 4 overlapping blocks, 2
# successive blocks (i.e. 60 ms) are marked as speech.
blocks_without_overlap = int(np.ceil(signal_length/block_length)) # nr of blocks without overlap
lead = []
ones_vec = np.ones((block_length*2, 1))
zeros_vec = np.zeros((block_length*2, 1))
zz = 0 # counter
for value in np.arange(int(np.ceil(blocks_without_overlap/2))):
if np.sum(Y_predict[zz:zz+4]) >= 3:
lead = np.append(lead, ones_vec)
else:
lead = np.append(lead, zeros_vec)
zz += 4
lead = np.array(lead)
# trim the lead vector to the signal length, since slightly more than that
# was computed above
lead = lead[0:signal_length]
# divide signal into three filter-bands
lower_border_freq = 250
upper_border_freq = 4000
filt = filterbank.filterbank(fs, lower_border_freq, upper_border_freq)
low_sig, mid_sig, high_sig = filt.filter(signal)
# compress signal in filter-bands depending on where speech was detected
comp = compressor.compressor(fs=fs, ratio=2, mu_auto=True)
gain = 1/4 # -12 dB
compressed_low = comp.compress_mono(low_sig, lead)
compressed_mid = comp.compress_mono(mid_sig, lead)
compressed_high = comp.compress_mono(high_sig, lead)
signal_out = gain * compressed_low + compressed_mid + gain * compressed_high
# write signal into file so that you can (hopefully) hear a difference
sf.write('test_signal_compressed.wav', data=signal_out, samplerate=fs)
# plotting classifier output (lead) and the original audio signal
time_vec =
|
np.linspace(0, signal_length/fs, signal_length )
|
numpy.linspace
|
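The lead construction above is a simple block vote: each group of four overlapping classifier outputs is reduced to one speech/non-speech decision, which is then expanded to 2 * block_length samples. A standalone sketch of that vote with toy predictions and an assumed block_length:
import numpy as np
block_length = 3                                   # illustrative; the script derives it from feature extraction
Y_predict = np.array([1, 1, 1, 0, 0, 1, 0, 0])     # overlapping block predictions
lead = []
for zz in range(0, len(Y_predict), 4):
    speech = Y_predict[zz:zz + 4].sum() >= 3
    lead = np.append(lead, np.ones(2 * block_length) if speech else np.zeros(2 * block_length))
# first group votes speech (3 of 4), second does not -> lead = [1]*6 + [0]*6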
import logging
import re
import traceback
import numpy as np
import pandas as pd
import scipy.stats.stats as stats
import pandas.core.algorithms as algos
#from tqdm.notebook import tqdm
from .conf import MAX_BIN, FORCE_BIN
logger = logging.getLogger(__name__)
class InformationValue:
def __init__(self):
self.max_bin = MAX_BIN
self.force_bin = FORCE_BIN
def mono_bin(self, Y, X, n=MAX_BIN):
df1 = pd.DataFrame({"X": X, "Y": Y})
justmiss = df1[['X', 'Y']][df1.X.isnull()]
notmiss = df1[['X', 'Y']][df1.X.notnull()]
r = 0
while np.abs(r) < 1:
try:
d1 = pd.DataFrame(
{"X": notmiss.X, "Y": notmiss.Y, "Bucket": pd.qcut(notmiss.X, n)})
d2 = d1.groupby('Bucket', as_index=True)
r, p = stats.spearmanr(d2.mean().X, d2.mean().Y)
n = n - 1
except Exception as e:
n = n - 1
if len(d2) == 1:
n = self.force_bin
bins = algos.quantile(notmiss.X, np.linspace(0, 1, n))
if len(np.unique(bins)) == 2:
bins = np.insert(bins, 0, 1)
bins[1] = bins[1] - (bins[1] / 2)
d1 = pd.DataFrame({"X": notmiss.X, "Y": notmiss.Y, "Bucket": pd.cut(
notmiss.X, np.unique(bins), include_lowest=True)})
d2 = d1.groupby('Bucket', as_index=True)
d3 = pd.DataFrame({}, index=[])
d3["MIN_VALUE"] = d2.min().X
d3["MAX_VALUE"] = d2.max().X
d3["COUNT"] = d2.count().Y
d3["EVENT"] = d2.sum().Y
d3["NONEVENT"] = d2.count().Y - d2.sum().Y
d3 = d3.reset_index(drop=True)
if len(justmiss.index) > 0:
d4 = pd.DataFrame({'MIN_VALUE': np.nan}, index=[0])
d4["MAX_VALUE"] = np.nan
d4["COUNT"] = justmiss.count().Y
d4["EVENT"] = justmiss.sum().Y
d4["NONEVENT"] = justmiss.count().Y - justmiss.sum().Y
d3 = d3.append(d4, ignore_index=True)
d3["EVENT_RATE"] = d3.EVENT / d3.COUNT
d3["NON_EVENT_RATE"] = d3.NONEVENT / d3.COUNT
d3["DIST_EVENT"] = d3.EVENT / d3.sum().EVENT
d3["DIST_NON_EVENT"] = d3.NONEVENT / d3.sum().NONEVENT
d3["WOE"] = np.log(d3.DIST_EVENT / d3.DIST_NON_EVENT)
d3["IV"] = (d3.DIST_EVENT - d3.DIST_NON_EVENT) * \
np.log(d3.DIST_EVENT / d3.DIST_NON_EVENT)
d3["VAR_NAME"] = "VAR"
d3 = d3[['VAR_NAME', 'MIN_VALUE', 'MAX_VALUE', 'COUNT', 'EVENT', 'EVENT_RATE',
'NONEVENT', 'NON_EVENT_RATE', 'DIST_EVENT', 'DIST_NON_EVENT', 'WOE', 'IV']]
d3 = d3.replace([np.inf, -np.inf], 0)
d3.IV = d3.IV.sum()
return(d3)
def char_bin(self, Y, X):
df1 = pd.DataFrame({"X": X, "Y": Y})
justmiss = df1[['X', 'Y']][df1.X.isnull()]
notmiss = df1[['X', 'Y']][df1.X.notnull()]
df2 = notmiss.groupby('X', as_index=True)
d3 = pd.DataFrame({}, index=[])
d3["COUNT"] = df2.count().Y
d3["MIN_VALUE"] = df2.sum().Y.index
d3["MAX_VALUE"] = d3["MIN_VALUE"]
d3["EVENT"] = df2.sum().Y
d3["NONEVENT"] = df2.count().Y - df2.sum().Y
if len(justmiss.index) > 0:
d4 = pd.DataFrame({'MIN_VALUE': np.nan}, index=[0])
d4["MAX_VALUE"] = np.nan
d4["COUNT"] = justmiss.count().Y
d4["EVENT"] = justmiss.sum().Y
d4["NONEVENT"] = justmiss.count().Y - justmiss.sum().Y
d3 = d3.append(d4, ignore_index=True)
d3["EVENT_RATE"] = d3.EVENT / d3.COUNT
d3["NON_EVENT_RATE"] = d3.NONEVENT / d3.COUNT
d3["DIST_EVENT"] = d3.EVENT / d3.sum().EVENT
d3["DIST_NON_EVENT"] = d3.NONEVENT / d3.sum().NONEVENT
d3["WOE"] = np.log(d3.DIST_EVENT / d3.DIST_NON_EVENT)
d3["IV"] = (d3.DIST_EVENT - d3.DIST_NON_EVENT) * \
np.log(d3.DIST_EVENT / d3.DIST_NON_EVENT)
d3["VAR_NAME"] = "VAR"
d3 = d3[['VAR_NAME', 'MIN_VALUE', 'MAX_VALUE', 'COUNT', 'EVENT', 'EVENT_RATE',
'NONEVENT', 'NON_EVENT_RATE', 'DIST_EVENT', 'DIST_NON_EVENT', 'WOE', 'IV']]
d3 = d3.replace([np.inf, -np.inf], 0)
d3.IV = d3.IV.sum()
d3 = d3.reset_index(drop=True)
return(d3)
def data_vars(self, df1, target):
stack = traceback.extract_stack()
filename, lineno, function_name, code = stack[-2]
vars_name = re.compile(r'\((.*?)\).*$').search(code).groups()[0]
final = (re.findall(r"[\w']+", vars_name))[-1]
tname = target.name
numerical_features = set(df1.select_dtypes(np.number).columns)
x = df1.dtypes.index
has_target_nan = target.isna().any()
target_map = {}
# if target variable is not np.number type convert to category type
if target.name not in numerical_features:
target = target.astype('category')
target, target_map = target.factorize()
target_map = dict(enumerate(target_map))
# map NaN to -1 if NaN record present
if has_target_nan:
target_map[-1] = 'NaN'
count = -1
for i in x:
if i.upper() not in (final.upper()):
if np.issubdtype(df1[i], np.number) and df1[i].nunique() > 2:
conv = self.mono_bin(target, df1[i])
conv["VAR_NAME"] = i
count = count + 1
else:
conv = self.char_bin(target, df1[i])
conv["VAR_NAME"] = i
count = count + 1
if count == 0:
iv_df = conv
else:
iv_df = iv_df.append(conv, ignore_index=True)
iv = pd.DataFrame({'IV': iv_df.groupby('VAR_NAME').IV.max()})
iv = iv.sort_values("IV", ascending=False)
iv = iv.reset_index()
return (iv_df, iv, target_map)
def compute_numerical_bin(self, target, feature, bin_num=MAX_BIN):
df = pd.DataFrame({'X': feature, 'Y': target})
null_records = df[['X', 'Y']][df.X.isnull()]
records = df[['X', 'Y']][df.X.notnull()]
r = 0
logger.debug(
"starting while loop, inside compute_numerical_bin")
while np.abs(r) < 1:
try:
logger.debug("quantile_cut")
quantile_cut = pd.qcut(records.X, bin_num)
logger.debug("bin_df")
bin_df = pd.DataFrame({'X': records.X, 'Y': records.Y,
'Bucket': quantile_cut})
logger.debug("bin_df groupby")
bin_df = bin_df.groupby('Bucket', as_index=True)
logger.debug("calling spearmanr")
r, p = stats.spearmanr(bin_df.mean().X, bin_df.mean().Y)
logger.debug("r: %s, p: %s" % (r, p))
bin_num = bin_num - 1
except Exception as e:
bin_num = bin_num - 1
logger.debug("out of while loop, column")
if len(bin_df) == 1:
bin_num = self.force_bin
bins = algos.quantile(records.X, np.linspace(0, 1, bin_num))
if len(np.unique(bins)) == 2:
bins = np.insert(bins, 0, 1)
bins[1] = bins[1] - (bins[1] / 2)
quantile_cut = pd.cut(records.X, np.unique(bins),
include_lowest=True)
bin_df = pd.DataFrame({'X': records.X, 'Y': records.Y,
'Bucket': quantile_cut})
bin_df = bin_df.groupby('Bucket', as_index=True)
logger.debug("creating num_iv")
num_iv = pd.DataFrame({}, index=[])
logger.debug("num_iv created")
num_iv["MIN_VALUE"] = bin_df.min().X
num_iv["MAX_VALUE"] = bin_df.max().X
num_iv["COUNT"] = bin_df.count().Y
num_iv["EVENT"] = bin_df.sum().Y
num_iv["NONEVENT"] = bin_df.count().Y - bin_df.sum().Y
num_iv = num_iv.reset_index(drop=True)
logger.debug("reset index")
if len(null_records.index) > 0:
logger.debug("creating temp_num_iv")
temp_num_iv = pd.DataFrame({'MIN_VALUE': np.nan}, index=[0])
logger.debug("done creating temp_num_iv")
temp_num_iv["MAX_VALUE"] = np.nan
temp_num_iv["COUNT"] = null_records.count().Y
temp_num_iv["EVENT"] = null_records.sum().Y
temp_num_iv["NONEVENT"] = (null_records.count().Y -
null_records.sum().Y)
num_iv = num_iv.append(temp_num_iv, ignore_index=True, sort=False)
num_iv["EVENT_RATE"] = num_iv.EVENT / num_iv.COUNT
num_iv["NON_EVENT_RATE"] = num_iv.NONEVENT / num_iv.COUNT
num_iv["DIST_EVENT"] = num_iv.EVENT / num_iv.sum().EVENT
num_iv["DIST_NON_EVENT"] = num_iv.NONEVENT / num_iv.sum().NONEVENT
num_iv["WOE"] = np.log(num_iv.DIST_EVENT / num_iv.DIST_NON_EVENT)
num_iv["IV"] = ((num_iv.DIST_EVENT - num_iv.DIST_NON_EVENT) *
np.log(num_iv.DIST_EVENT / num_iv.DIST_NON_EVENT))
num_iv["VAR_NAME"] = "VAR"
num_iv = num_iv[['VAR_NAME', 'MIN_VALUE', 'MAX_VALUE', 'COUNT',
'EVENT', 'EVENT_RATE', 'NONEVENT', 'NON_EVENT_RATE',
'DIST_EVENT', 'DIST_NON_EVENT', 'WOE', 'IV']]
num_iv = num_iv.replace([np.inf, -np.inf], 0)
num_iv.IV = round(num_iv.IV.sum(), 2)
# clear memory
del df
del records
del quantile_cut
if len(bin_df) == 1:
del bins
del bin_df
if len(null_records.index) > 0:
del temp_num_iv
del null_records
logger.debug("coming out of compute_numerical_bin")
return num_iv
def compute_char_bin(self, target, feature):
df = pd.DataFrame({'X': feature, 'Y': target})
null_records = df[['X', 'Y']][df.X.isnull()]
records = df[['X', 'Y']][df.X.notnull()]
bin_df = records.groupby('X', as_index=True)
char_iv = pd.DataFrame({}, index=[])
char_iv["COUNT"] = bin_df.count().Y
char_iv["MIN_VALUE"] = bin_df.sum().Y.index
char_iv["MAX_VALUE"] = char_iv["MIN_VALUE"]
char_iv["EVENT"] = bin_df.sum().Y
char_iv["NONEVENT"] = bin_df.count().Y - bin_df.sum().Y
if len(null_records.index) > 0:
temp_char_iv = pd.DataFrame({'MIN_VALUE': np.nan}, index=[0])
temp_char_iv["MAX_VALUE"] = np.nan
temp_char_iv["COUNT"] = null_records.count().Y
temp_char_iv["EVENT"] = null_records.sum().Y
temp_char_iv["NONEVENT"] = null_records.count().Y - \
null_records.sum().Y
char_iv = char_iv.append(
temp_char_iv, ignore_index=True, sort=False)
char_iv["EVENT_RATE"] = char_iv.EVENT / char_iv.COUNT
char_iv["NON_EVENT_RATE"] = char_iv.NONEVENT / char_iv.COUNT
char_iv["DIST_EVENT"] = char_iv.EVENT / char_iv.sum().EVENT
char_iv["DIST_NON_EVENT"] = char_iv.NONEVENT / char_iv.sum().NONEVENT
char_iv["WOE"] = np.log(char_iv.DIST_EVENT / char_iv.DIST_NON_EVENT)
char_iv["IV"] = ((char_iv.DIST_EVENT - char_iv.DIST_NON_EVENT) *
|
np.log(char_iv.DIST_EVENT / char_iv.DIST_NON_EVENT)
|
numpy.log
|
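# --- Illustrative sketch (not part of the original file) ---
# A minimal worked example of the WOE/IV arithmetic used above, assuming the
# same definitions: DIST_EVENT and DIST_NON_EVENT are each bin's share of all
# events / non-events, WOE = log(DIST_EVENT / DIST_NON_EVENT), and IV is the
# sum of (DIST_EVENT - DIST_NON_EVENT) * WOE over the bins.
import numpy as np
events = np.array([30.0, 10.0])                   # events per bin
non_events = np.array([70.0, 90.0])               # non-events per bin
dist_event = events / events.sum()                # [0.75, 0.25]
dist_non_event = non_events / non_events.sum()    # [0.4375, 0.5625]
woe = np.log(dist_event / dist_non_event)         # per-bin weight of evidence
iv = ((dist_event - dist_non_event) * woe).sum()  # the variable's total IV
print(woe, iv)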
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# dataset src: https://data.london.gov.uk/dataset/smartmeter-energy-use-data-in-london-households
# file: UKPN-LCL-smartmeter-sample (986.99 kB)
# In[2]:
# A Time series is a collection of data points indexed,
# listed or graphed in time order.
# Most commonly, a time series is a sequence taken at
# successive equally spaced points in time.
# Thus it is a sequence of discrete-time data.
# In[3]:
# Load libraries
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import pandas as pd
import numpy as np
# In[5]:
raw_data_filename = "UKPN-LCL-smartmeter-sample.csv"
raw_data_df = pd.read_csv(
raw_data_filename,
header=0
)
# In[32]:
display(raw_data_df.shape)
display(raw_data_df.head(3))
display(raw_data_df.tail(3))
display(raw_data_df.dtypes)
display(raw_data_df.columns.values)
display(raw_data_df.describe(include='all'))
display(raw_data_df.isnull().sum())
display(raw_data_df[raw_data_df['KWH/hh (per half hour) '] == 'Null'].shape) # (1, 6)
# In[47]:
raw_date_kwh_df = raw_data_df[['DateTime', 'KWH/hh (per half hour) ']].copy()
raw_date_kwh_df = raw_date_kwh_df.rename(columns={"KWH/hh (per half hour) ": "KWH_hh"})
# In[49]:
# fix row where "KWH_hh" equals 'Null'
display(raw_date_kwh_df[raw_date_kwh_df['KWH_hh'] == 'Null'])  # the single 'Null' row (index 2982)
raw_date_kwh_df = raw_date_kwh_df.drop([2982])
# In[50]:
# fix dtypes
raw_date_kwh_df.loc[:, 'DateTime'] = pd.to_datetime(raw_date_kwh_df.loc[:, 'DateTime'])
raw_date_kwh_df.loc[:, 'KWH_hh'] = raw_date_kwh_df.loc[:, 'KWH_hh'].astype(float)
# In[51]:
display(raw_date_kwh_df.head())
display(raw_date_kwh_df.dtypes)
# In[60]:
date_kwh_df = raw_date_kwh_df.set_index(raw_date_kwh_df['DateTime'])
date_kwh_df = date_kwh_df.drop(['DateTime'], axis=1)
# In[61]:
display(date_kwh_df.head())
# In[63]:
date_kwh_df.plot(); plt.show()
# In[64]:
# Resampling
# Resampling involves changing the frequency of your
# time series observations.
# One reason why you may be interested in resampling
# your time series data is feature engineering.
# It can be used to provide additional structure or
# insight into the learning problem for supervised learning models.
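# (Illustrative cell, not in the original notebook.) A minimal sketch of the
# idea: downsampling the half-hourly readings to an hourly mean keeps the same
# time span but halves the number of observations. 'date_kwh_df' is the indexed
# frame built above; 'hourly_mean' is a new, illustrative name.
hourly_mean = date_kwh_df.resample('H').mean()
display(hourly_mean.head())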
# In[65]:
weekly = date_kwh_df.resample('W').sum()
# In[68]:
weekly.plot(); plt.show()
# In[72]:
daily = date_kwh_df.resample('D').sum()
daily.plot(); plt.show()
# In[73]:
daily.rolling(30, center=True).sum().plot(); plt.show()
# In[77]:
by_time = date_kwh_df.groupby(date_kwh_df.index.time).mean() # index == 'DateTime'
hourly_ticks = 4 * 60 * 60 *
|
np.arange(6)
|
numpy.arange
|
import os
import time
import json
import shutil
import argparse
import numpy as np
from tqdm import tqdm
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from torch.optim import Adam
from torch.utils.data import DataLoader
from transformers import BertModel, BertConfig, BertTokenizer, BertTokenizerFast
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from dataset import SelectionDataset
from transform import SelectionSequentialTransform, SelectionJoinTransform, SelectionConcatTransform
from encoder import PolyEncoder, BiEncoder, CrossEncoder
from sklearn.metrics import label_ranking_average_precision_score
import logging
logging.basicConfig(level=logging.ERROR)
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--bert_model", default='/search/odin/guobk/data/data_polyEncode/vpa/model_small_all', type=str)
parser.add_argument("--eval", action="store_true")
parser.add_argument("--model_type", default='bert', type=str)
parser.add_argument("--output_dir", default='/search/odin/guobk/data/data_polyEncode/vpa/model_small_all', type=str)
parser.add_argument("--train_dir", default='/search/odin/guobk/data/data_polyEncode/vpa/train_data_all/', type=str)
parser.add_argument("--use_pretrain", action="store_true")
parser.add_argument("--architecture", default='poly', type=str, help='[poly, bi, cross]')
parser.add_argument("--max_contexts_length", default=32, type=int)
parser.add_argument("--max_response_length", default=64, type=int)
parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.")
parser.add_argument("--eval_batch_size", default=32, type=int, help="Total batch size for eval.")
parser.add_argument("--print_freq", default=100, type=int, help="Log frequency")
parser.add_argument("--poly_m", default=16, type=int, help="Number of m of polyencoder")
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.01, type=float)
parser.add_argument("--warmup_steps", default=100, type=float)
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=10.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument('--seed', type=int, default=12345, help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument('--gpu', type=int, default=0)
args = parser.parse_args()
print(args)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
set_seed(args)
def eval_running_model(dataloader, test=False):
model.eval()
eval_loss, eval_hit_times = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
r10 = r2 = r1 = r5 = 0
mrr = []
for step, batch in enumerate(dataloader):
batch = tuple(t.to(device) for t in batch)
if args.architecture == 'cross':
text_token_ids_list_batch, text_input_masks_list_batch, text_segment_ids_list_batch, labels_batch = batch
with torch.no_grad():
logits = model(text_token_ids_list_batch, text_input_masks_list_batch, text_segment_ids_list_batch)
loss = F.cross_entropy(logits, torch.argmax(labels_batch, 1))
else:
context_token_ids_list_batch, context_input_masks_list_batch, \
response_token_ids_list_batch, response_input_masks_list_batch, labels_batch = batch
with torch.no_grad():
logits = model(context_token_ids_list_batch, context_input_masks_list_batch,
response_token_ids_list_batch, response_input_masks_list_batch)
loss = F.cross_entropy(logits, torch.argmax(labels_batch, 1))
r2_indices = torch.topk(logits, 2)[1] # R 2 @ 100
r5_indices = torch.topk(logits, 5)[1] # R 5 @ 100
r10_indices = torch.topk(logits, 10)[1] # R 10 @ 100
r1 += (logits.argmax(-1) == 0).sum().item()
r2 += ((r2_indices==0).sum(-1)).sum().item()
r5 += ((r5_indices==0).sum(-1)).sum().item()
r10 += ((r10_indices==0).sum(-1)).sum().item()
# mrr
logits = logits.data.cpu().numpy()
for logit in logits:
y_true = np.zeros(len(logit))
y_true[0] = 1
mrr.append(label_ranking_average_precision_score([y_true], [logit]))
eval_loss += loss.item()
nb_eval_examples += labels_batch.size(0)
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
eval_accuracy = r1 / nb_eval_examples
if not test:
result = {
'train_loss': tr_loss / nb_tr_steps,
'eval_loss': eval_loss,
'R1': r1 / nb_eval_examples,
'R2': r2 / nb_eval_examples,
'R5': r5 / nb_eval_examples,
'R10': r10 / nb_eval_examples,
'MRR': np.mean(mrr),
'epoch': epoch,
'global_step': global_step,
}
else:
result = {
'eval_loss': eval_loss,
'R1': r1 / nb_eval_examples,
'R2': r2 / nb_eval_examples,
'R5': r5 / nb_eval_examples,
'R10': r10 / nb_eval_examples,
'MRR': np.mean(mrr),
}
return result
class Tokenizer(object):
def __init__(self,path_vocab,do_lower_case):
with open(path_vocab,'r') as f:
self.vocab = f.read().strip().split('\n')
self.vocab = {self.vocab[k]:k for k in range(len(self.vocab))}
self.do_lower_case = do_lower_case
def token_to_ids(self,text,max_len,is_context=True):
if type(text)==str:
text = text.strip()
if self.do_lower_case:
text = text.lower()
res = [self.vocab['[CLS]']]
for i in range(min(max_len-2,len(text))):
if text[i] not in self.vocab:
res.append(self.vocab['[MASK]'])
else:
res.append(self.vocab[text[i]])
res.append(self.vocab['[SEP]'])
segIds = []
segIds = [1 for _ in range(len(res))]
segIds = segIds+[0]*(max_len-len(segIds))
res = res[:max_len]
res = res + [0]*(max_len-len(res))
tokenIds = res
return tokenIds,segIds
else:
tokenIds,segIds = [], []
for t in text:
res = self.token_to_ids(t, max_len)
tokenIds.append(res[0])
segIds.append(res[1])
return tokenIds,segIds
def testLoader(queries, Docs, mytokenizer):
    Context_ids, Context_msk = mytokenizer.token_to_ids(queries, max_len=args.max_contexts_length)
    Response_ids, Response_msk = mytokenizer.token_to_ids(Docs, max_len=args.max_response_length)
    return Context_ids, Context_msk, Response_ids, Response_msk
if __name__ == '__main__':
MODEL_CLASSES = {
'bert': (BertConfig, BertTokenizerFast, BertModel),
}
ConfigClass, TokenizerClass, BertModelClass = MODEL_CLASSES[args.model_type]
## init dataset and bert model
tokenizer = TokenizerClass.from_pretrained(os.path.join(args.bert_model, "vocab.txt"), do_lower_case=True, clean_text=False)
context_transform = SelectionJoinTransform(tokenizer=tokenizer, max_len=args.max_contexts_length)
response_transform = SelectionSequentialTransform(tokenizer=tokenizer, max_len=args.max_response_length)
concat_transform = SelectionConcatTransform(tokenizer=tokenizer, max_len=args.max_response_length+args.max_contexts_length)
print('=' * 80)
print('Train dir:', args.train_dir)
print('Output dir:', args.output_dir)
print('=' * 80)
state_save_path = os.path.join(args.output_dir, '{}_{}_pytorch_model.bin'.format(args.architecture, args.poly_m))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
########################################
## build BERT encoder
########################################
bert_config = ConfigClass.from_json_file(os.path.join(args.bert_model, 'config.json'))
bert = BertModelClass(bert_config)
if args.architecture == 'poly':
model = PolyEncoder(bert_config, bert=bert, poly_m=args.poly_m)
elif args.architecture == 'bi':
model = BiEncoder(bert_config, bert=bert)
elif args.architecture == 'cross':
model = CrossEncoder(bert_config, bert=bert)
else:
raise Exception('Unknown architecture.')
model.resize_token_embeddings(len(tokenizer))
model.to(device)
print('Loading parameters from', state_save_path)
model.load_state_dict(torch.load(state_save_path))
mytokenizer = Tokenizer(path_vocab=os.path.join(args.bert_model, "vocab.txt"),do_lower_case = True)
with open('/search/odin/guobk/data/vpaSupData/Docs-0809.json','r') as f:
Docs = json.load(f)
with open('/search/odin/guobk/data/vpaSupData/Q-all-test-20210809-rec.json','r') as f:
Queries = json.load(f)
Docs = [d['content'] for d in Docs]
Queries = [d['input'] for d in Queries]
Context_ids, Context_msk = mytokenizer.token_to_ids(Queries,max_len=args.max_contexts_length)
Response_ids, Response_msk = mytokenizer.token_to_ids(Docs,max_len=args.max_response_length)
Response_ids, Response_msk = np.array(Response_ids), np.array(Response_msk)
Response_ids, Response_msk = np.expand_dims(Response_ids,axis=0), np.expand_dims(Response_msk,axis=0)
Response_ids, Response_msk = torch.from_numpy(Response_ids).to(device), torch.from_numpy(Response_msk).to(device)
batch_size = 100
Res = []
for step in range(27,36):
context_ids = Context_ids[step]
context_msk = Context_msk[step]
context_ids, context_msk = np.array(context_ids),
|
np.array(context_msk)
|
numpy.array
|
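# --- Illustrative sketch (not part of the original file) ---
# How the MRR bookkeeping in eval_running_model above works: with exactly one
# relevant candidate (y_true has a single 1, at index 0),
# label_ranking_average_precision_score reduces to the reciprocal rank of that
# candidate under the given scores.
import numpy as np
from sklearn.metrics import label_ranking_average_precision_score
y_true = np.array([[1, 0, 0, 0]])           # the true response is candidate 0
scores = np.array([[0.2, 0.9, 0.1, 0.4]])   # the model ranks candidate 0 third
print(label_ranking_average_precision_score(y_true, scores))  # 1/3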
# This code is based on: https://github.com/msmsajjadi/precision-recall-distributions/blob/master/prd_score.py
"""Precision and recall computation based on samples from two distributions.
Given a set of generated samples and samples from the test set, both embedded in some feature space (say, embeddings of
Inception Net), it computes the precision and recall via the algorithm presented in [arxiv.org/abs/1806.00035]."""
from matplotlib import pyplot as plt
import numpy as np
import sklearn.cluster
def compute_prd(eval_dist, ref_dist, num_angles=1001, epsilon=1e-10):
"""Computes the PRD curve for discrete distributions.
This function computes the PRD curve for the discrete distribution [eval_dist] with respect to the reference
  distribution [ref_dist]. This implements the algorithm in [arxiv.org/abs/1806.00035]. The PRD will be computed for
an equiangular grid of [num_angles] values between [0, pi/2].
Args:
eval_dist: 1D NumPy array or list of floats with probabilities of the states under distribution to be evaluated.
ref_dist: 1D NumPy array or list of floats with probabilities of the states under the reference distribution.
    num_angles: Number of angles for which to compute PRD. Must be in [3, 1e6]. The default value is 1001.
epsilon: Angle for PRD computation in the edge cases 0 and pi/2. The PRD will be computed for epsilon and
pi/2-epsilon, respectively. The default value is 1e-10.
Returns:
precision: NumPy array of shape [num_angles] with the precision for the different ratios.
recall: NumPy array of shape [num_angles] with the recall for the different ratios.
Raises:
ValueError: If not 0 < epsilon <= 0.1.
ValueError: If num_angles < 3."""
if not (epsilon > 0 and epsilon < 0.1):
raise ValueError('epsilon must be in (0, 0.1] but is %s.' % str(epsilon))
if not (num_angles >= 3 and num_angles <= 1e6):
raise ValueError('num_angles must be in [3, 1e6] but is %d.' % num_angles)
# Compute slopes for linearly spaced angles between [0, pi/2]
angles = np.linspace(epsilon, np.pi/2 - epsilon, num=num_angles)
slopes = np.tan(angles)
# Broadcast slopes so that second dimension will be states of the distribution
slopes_2d = np.expand_dims(slopes, 1)
# Broadcast distributions so that first dimension represents the angles
ref_dist_2d = np.expand_dims(ref_dist, 0)
eval_dist_2d =
|
np.expand_dims(eval_dist, 0)
|
numpy.expand_dims
|
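# --- Illustrative sketch (not part of the original file) ---
# A minimal usage example of compute_prd as defined above, assuming it returns
# (precision, recall) arrays of length num_angles as its docstring describes.
# The two inputs are discrete distributions over the same states, e.g. relative
# frequencies of generated and real samples per histogram bin.
import numpy as np
eval_dist = np.array([0.5, 0.3, 0.2])   # generated samples, per state
ref_dist = np.array([0.4, 0.4, 0.2])    # real samples, per state
precision, recall = compute_prd(eval_dist, ref_dist, num_angles=11)
print(precision.shape, recall.shape)    # (11,) (11,)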
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.common.tensor import Tensor
from mindspore.nn import Cell
from mindspore.ops.operations import Tile
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
input_x0 = np.arange(2).reshape((2, 1, 1)).astype(np.float32)
mul0 = (8, 1, 1)
input_x1 = np.arange(32).reshape((2, 4, 4)).astype(np.float32)
mul1 = (2, 2, 2)
input_x2 = np.arange(1).reshape((1, 1, 1)).astype(np.float32)
mul2 = (1, 1, 1)
input_32_x0 = np.arange(2).reshape((2, 1, 1)).astype(np.int32)
mul_32_0 = (8, 1, 1)
input_32_x1 = np.arange(32).reshape((2, 4, 4)).astype(np.int32)
mul_32_1 = (2, 2, 2)
input_32_x2 = np.arange(1).reshape((1, 1, 1)).astype(np.int32)
mul_32_2 = (1, 1, 1)
input_16_x0 = np.arange(2).reshape((2, 1, 1)).astype(np.int16)
mul_16_0 = (8, 1, 1)
input_16_x1 = np.arange(32).reshape((2, 4, 4)).astype(np.int16)
mul_16_1 = (2, 2, 2)
input_16_x2 = np.arange(1).reshape((1, 1, 1)).astype(np.int16)
mul_16_2 = (1, 1, 1)
input_8_x0 = np.arange(2).reshape((2, 1, 1)).astype(np.uint8)
mul_8_0 = (8, 1, 1)
input_8_x1 = np.arange(32).reshape((2, 4, 4)).astype(np.int8)
mul_8_1 = (2, 2, 2)
input_8_x2 = np.arange(1).reshape((1, 1, 1)).astype(np.uint8)
mul_8_2 = (1, 1, 1)
class Net(Cell):
def __init__(self):
super(Net, self).__init__()
self.Tile = Tile()
self.input_x0 = Parameter(initializer(Tensor(input_x0), input_x0.shape), name='x0')
self.mul0 = mul0
self.input_x1 = Parameter(initializer(Tensor(input_x1), input_x1.shape), name='x1')
self.mul1 = mul1
self.input_x2 = Parameter(initializer(Tensor(input_x2), input_x2.shape), name='x2')
self.mul2 = mul2
@ms_function
def construct(self):
output = (self.Tile(self.input_x0, self.mul0),
self.Tile(self.input_x1, self.mul1),
self.Tile(self.input_x2, self.mul2))
return output
class Net32(Cell):
def __init__(self):
super(Net32, self).__init__()
self.Tile = Tile()
self.input_32_x0 = Parameter(initializer(Tensor(input_32_x0), input_32_x0.shape), name='x0')
self.mul_32_0 = mul_32_0
self.input_32_x1 = Parameter(initializer(Tensor(input_32_x1), input_32_x1.shape), name='x1')
self.mul_32_1 = mul_32_1
self.input_32_x2 = Parameter(initializer(Tensor(input_32_x2), input_32_x2.shape), name='x2')
self.mul_32_2 = mul_32_2
@ms_function
def construct(self):
output = (self.Tile(self.input_32_x0, self.mul_32_0),
self.Tile(self.input_32_x1, self.mul_32_1),
self.Tile(self.input_32_x2, self.mul_32_2))
return output
class Net16(Cell):
def __init__(self):
super(Net16, self).__init__()
self.Tile = Tile()
self.input_16_x0 = Parameter(initializer(Tensor(input_16_x0), input_16_x0.shape), name='x0')
self.mul_16_0 = mul_16_0
self.input_16_x1 = Parameter(initializer(Tensor(input_16_x1), input_16_x1.shape), name='x1')
self.mul_16_1 = mul_16_1
self.input_16_x2 = Parameter(initializer(Tensor(input_16_x2), input_16_x2.shape), name='x2')
self.mul_16_2 = mul_16_2
@ms_function
def construct(self):
output = (self.Tile(self.input_16_x0, self.mul_16_0),
self.Tile(self.input_16_x1, self.mul_16_1),
self.Tile(self.input_16_x2, self.mul_16_2))
return output
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tile():
net = Net()
output = net()
expect0 = np.tile(input_x0, mul0)
diff0 = output[0].asnumpy() - expect0
error0 = np.ones(shape=expect0.shape) * 1.0e-5
assert np.all(diff0 < error0)
assert output[0].shape == expect0.shape
expect1 = np.tile(input_x1, mul1)
diff1 = output[1].asnumpy() - expect1
error1 = np.ones(shape=expect1.shape) * 1.0e-5
assert np.all(diff1 < error1)
assert output[1].shape == expect1.shape
expect2 = np.tile(input_x2, mul2)
diff2 = output[2].asnumpy() - expect2
error2 = np.ones(shape=expect2.shape) * 1.0e-5
assert np.all(diff2 < error2)
assert output[2].shape == expect2.shape
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tile_32():
net = Net32()
output = net()
expect0 = np.tile(input_32_x0, mul_32_0)
diff0 = output[0].asnumpy() - expect0
error0 = np.ones(shape=expect0.shape) * 1.0e-5
assert np.all(diff0 < error0)
assert output[0].shape == expect0.shape
expect1 = np.tile(input_32_x1, mul_32_1)
diff1 = output[1].asnumpy() - expect1
error1 = np.ones(shape=expect1.shape) * 1.0e-5
assert np.all(diff1 < error1)
assert output[1].shape == expect1.shape
expect2 = np.tile(input_32_x2, mul_32_2)
diff2 = output[2].asnumpy() - expect2
error2 = np.ones(shape=expect2.shape) * 1.0e-5
assert np.all(diff2 < error2)
assert output[2].shape == expect2.shape
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tile_16():
net = Net16()
output = net()
expect0 = np.tile(input_16_x0, mul_16_0)
diff0 = output[0].asnumpy() - expect0
error0 = np.ones(shape=expect0.shape) * 1.0e-5
assert np.all(diff0 < error0)
assert output[0].shape == expect0.shape
expect1 = np.tile(input_16_x1, mul_16_1)
diff1 = output[1].asnumpy() - expect1
error1 = np.ones(shape=expect1.shape) * 1.0e-5
assert np.all(diff1 < error1)
assert output[1].shape == expect1.shape
expect2 = np.tile(input_16_x2, mul_16_2)
diff2 = output[2].asnumpy() - expect2
error2 =
|
np.ones(shape=expect2.shape)
|
numpy.ones
|
# -*- coding:utf-8 -*-
import cv2
import numpy as np
import time
import os
if __name__ == '__main__':
imagepath= 'd:/QATM_pytorch/Video1-capture/'
filenames=os.listdir(imagepath)
    kernel_2 = np.ones((2,2),np.uint8)      # 2x2 convolution kernel
    kernel_3 = np.ones((3,3),np.uint8)      # 3x3 convolution kernel
kernel_4 =
|
np.ones((4,4),np.uint8)
|
numpy.ones
|
import numpy as np
import torch
import torch.nn.functional as F
from torch.optim import Adam
from classifier_control.classifier.models.base_model import BaseModel
from classifier_control.classifier.utils.actor_network import ActorNetwork
from classifier_control.classifier.utils.general_utils import AttrDict
from classifier_control.classifier.utils.q_network import QNetwork
class QFunction(BaseModel):
INFINITELY_FAR = 1000
def __init__(self, overrideparams, logger=None):
super().__init__(logger)
self._hp = self._default_hparams()
self.overrideparams = overrideparams
self.override_defaults(overrideparams) # override defaults with config file
self.postprocess_params()
assert self._hp.batch_size != -1 # make sure that batch size was overridden
self.build_network()
self._use_pred_length = False
self.target_network_counter = 0
self.update_pi_counter = 0
self.hm_counter = 0 # counts when to compute and log a heatmap of the Q-function
def init_optimizers(self, hp):
self.critic_optimizer = Adam(self.qnetworks.parameters(), lr=hp.lr)
self.optimizer = self.critic_optimizer
if self.actor_critic:
self.actor_optimizer = Adam(self.pi_net.parameters(), lr=hp.lr)
if self._hp.min_q_lagrange:
self.log_alpha = torch.tensor(0.0, requires_grad=True, device=self.get_device())
self.alpha_optimizer = Adam(
[self.log_alpha, ],
lr=1e-3,
)
def optim_step(self, output):
losses = self.loss(output)
if self._hp.min_q_lagrange:
self.alpha_optimizer.zero_grad()
lagrange_loss = self.min_q_lagrange_loss
lagrange_loss.backward(retain_graph=True)
self.alpha_optimizer.step()
self.critic_optimizer.zero_grad()
losses.total_loss.backward(retain_graph=self.actor_critic)
self.critic_optimizer.step()
if self.actor_critic:
for p in self.qnetworks.parameters():
p.requires_grad = False
actor_loss = self.actor_loss()
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
for p in self.qnetworks.parameters():
p.requires_grad = True
losses.actor_loss = actor_loss
# Target network updates
""" Target network lagging update """
self.target_network_counter = self.target_network_counter + 1
if self.target_network_counter % self._hp.update_target_rate == 0:
self.target_network_counter = 0
if self._hp.target_network_update == 'replace':
self.target_qnetworks.load_state_dict(self.qnetworks.state_dict())
if self._hp.eval_target_nets:
self.target_qnetworks.eval()
if self.actor_critic:
self.target_pi_net.load_state_dict(self.pi_net.state_dict())
if self._hp.eval_target_nets:
self.target_pi_net.eval()
elif self._hp.target_network_update == 'polyak':
with torch.no_grad():
for q_net, t_q_net in zip(self.qnetworks, self.target_qnetworks):
for p, p_targ in zip(q_net.parameters(), t_q_net.parameters()):
p_targ.data.mul_(self._hp.polyak)
p_targ.data.add_((1 - self._hp.polyak) * p.data)
# Copy over batchnorm statistics
for p, p_targ in zip(q_net.buffers(), t_q_net.buffers()):
p_targ.data.mul_(0)
p_targ.data.add_(p.data)
if self.actor_critic:
for p, p_targ in zip(self.pi_net.parameters(), self.target_pi_net.parameters()):
p_targ.data.mul_(self._hp.polyak)
p_targ.data.add_((1 - self._hp.polyak) * p.data)
# Copy over batchnorm statistics
for p, p_targ in zip(self.pi_net.buffers(), self.target_pi_net.buffers()):
p_targ.data.mul_(0)
p_targ.data.add_(p.data)
return losses
def actor_loss(self):
image_pairs = self.get_sg_pair(self.images)
self.target_actions_taken = self.pi_net(image_pairs)
return -self.network_out_2_qval(self.qnetworks[0](image_pairs, self.target_actions_taken)).mean()
@property
def num_network_outputs(self):
return 1
def _default_hparams(self):
default_dict = AttrDict({
'use_skips':False,
'ngf': 8,
'action_size': 2,
'state_size': 30,
'nz_enc': 64,
'linear_layer_size': 128,
'classifier_restore_path':None, # not really needed here.,
'low_dim':False,
'gamma':0.0,
'terminal': True,
'update_target_rate': 1,
'action_range': [-1.0, 1.0],
'action_stds': [0.6, 0.6, 0.3, 0.3],
'est_max_samples': 100,
'binary_reward': [0, 1],
'n_step': 1,
'min_q': False,
'min_q_weight': 1.0,
'min_q_lagrange': False,
'min_q_eps': 0.1,
'sigmoid': False,
'optimize_actions': 'random_shooting',
'target_network_update': 'replace',
'polyak': 0.995,
'sg_sample': 'half_unif_half_first',
'geom_sample_p': 0.5,
'bellman_weight': 1.0,
'td_loss': 'mse',
'add_negative_sample': False,
'negative_sample_type': 'copy_arm', # also rand_arm, batch_goal
'gaussian_blur': False,
'twin_critics': False,
'add_action_noise': False,
'action_scaling': 1.0,
'eval_target_nets': True,
})
# add new params to parent params
parent_params = super()._default_hparams()
for k in default_dict.keys():
parent_params.add_hparam(k, default_dict[k])
return parent_params
def build_network(self, build_encoder=True):
q_network_type = QNetwork
num_q_fns = 2 if self._hp.twin_critics else 1
self.qnetworks = torch.nn.ModuleList([q_network_type(self._hp, self.num_network_outputs) for _ in range(num_q_fns)])
with torch.no_grad():
self.target_qnetworks = torch.nn.ModuleList([q_network_type(self._hp, self.num_network_outputs) for _ in range(num_q_fns)])
for i, t_qn in enumerate(self.target_qnetworks):
t_qn.load_state_dict(self.qnetworks[i].state_dict())
if self._hp.eval_target_nets:
t_qn.eval()
if self.actor_critic:
self.pi_net = ActorNetwork(self._hp)
with torch.no_grad():
self.target_pi_net = ActorNetwork(self._hp)
self.target_pi_net.load_state_dict(self.pi_net.state_dict())
if self._hp.eval_target_nets:
self.target_pi_net.eval()
def train(self, mode=True):
super(QFunction, self).train(mode)
if self._hp.eval_target_nets:
if self.actor_critic:
self.target_pi_net.eval()
for t_qn in self.target_qnetworks:
t_qn.eval()
return self
def network_out_2_qval(self, network_outputs):
"""
:param softmax_values: Tensor of softmax values of dimension [..., num_bins]
:return: Tensor of dimension [...] containing scalar q values.
"""
if self._hp.sigmoid:
network_outputs = F.sigmoid(network_outputs)
return network_outputs.squeeze()
def forward(self, inputs):
"""
forward pass at training time
:param
images shape = batch x time x channel x height x width
:return: model_output
"""
#### Train vs Test
if "demo_seq_images" in inputs.keys():
tlen = inputs.demo_seq_images.shape[1]
self.inputs = inputs
inputs.actions = inputs.actions * self._hp.action_scaling
image_pairs, acts = self.sample_image_triplet_actions(inputs.demo_seq_images,
inputs.actions, tlen, 1,
inputs.states)
self.images = image_pairs
if self._hp.low_dim:
image_0 = self.images[:, :self._hp.state_size]
image_g = self.images[:, 2*self._hp.state_size:]
else:
image_0 = self.images[:, :3]
image_g = self.images[:, 6:]
image_pairs = torch.cat([image_0, image_g], dim=1)
self.acts = acts
network_out = [qnet(image_pairs, acts) for qnet in self.qnetworks]
self.network_out = network_out
qval = [self.network_out_2_qval(n_out) for n_out in network_out]
else:
if self._hp.low_dim:
image_pairs = torch.cat([inputs["current_state"], inputs['goal_state']], dim=1)
else:
image_pairs = torch.cat([inputs["current_img"], inputs["goal_img"]], dim=1)
if 'actions' in inputs:
network_out = [qnet(image_pairs, inputs['actions']) for qnet in
self.target_qnetworks] # Just use the first one if we have two critics
qval = [self.network_out_2_qval(n_out) for n_out in network_out]
qs, _ = self.get_worst_of_qs(torch.stack(qval))
return qs.detach().cpu().numpy()
qval = self.get_max_q(image_pairs)
qval = torch.squeeze(qval).detach().cpu().numpy()
return qval
def get_best_of_qs(self, qvals):
return torch.max(qvals, dim=0)
def get_worst_of_qs(self, qvals):
return torch.min(qvals, dim=0)
def get_max_q(self, image_pairs, return_raw=False):
"""
:param image_pairs: image pairs (s)
:param return_raw: whether or not to return pre-network_out_2_q_val outputs
:return: max_a Q(s, a)
"""
if self._hp.optimize_actions == 'random_shooting':
ensem = []
for target_qnet in self.target_qnetworks:
qs = self.compute_action_samples(image_pairs, target_qnet, parallel=True, detach_grad=True, use_hp=False)
max_qs, inds = self.get_best_of_qs(self.network_out_2_qval(qs))
ensem.append(max_qs.detach())
max_qs, inds = self.get_worst_of_qs(torch.stack(ensem))
if return_raw:
max_q_raw_outs = max_qs[inds, torch.arange(len(inds))]
elif self.actor_critic:
with torch.no_grad():
best_actions = self.target_pi_net(image_pairs)
if self._hp.add_action_noise and self.training:
best_actions += torch.clamp(torch.normal(mean=0, std=0.1, size=best_actions.shape).cuda(), min=-0.2, max=0.2)
max_q_raw_outs = [q_net(image_pairs, best_actions) for q_net in self.target_qnetworks]
max_qs = [self.network_out_2_qval(raw_outs) for raw_outs in max_q_raw_outs]
max_qs, inds = self.get_worst_of_qs(torch.stack(max_qs))
max_q_raw_outs = torch.stack(max_q_raw_outs)[inds, torch.arange(len(image_pairs))]
if return_raw:
return max_qs, max_q_raw_outs
return max_qs
@property
def actor_critic(self):
return self._hp.optimize_actions == 'actor_critic'
def get_arm_state(self, states):
if states.shape[-1] > 18:
return torch.cat((states[..., :9], states[..., self._hp.state_size//2:self._hp.state_size//2+9]), axis=-1)
elif states.shape[-1] == 6:
return states
else:
raise NotImplementedError('state shape does not fit expectations')
def mse_reward(self, image_pairs):
split_ind = image_pairs.shape[1]//2
start_im, goal_im = image_pairs[:, :split_ind], image_pairs[:, split_ind:]
return -torch.mean((start_im - goal_im) ** 2)
def sample_sg_indices(self, tlen, bs, sampling_strat):
if sampling_strat == 'uniform_pair':
"""
Sample two sets of indices, and then compute the min to be the start index and max to be the goal.
This gives uniform probability over all possible selected _pairs_
"""
i0 = torch.randint(0, tlen, (bs,), device=self.get_device(), dtype=torch.long)
i1 = torch.randint(0, tlen-1, (bs,), device=self.get_device(), dtype=torch.long)
i1[i1 == i0] = tlen-1
return torch.min(i0, i1), torch.max(i0, i1)
elif sampling_strat == 'uniform_distance':
"""
Sample the distances between the pairs uniformly at random
"""
distance = torch.randint(1, tlen, (bs,), device=self.get_device(), dtype=torch.long)
i0 = torch.LongTensor([torch.randint(0, tlen-distance[b], (1,)) for b in range(bs)]).to(self.get_device())
i1 = i0 + distance
return i0, i1
elif sampling_strat == 'uniform_negatives':
"""
Sample the starting state first, then the goal from remaining possibilities. Note this is different from "uniform_pair".
"""
i0 = torch.randint(0, tlen - 2, (bs,), device=self.get_device(), dtype=torch.long)
i1 = torch.LongTensor([torch.randint(i0[b] + 2, tlen, (1,)) for b in range(bs)]).to(self.get_device())
return torch.min(i0, i1), torch.max(i0, i1)
elif sampling_strat == 'geometric':
"""
Sample the first index, and then sample the second according to a geometric distribution with parameter p
"""
i0 = torch.randint(0, tlen-1, (bs,), device=self.get_device(), dtype=torch.long)
dist = torch.distributions.geometric.Geometric(self._hp.geom_sample_p).sample((bs,)).to(self.get_device()).long() + 1
return i0, torch.clamp(i0+dist, max=tlen-1)
elif sampling_strat == 'half_unif_half_first':
"""
Sample half of the batch uniformly, and the other half so that ig = i0 + 1
"""
i0_first = torch.randint(0, tlen-1, (bs//2,), device=self.get_device(), dtype=torch.long)
ig_first = i0_first + 1
i0, ig = self.sample_sg_indices(tlen, bs//2, 'uniform_negatives')
return torch.cat([i0_first, i0]), torch.cat([ig_first, ig])
else:
            raise NotImplementedError(f'Sampling method {sampling_strat} not implemented!')
def add_negative_sample(self, s_t0, s_t1, s_tg, query_goal, acts, t0, t1, tg):
curr_bs = self._hp.batch_size
state_shape = [1] * len(s_t0.shape)
state_shape[0] = 2
s_t0 = s_t0.repeat(*state_shape) # These two remain unchanged, but tiled in dim 1
s_t1 = s_t1.repeat(*state_shape)
if self._hp.negative_sample_type == 'nn_idx':
arm_pos_query = self.get_arm_state(query_goal)[..., :self._hp.nn_dim]
close_inds = self.nn_idx.find_knn(arm_pos_query, k=2)[:, 1]
negative_sampled_goals = self.nn_idx.lookup(close_inds)
elif self._hp.negative_sample_type == 'batch_goal':
negative_sampled_goals = torch.roll(s_tg, 1, dims=0)
else:
raise NotImplementedError(f'Negative sample type {self._hp.negative_sample_type} not implemented!')
s_tg = torch.cat((s_tg, negative_sampled_goals))
acts = acts.repeat(2, 1)
t0 = t0.repeat(2)
t1 = t1.repeat(2)
if self._hp.negative_sample_type in ['copy_arm', 'batch_goal', 'nn_idx']:
tg_prime = torch.ones(curr_bs).to(self.get_device()).long() * self.INFINITELY_FAR
elif self._hp.negative_sample_type == 'rand_arm':
tg_prime = tg
tg = torch.cat((tg, tg_prime))
return s_t0, s_t1, s_tg, acts, t0, t1, tg
def sample_image_triplet_actions(self, images, actions, tlen, tdist, states):
if self._hp.state_size == 18:
states = self.get_arm_state(states)
else:
states = states[:, :, :self._hp.state_size]
t0, tg = self.sample_sg_indices(tlen, self._hp.batch_size, self._hp.sg_sample)
t1 = t0 + self._hp.n_step
im_t0 = select_indices(images, t0)
im_t1 = select_indices(images, t1)
im_tg = select_indices(images, tg)
if self._hp.low_dim:
s_t0 = select_indices(states, t0)
s_t1 = select_indices(states, t1)
else:
s_t0, s_t1 = None, None
s_tg = select_indices(states, tg)
acts = select_indices(actions, t0)
self.t0, self.t1, self.tg = t0, t1, tg
if self._hp.add_negative_sample:
if self._hp.low_dim:
s_t0, s_t1, s_tg, acts, t0, t1, tg = self.add_negative_sample(s_t0, s_t1, s_tg, s_tg, acts, t0, t1, tg)
else:
im_t0, im_t1, im_tg, acts, t0, t1, tg = self.add_negative_sample(im_t0, im_t1, im_tg, s_tg, acts, t0, t1, tg)
self.t0, self.t1, self.tg = t0, t1, tg
self.image_pairs = torch.stack([im_t0, im_tg], dim=1)
if self._hp.low_dim:
self.image_pairs_cat = torch.cat([s_t0, s_t1, s_tg], dim=1)
else:
self.image_pairs_cat = torch.cat([im_t0, im_t1, im_tg], dim=1)
return self.image_pairs_cat, acts
def select_close_arm_goals(self, states, s_t0):
# States is [B, T, N]
# get arm states
arm_states = states[:, :, :9]
tsteps = states.shape[1]
s_t0 = s_t0[:, :9]
out = []
for i, state in enumerate(s_t0):
distances = torch.norm(arm_states - state, dim=2) #distances is [B, T]
distances = torch.cat((distances[:i], distances[i+1:]), dim=0) #remove batch that traj is from
distances = distances.view(-1) # flatten into [(B-1)*T]
idx = torch.argmin(distances)
if idx >= tsteps * i:
idx += tsteps
out.append(states[idx//tsteps, idx % tsteps])
return torch.stack(out, dim=0)
def compute_action_samples(self, image_pairs, network, parallel=True, return_actions=False, detach_grad=True, use_hp=True):
if not parallel:
qs = []
actions_l = []
with torch.no_grad():
for _ in range(self._hp.est_max_samples):
actions = torch.FloatTensor(image_pairs.size(0), self._hp.action_size).uniform_(
*self._hp.action_range).to(self._hp.device)
targetq = network(image_pairs, actions)
if detach_grad:
targetq = targetq.detach()
qs.append(targetq)
if return_actions:
actions_l.append(actions)
qs = torch.stack(qs)
if actions_l:
actions = torch.stack(actions_l)
else:
num_samps = self._hp.est_max_samples if use_hp else 100
image_pairs_rep = image_pairs[None] # Add one dimension
repeat_shape = [num_samps] + [1] * len(image_pairs.shape)
image_pairs_rep = image_pairs_rep.repeat(*repeat_shape) # [num_samp, B, s_dim]
image_pairs_rep = image_pairs_rep.view(
*[num_samps * image_pairs.shape[0]] + list(image_pairs_rep.shape[2:]))
actions = torch.FloatTensor(image_pairs_rep.size(0), self._hp.action_size).uniform_(
*self._hp.action_range).to(self.get_device())
targetq = network(image_pairs_rep, actions)
if detach_grad:
targetq = targetq.detach()
qs = targetq.view(num_samps, image_pairs.size(0), -1)
if return_actions:
actions = actions.view(num_samps, image_pairs.size(0), -1)
return qs, actions
else:
return qs
def get_td_error(self, image_pairs, model_output):
max_qs = self.get_max_q(image_pairs)
give_reward = (self.t1 >= self.tg).float()
rew = give_reward * self._hp.binary_reward[1] + (1-give_reward) * self._hp.binary_reward[0]
terminal_flag = (self.t1 >= self.tg).type(torch.ByteTensor).to(self._hp.device)
self.train_batch_rews = rew
discount = self._hp.gamma**(1.0*self._hp.n_step)
if self._hp.terminal:
target = (rew + discount * max_qs * (1 - terminal_flag)) # terminal value
else:
target = rew + discount * max_qs
self.train_target_q_vals = target
if self._hp.td_loss == 'mse':
return sum([F.mse_loss(target, out) for out in model_output])
elif self._hp.td_loss == 'huber':
return sum([F.smooth_l1_loss(target, out) for out in model_output])
def get_sg_pair(self, t):
if self._hp.low_dim:
x = self._hp.state_size
else:
x = 3
return torch.cat((t[:, :x], t[:, 2 * x:]), axis=1)
@property
def cql_sign(self):
return 1
def loss(self, model_output):
if self._hp.low_dim:
image_pairs = self.images[:, self._hp.state_size:]
else:
image_pairs = self.images[:, 3:]
losses = AttrDict()
if self._hp.min_q:
# Implement minq loss
total_min_q_loss = []
self.min_q_lse = 0
for i, q_fn in enumerate(self.qnetworks):
random_q_values = self.network_out_2_qval(self.compute_action_samples(self.get_sg_pair(self.images), q_fn, parallel=True, detach_grad=False))
random_density = np.log(0.5 ** self._hp.action_size) # log uniform density
random_q_values -= random_density
min_q_loss = torch.logsumexp(random_q_values, dim=0) - np.log(self._hp.est_max_samples)
min_q_loss = min_q_loss.mean()
self.min_q_lse += min_q_loss
total_min_q_loss.append(min_q_loss - model_output[i].mean())
total_min_q_loss = self.cql_sign * torch.stack(total_min_q_loss).mean()
if self._hp.min_q_lagrange and hasattr(self, 'log_alpha'):
min_q_weight = self.log_alpha.exp().squeeze()
total_min_q_loss -= self._hp.min_q_eps
else:
min_q_weight = self._hp.min_q_weight
losses.min_q_loss = min_q_weight * total_min_q_loss
self.min_q_lagrange_loss = -1 * losses.min_q_loss
losses.bellman_loss = self._hp.bellman_weight * self.get_td_error(image_pairs, model_output)
losses.total_loss = torch.stack(list(losses.values())).sum()
if 'min_q_loss' in losses:
losses.min_q_loss /= min_q_weight # Divide this back out so we can compare log likelihoods
return losses
def _log_outputs(self, model_output, inputs, losses, step, log_images, phase, prefix=''):
model_output = model_output[0] # take first Q fn if multiple
if phase == 'train':
if self._hp.optimize_actions == 'actor_critic':
self.log_batch_statistics(f'{prefix}policy_update_actions', torch.abs(self.target_actions_taken), step, phase)
self.log_batch_statistics(f'{prefix}target_q_values', self.train_target_q_vals, step, phase)
self.log_batch_statistics(f'{prefix}q_values', model_output, step, phase)
self.log_batch_statistics(f'{prefix}rewards', self.train_batch_rews, step, phase)
if hasattr(self, 'min_q_lse'):
self.log_batch_statistics(f'{prefix}min_q_lse', self.min_q_lse, step, phase)
if hasattr(self, 'log_alpha'):
self._logger.log_scalar(self.log_alpha.exp().item(), f'{prefix}alpha', step, phase)
if log_images:
image_pairs = self.image_pairs
self._logger.log_single_tdist_classifier_image(image_pairs[:self._hp.batch_size//2], image_pairs[self._hp.batch_size//2:], model_output[:self._hp.batch_size],
'{}tdist{}'.format(prefix, "Q"), step, phase)
if self._hp.add_negative_sample:
self._logger.log_single_tdist_classifier_image(image_pairs[self._hp.batch_size:3*self._hp.batch_size // 2],
image_pairs[3*self._hp.batch_size // 2:],
model_output[self._hp.batch_size:],
'{}_negative_sample_tdist{}'.format(prefix, "Q"), step, phase)
def log_batch_statistics(self, name, values, step, phase):
self._logger.log_scalar(torch.mean(values).item(), f'{name}_mean', step, phase)
self._logger.log_scalar(torch.median(values).item(), f'{name}_median', step, phase)
if values.ndim != 0: #Don't log std for scalars
self._logger.log_scalar(torch.std(values).item(), f'{name}_std', step, phase)
def get_heatmap(self, data, x_range, y_range, side_len=101, frac=0.5):
def linspace_to_slice(min, max, num):
lsp = np.linspace(min, max, num)
delta = lsp[1] - lsp[0]
return slice(min, max + delta, delta)
x, y = np.mgrid[
linspace_to_slice(x_range[0], x_range[1], side_len),
linspace_to_slice(y_range[0], y_range[1], side_len)
]
import matplotlib.pyplot as plt
data = np.reshape(data, (side_len, side_len)).copy()
fig, ax = plt.subplots(figsize=(4, 3), dpi=80)
plt.plot(np.linspace(y_range[0], y_range[1], num=side_len), data[:, int(data.shape[0]*frac)])
ax.set(xlabel='obj y pos', ylabel='expected distance')
fig.canvas.draw()
        slice_image = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
slice_image = slice_image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
slice_image = np.transpose(slice_image, (2, 0, 1)) # [C, H, W]
plt.clf()
data = data[:-1, :-1]
cmap = plt.get_cmap('hot')
fig, ax = plt.subplots(figsize=(4, 3), dpi=80)
im = ax.pcolormesh(x, y, data, cmap=cmap)
fig.colorbar(im, ax=ax)
# plt.subplots_adjust(left=0.3, right=0, bottom=0.3, top=0)
ax.set(xlabel='obj x pos', ylabel='object y pos')
plt.tight_layout()
fig.canvas.draw()
        hmap = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
hmap_image = hmap.reshape(fig.canvas.get_width_height()[::-1] + (3,))
hmap_image = np.transpose(hmap_image, (2, 0, 1)) # [C, H, W]
plt.clf()
return
|
np.concatenate((hmap_image, slice_image), axis=1)
|
numpy.concatenate
|
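# --- Illustrative sketch (not part of the original file) ---
# The 'uniform_pair' strategy documented in sample_sg_indices above, as a
# standalone snippet: draw two independent indices, push the second draw to the
# last step on a collision, then take the min as the start index and the max as
# the goal index, which gives a uniform distribution over ordered pairs.
import torch
tlen, bs = 10, 5
i0 = torch.randint(0, tlen, (bs,))
i1 = torch.randint(0, tlen - 1, (bs,))
i1[i1 == i0] = tlen - 1                       # avoid start == goal
t_start, t_goal = torch.min(i0, i1), torch.max(i0, i1)
print(t_start, t_goal)                        # t_start < t_goal elementwise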
"""Module handling the creation and use of migration matrices."""
from copy import deepcopy
from warnings import warn
import numpy as np
from .binning import Binning, CartesianProductBinning
class ResponseMatrix:
"""Matrix that describes the detector response to true events.
Parameters
----------
reco_binning : RectangularBinning
The Binning object describing the reco categorization.
truth_binning : RectangularBinning
The Binning object describing the truth categorization.
nuisance_indices : list of ints, optional
List of indices of nuisance truth bins.
These are treated like their efficiency is exactly 1.
    impossible_indices : list of ints, optional
List of indices of impossible reco bins.
These are treated like their probability is exactly 0.
response_binning : CartesianProductBinning, optional
The Binning object describing the reco and truth categorization.
Usually this will be generated from the truth and reco binning.
Notes
-----
The truth and reco binnings will be combined with their
`cartesian_product` method.
    The truth bins corresponding to the `nuisance_indices` will be treated
    like they have a total efficiency of 1.
    The reco bins corresponding to the `impossible_indices` will be treated
like they are filled with a probability of 0.
Two response matrices can be combined by adding them ``new_resp = respA +
respB``. This yields a new matrix that is equivalent to one that has been
filled with the data in both ``respA`` and ``respB``. The truth and reco
binnings in ``respA`` and ``respB`` must be identical for this to make
sense.
Attributes
----------
truth_binning : Binning
The :class:`.Binning` object for the truth information of the events.
reco_binning : Binning
The :class:`.Binning` object for the reco information of the events.
response_binning : CartesianProductBinning
The :class:`.CartesianProductBinning` of reco and truth binning.
nuisance_indices : list of int
The truth data indices that will be handled as nuisance bins.
impossible_indices : list of int
The reco data indices that will be treated as impossible to occur.
filled_truth_indices : list of int
The data indices of truth bins that have at least one event in them.
"""
def __init__(
self,
reco_binning,
truth_binning,
nuisance_indices=None,
impossible_indices=None,
response_binning=None,
):
if nuisance_indices is None:
nuisance_indices = []
if impossible_indices is None:
impossible_indices = []
self.truth_binning = truth_binning
self.reco_binning = reco_binning
if response_binning is None:
self.response_binning = CartesianProductBinning(
[reco_binning.clone(dummy=True), truth_binning.clone(dummy=True)]
)
else:
self.response_binning = response_binning
self.nuisance_indices = nuisance_indices
self.impossible_indices = impossible_indices
self._update_filled_indices()
def _update_filled_indices(self):
"""Update the list of filled truth indices."""
self.filled_truth_indices = np.argwhere(
self.get_truth_entries_as_ndarray() > 0
).flatten()
def fill(self, *args, **kwargs):
"""Fill events into the binnings."""
self.truth_binning.fill(*args, **kwargs)
self.reco_binning.fill(*args, **kwargs)
self.response_binning.fill(*args, **kwargs)
self._update_filled_indices()
def _fix_rounding_errors(self):
"""Fix rounding errors that cause impossible matrices."""
resp = self.get_response_values_as_ndarray()
truth = self.get_truth_values_as_ndarray()
resp = resp.reshape((resp.size // truth.size, truth.size), order="C")
resp = np.sum(resp, axis=0)
diff = truth - resp
if np.any(truth < 0):
raise RuntimeError("Illegal response matrix: Negative true weight!")
if np.any(resp < 0):
raise RuntimeError(
"Illegal response matrix: Negative total reconstructed weight!"
)
if np.any(diff < -1e-9): # Allow rounding errors
raise RuntimeError(
"Illegal response matrix: Higher total reconstructed than true weight!"
)
if np.any(diff < 0.0): # But make sure truth is >= reco
fixed_truth = np.where(diff < 0, resp, truth)
self.truth_binning.set_values_from_ndarray(fixed_truth)
def fill_from_csv_file(self, *args, **kwargs):
"""Fill binnings from csv file.
See :meth:`Binning.fill_from_csv_file
<remu.binning.Binning.fill_from_csv_file>`
for a description of the parameters.
See also
--------
fill_up_truth_from_csv_file : Re-fill only truth bins from different file.
"""
Binning.fill_multiple_from_csv_file(
[self.truth_binning, self.reco_binning, self.response_binning],
*args,
**kwargs
)
self._fix_rounding_errors()
self._update_filled_indices()
def fill_up_truth_from_csv_file(self, *args, **kwargs):
"""Re-fill the truth bins with the given csv file.
This can be used to get proper efficiencies if the true signal events
are saved in a separate file from the reconstructed events.
It takes the same parameters as :meth:`fill_from_csv_file`.
Notes
-----
A new truth binning is created and filled with the events from the
provided file. Each bin is compared to the corresponding bin in the
already present truth binning. The larger value of the two is taken as
the new truth. This way, event types that are not present in the pure
truth data, e.g. background, are not affected by this. It can only
*increase* the value of the truth bins, lowering their efficiency.
For each truth bin, one of the following *must* be true for this
operation to make sense:
* All events in the migration matrix are also present in the truth
file. In this case, the additional truth events lower the
efficiency of the truth bin. This is the case, for example, if not
all true signal events are reconstructed.
* All events in the truth file are also present in the migration
matrix. In this case, the events in the truth file have no
influence on the response matrix. This is the case, for example, if
only a subset of the reconstructed background is saved in the truth
file.
If there are events in the response matrix that are not in the truth
tree *and* there are events in the truth tree that are not in the
response matrix, this method will lead to a *wrong* efficiency of the
affected truth bin.
"""
new_truth_binning = deepcopy(self.truth_binning)
new_truth_binning.reset()
new_truth_binning.fill_from_csv_file(*args, **kwargs)
return self._replace_smaller_truth(new_truth_binning)
def fill_up_truth(self, *args, **kwargs):
"""Re-fill the truth bins with the given events file.
This can be used to get proper efficiencies if the true signal events
are stored separate from the reconstructed events.
It takes the same parameters as :meth:`fill`.
Notes
-----
A new truth binning is created and filled with the events from the
provided events. Each bin is compared to the corresponding bin in the
already present truth binning. The larger value of the two is taken as
the new truth. This way, event types that are not present in the pure
truth data, e.g. background, are not affected by this. It can only
*increase* the value of the truth bins, lowering their efficiency.
For each truth bin, one of the following *must* be true for this
operation to make sense:
* All events in the migration matrix are also present in the new truth
events. In this case, the additional truth events lower the
efficiency of the truth bin. This is the case, for example, if not
all true signal events are reconstructed.
* All events in the new truth events are also present in the migration
matrix. In this case, the events in the new truth events have no
influence on the response matrix. This is the case, for example, if
only a subset of the reconstructed background is saved in the truth
file.
If there are events in the response matrix that are not in the new truth
events *and* there are events in the new truth events that are not in the
response matrix, this method will lead to a *wrong* efficiency of the
affected truth bin.
"""
new_truth_binning = deepcopy(self.truth_binning)
new_truth_binning.reset()
new_truth_binning.fill(*args, **kwargs)
return self._replace_smaller_truth(new_truth_binning)
def _replace_smaller_truth(self, new_truth_binning):
new_values = new_truth_binning.get_values_as_ndarray()
new_entries = new_truth_binning.get_entries_as_ndarray()
new_sumw2 = new_truth_binning.get_sumw2_as_ndarray()
old_values = self.truth_binning.get_values_as_ndarray()
old_entries = self.truth_binning.get_entries_as_ndarray()
old_sumw2 = self.truth_binning.get_sumw2_as_ndarray()
if np.any(new_values < 0):
i = np.argwhere(new_values < 0)
raise RuntimeError(
"Filled-up values are negative in %d bins." % (i.size,), stacklevel=3
)
where = new_values > 0
diff_v = new_values - old_values
diff_e = new_entries - old_entries
# Check for bins where the fill-up is less than the original
if np.any(where & (diff_v < -1e-9)):
i = np.argwhere(where & (diff_v < -1e-9))
warn(
"Filled-up values are less than the original filling in %d bins. This should not happen!"
% (i.size,),
stacklevel=3,
)
if np.any(where & (diff_e < 0)):
i = np.argwhere(where & (diff_e < 0))
warn(
"Filled-up entries are less than the original filling in %d bins. This should not happen!"
% (i.size,),
stacklevel=3,
)
where = where & (diff_v >= 0) & (diff_e >= 0)
self.truth_binning.set_values_from_ndarray(
|
np.where(where, new_values, old_values)
|
numpy.where
|
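# --- Illustrative sketch (not part of the original file) ---
# The fill-up rule implemented by _replace_smaller_truth above, on toy arrays:
# wherever the new truth binning is filled and not smaller than the old one,
# the new value replaces the old, so filling up can only lower a truth bin's
# efficiency.
import numpy as np
old_values = np.array([10.0, 5.0, 0.0])
new_values = np.array([12.0, 0.0, 3.0])   # e.g. from a pure-truth file
where = new_values > 0
filled = np.where(where, new_values, old_values)
print(filled)                             # new, old kept, new -> [12., 5., 3.]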
import numpy as np
import os
import glob
import matplotlib.pyplot as plt
from numpy.fft import fft, ifft
import random as rand
from scipy import interpolate
import NLS
import dNLS
import Dysthe as dy
import vDysthe as vdy
import dGT
import csv
from sys import exit
def createstruct(refdir,masterdir):
### Creates the system of directories necessary for the code to work and store the results
subdirs = ['Aug1Data/','Aug2Data/','JulyData/']
for sd in subdirs:
if os.path.isdir(sd+refdir)==False:
print('This is not a valid reference directory. Shutting down.')
exit()
inputpath = sd+refdir
outputpath = sd+masterdir
for dirpath, dirnames, filenames in os.walk(inputpath):
structure = os.path.join(outputpath, dirpath[len(inputpath):])
if not os.path.isdir(structure):
os.mkdir(structure)
else:
print("Folder already exits!")
def readsvals(whichset):
with open(whichset+'SpecialVals.txt') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
vals=[row for idx, row in enumerate(csv_reader) if idx in (4,5,6)]
w0 = vals[0][1]
epsilon = vals[1][1]
Del = vals[2][1]
return (float(w0),float(epsilon),float(Del))
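# --- Illustrative sketch (not part of the original file) ---
# A minimal demo of the idea behind STEP 6 of processnondim below: appending
# the reversed complex conjugate of a one-sided spectrum makes the full
# spectrum Hermitian, so its inverse FFT is (up to round-off) a real signal.
def _hermitian_spectrum_demo():
    positivey = np.array([0.0, 1.0 + 0.5j, 0.3 - 0.2j, 0.1 + 0.1j])
    negativey = np.conj(np.flip(positivey[1:], axis=0))
    ynew2 = np.append(positivey, negativey)
    signal = ifft(ynew2)
    return np.max(np.abs(np.imag(signal)))   # ~1e-16, i.e. effectively real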
def processnondim(masterdir,newn, L, doIplot = 'no',doIplot_chi = 'go'):
### STEP 1: Get distance information
distv = np.array([0.0,2400000.0,4200000.0,8700000.0]) # Distances between gauges in METERS
### STEP 2: Read in information at each gauge for each event
subdirs = ['Aug1Data/','Aug2Data/','JulyData/']
# Define something that will list directories that are not hidden
def listdirNH(path):
return glob.glob(os.path.join(path, '*'))
j = 0
for sd in subdirs:
whichset = sd+masterdir
files = listdirNH(sd+'Rescaled')
# Initialize some values
n = 0
pi =0
gaugenumber = 0
if doIplot_chi == 'go':
fig1,ax1 = plt.subplots(4)
# Get files
for f in files:
datavals = np.transpose(np.loadtxt(f).view(float))
N = len(datavals[1])
x = datavals[0] # Frequencies
sly = datavals[1] # Magnitudes
### STEP 3: Interpolate the data and get new y values
# Interpolation function
fnc = interpolate.interp1d(x, sly,kind ='cubic')
sly2 = fnc(x)
slymax = max(sly2)
xmaxloc = np.where(slymax==sly2)[0][0]
aa = x[0]
bb = x[-1]
# Choose the spacing of the grid based on the first maximum value
if gaugenumber == 0:
#deltaf=x[xmaxloc]*2/(newn+1) # The new spacing between points
# deltaf = 1 mc/s or 1 mHz from the Snodgrass paper.
deltaf = 1
# Create the new grid of x points based on the spacing deltaf and the location of the max
xn = np.zeros(newn)
xn[newn//2]=x[xmaxloc]
for p in range(newn//2-1):
xn[newn//2+p] = x[xmaxloc]+deltaf*(p)
xn[newn//2-p] = x[xmaxloc]-deltaf*(p)
# Restrict the x range to the x range given by the actual Snodgrass data
xfinal = np.array([])
for ixn in xn:
if np.logical_and(bb>ixn,aa<ixn):
xfinal=np.append(xfinal,ixn)
# Get the new values
ly = fnc(xfinal)*deltaf
# Find the significant wave height
#wvht = 4*np.sqrt(sum(ly))
# Adjust the units on the x axis to get to mHz
xfinal = xfinal*0.001
gaugenumber +=1
### STEP 4: Get the k vector using integer division and clean up
lenly = len(ly)
k = (xfinal)//(2*np.pi/L) # Then divide by 2pi/L (using integer division) to get the k vector
### STEP 5: Generate random phase and real and imaginary parts
randvals = np.random.rand(lenly)*2*np.pi
ascale = np.cos(randvals)*np.sqrt(ly)*0.01 # Rescale y axis
bscale = np.sin(randvals)*np.sqrt(ly)*0.01 # Rescale y axis
# Add the real and imaginary parts together
fakevals = (ascale+1j*bscale)
### STEP 6: Remove duplicate values and generate a 2-sided Hermitian spectrum for testing
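# A real-valued surface requires a Hermitian spectrum: the coefficient at -k must be the
# complex conjugate of the coefficient at +k, which is what the flipped, conjugated copy
# (negativey, built below) enforces before the two halves are joined.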
ndk = np.arange(k[0],k[-1]+1)
ndy = np.zeros(len(ndk),dtype=complex)
ndk= np.append(ndk,ndk[-1]+1)
ndy = np.append(ndy,0)
pip = 0
for ele in range(len(ndk)):
if ndk[ele] in k:
ndy[ele] = fakevals[pip]
pip+=1
extrazeros = np.zeros(int(ndk[0]))
positivey = np.append(extrazeros,ndy)
negativey = np.conj(np.append(np.flip(ndy[:-1],axis=0),extrazeros[1:]))
# New y values
ynew2=np.append(positivey,negativey)
ynew2 = ynew2*len(ynew2) # Need this to satisfy Parseval's theorem
extrak = np.arange(0,ndk[0])
# New x axis values
k1 = np.append(np.append(np.append(extrak,ndk),-np.flip(ndk[:-1],axis=0)),-np.flip(extrak,axis=0))[:-1]
# Check Parseval's thm, if desired
# dft = 1/len(ynew2)*sum(np.abs(fft(ynew2))**2)
# reg = sum(np.abs(ynew2)**2)
#print(dft,reg)
# Optional plotting
if doIplot=='go':
bbb = np.real(ifft(ynew2))
timex = np.linspace(0,L,len(bbb))
f1,a=plt.subplots(3)
a[0].plot(k1,np.real(ynew2),'.')
a[1].plot(k1,np.imag(ynew2),'.')
a[2].plot(k1,ynew2*np.conj(ynew2),'.',markersize=5)
a[2].plot(k,ly*len(ynew2)/2*0.01**2*len(ynew2)/2,'.')
f1.suptitle('Period: '+ str(L) + ' s')
f1.subplots_adjust(top=0.88)
g,b=plt.subplots(3)
b[0].plot(timex,np.real(bbb))
b[1].plot(timex,np.imag(bbb))
b[2].plot(timex,np.abs(bbb))
b[2].set_ylabel('Wave Height (m)')
b[1].set_ylabel('Wave Height (m)')
b[0].set_ylabel('Wave Height (m)')
b[2].set_xlabel('Time (s)')
b[1].set_xlabel('Time (s)')
b[0].set_xlabel('Time (s)')
g.suptitle(f)
g.subplots_adjust(top=0.88)
plt.show()
### STEP 7: Locate the carrier wave
#halfy = ynew2[:len(ynew2)//2]
carrierfreqs = k1*(2*np.pi/L)
if n==0:
carriermax = max(np.abs(ynew2))
carrierloc = np.where(np.logical_and(np.abs(ynew2)>=carriermax, np.abs(ynew2)<=carriermax))[0][0]
CarFreq = k1[carrierloc]
loc = np.where(k1==CarFreq) # The location of the carrier wave
### STEP 8: Get nondimensionalization constants
g = 9.81 #(m/s^2)
if n==0:
w0 = carrierfreqs[loc] # Get the value from the integer
k0 = w0**2/g # The carrier wavenumber
m = max(np.abs(ynew2)/len(ynew2)) # a0 in the original FD paper
epsilon = 2*m*k0 # The nondimensionalization constant epsilon
# print(f,'Special Values')
# print('delta f', deltaf)
# print('period',L)
# print('Maximum value',m)
# print('Wavenumber',float(k0))
# print('Carrier frequency',float(w0))
# print('epsilon',float(epsilon))
file = open(whichset+'/SpecialVals.txt','w')
file.write('delta f, '+str(float(deltaf))+'\n')
file.write('period, '+str(float(L))+'\n')
file.write('maximum value, '+str(float(m))+'\n')
file.write('wavenumber k0, '+str(float(k0))+'\n')
file.write('carrier frequency w0, '+ str(float(w0))+'\n')
file.write('epsilon, '+str(float(epsilon))+'\n')
file.close()
n = n+1
### STEP 9: Factor out the carrier wave
# Shorten the old spectrums
yhat1 = ynew2[:len(ynew2)//2]
k_new_1 = k1[:len(ynew2)//2]
newcarrierfreqs = carrierfreqs[:len(ynew2)//2]
k_new_2 = k_new_1 - k_new_1[loc]
### STEP 10: Sum to get back to temporal space and save
# Define new t data (and a new number of data points)
NNN = 1024
tnew = np.linspace(0,L-L/NNN,NNN) # ADD TO ICTEMPORALDATA
yN = len(yhat1)
# Find B, the new surface modulating envelope
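# B is assembled as a direct inverse Fourier sum of the one-sided spectrum with the
# wavenumbers shifted so the carrier sits at k = 0 (k_new_2), i.e. the slowly varying
# envelope that remains after the carrier oscillation has been factored out.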
B=np.zeros(NNN)
for v in range(len(yhat1)):
newvalue = yhat1[v]/yN*np.exp(1j*(2*np.pi/L)*k_new_2[v]*tnew) # Sum a new fourier series
B = B+newvalue
# Optional plotting
if doIplot=='go':
bfig,bax = plt.subplots(2)
bax[0].plot(tnew,np.real(B))
bax[1].plot(tnew,np.imag(B))
bfig.suptitle(str(f))
bfig.subplots_adjust(top=0.88)
figg,axx = plt.subplots()
axx.set_title(f)
axx.plot(NLS.kvec(NNN),1/NNN*np.abs(fft(B)),'.',markersize=10)
axx.plot(k_new_2,1/len(yhat1)*np.abs(yhat1),'.',markersize = 5) ###################
plt.show()
#plt.close('all')
# Save the x and y values for t and B
np.savetxt(whichset + '/Processed/'+f[-10:], np.transpose(np.append(tnew,B,axis=0)).view(float))
### STEP 11: Nondimensionalize values, plot, and save results
ndB = k0/epsilon*B # new "y"
xi = w0*epsilon*tnew # new "x"
# Fix up xi so that it is periodic and has the correct number of points to match B
xi_L = (xi[-1]-xi[0])+(xi[1]-xi[0])
xi_new = np.linspace(0,xi_L-xi_L/NNN,NNN,dtype=complex)
chi = epsilon**2*k0*distv # new "t"
# Optional plotting
if doIplot_chi=='go':
ax1[pi].set_title(r'$\chi$'+' = '+str(chi[pi]))
ax1[pi].plot(xi_new,np.real(ndB))
ax1[pi].plot(xi_new,np.imag(ndB))
pi+=1
# Save the nondimensional x and y axis, chi and non dim B
np.savetxt(whichset + '/NonDim Data/ND'+f[-10:],np.append([xi_new],[ndB],axis=0).view(float))
# Save just the nondimensional time vector
np.savetxt(whichset + '/chi.txt', chi.view(float))
# Optional plotting of all the dimensionless surfaces together
n=0
if doIplot_chi =='go':
fig1.suptitle(sd+', L = '+str(L))
fig1.tight_layout()
fig1.subplots_adjust(top=0.88)
#plt.show()
plt.savefig(whichset + '/NonDim Figs/allgauges.png',dpi=500)
def dataspecialvals(masterdir,showplots='no'):
# Define something that will list directories that are not hidden
def listdirNH(path):
return glob.glob(os.path.join(path, '*'))
### STEP 1: Read in the xi, B, and chi data
for whichset in ['Aug1Data/'+masterdir,'Aug2Data/'+masterdir,'JulyData/'+masterdir]:
# Choose the name of the file the data will be pulled from
dir = whichset+'/NonDim Data'
dirnames = listdirNH(dir)
dirlength = len(os.listdir(dir))
tvector = np.loadtxt(whichset+'/chi.txt').view(float) #times
values = {} # The x and y values at each location
for name in dirnames:
vdatavals = np.loadtxt(name).view(complex)# xi and B
title = name[-10:-4]
N = len(vdatavals[0])
# Save values to dictionaries
values[title]=np.append([vdatavals[0]],[vdatavals[1]],axis=0)
### STEP 2: Find the sideband values and the carrier wave location
# Find the sideband values to track through the rest of the program
x=values['gauge2'][0]
y=values['gauge2'][1]
# Perform an FFT of the y values
yhat =fft(y) # Fourier amplitudes
yhat1 = 1/N*np.abs(yhat) # Normalized fourier amplitudes
# Define some constants/constant vectors
L = (x[-1]-x[0])+(x[1]-x[0]) # The period
k1=NLS.kvec(N)
k=1/L*k1
# Find max Fourier amplitude and location
m = max(yhat1) # The max amplitude (m)
i = np.where(yhat1 == m)
i = i[0][0]
carrier = k[i]
# plt.plot(k,yhat1,'.')
# plt.plot(carrier,m,'.',markersize = 5)
# plt.show()
#Shift over the sideband vector so that the carrier wave is in the middle (carrier is 0 so this doesn't really matter)
sv = [] # the sideband vector
for j in range(len(yhat1)):
if yhat1[j]>0.00000000001:
sv.append(j)
for gauge in values:
tempyhat = np.abs(fft(values[gauge][1]))
# rfnc = interpolate.interp1d(k1, tempyhat,kind ='cubic') ### Need to make the curve make sense before it can be interpolated
# yhat3 = rfnc()
# Check which values are getting counted as sidebands:
plt.plot(k1,tempyhat,'.')
plt.plot(k1[sv],tempyhat[sv],'.',markersize=10)
if showplots =='yes':
plt.show()
### STEP 3: Find P, M, omega_m, omega_p, and the sidebands at each gauge location (each xi)
# Preallocate space for the data values to be stored
Pvals = np.zeros(len(tvector),dtype = float)
Mvals = np.zeros(len(tvector),dtype = float)
PMvals = np.zeros(len(tvector),dtype = float)
wpeak = np.zeros(len(tvector),dtype = float)
sideband7 = np.zeros((len(tvector),len(sv)),dtype = float)
j= 0
for n in values:
x=values[n][0]
y=values[n][1]
# Define some constants/constant vectors
L = (x[-1]-x[0])+(x[1]-x[0])
# Perform an FFT of the y values
yhat =fft(y) # Fourier amplitudes
yhat1 = 1/N*np.abs(yhat) # Normalized fourier amplitudes
# Find max Fourier amplitude and location
m = max(yhat1) # The max amplitude (m)
i = np.where(yhat1 == m)
i = i[0][-1]
carrier = k[i] # Find the location of the peak frequency
P = NLS.CQ_P1(y,L,k1) # Find P
M = NLS.CQ_M(y,L) # Find M
PM = P/M # Find omega_m
sidebands = yhat1[sv] # Find fourier amplitudes at sidebands
# plt.plot(k,yhat1,'.')
# plt.plot(k[sv],sidebands,'.',markersize = 10)
# plt.show()
Pvals[j] = np.real(P)
Mvals[j] = np.real(M)
PMvals[j] = np.real(PM)
wpeak[j] = np.real(carrier)
sideband7[j]=np.real(sidebands)
j=j+1
# Get the number on each sideband for labeling purposes
svlabp = []
iop = 0
for gg in range(len(sv)):
if sv[gg] <N//2:
svlabp.append(iop)
iop +=1
svlabn = []
iop = 1
for gg in range(len(sv)):
if np.flip(sv,axis =0)[gg] >N//2:
svlabn.append(-iop)
iop +=1
svlab = np.append(svlabp,np.flip(svlabn,axis=0),axis=0)
np.savetxt(whichset+'/sidebandnums'+'.txt', svlab.view(int))
### STEP 4: Save the Data
dir = whichset+'/Data CQs/NonDim CQ Values/'
savedata = np.append([tvector],[Pvals],axis=0)
np.savetxt(dir+'P'+'.txt', np.transpose(savedata).view(float))
savedata = np.append([tvector],[Mvals],axis=0)
np.savetxt(dir+'M'+'.txt', np.transpose(savedata).view(float))
savedata = np.append([tvector],[PMvals],axis=0)
np.savetxt(dir+'PM'+'.txt', np.transpose(savedata).view(float))
savedata = np.append([tvector],[wpeak],axis=0)
np.savetxt(dir+'wp'+'.txt', np.transpose(savedata).view(float))
savedata = np.insert(sideband7, 0, tvector, axis=1)
np.savetxt(dir+'sb'+'.txt', savedata.view(float))
### STEP 5: Plot the values at each time chi
plotem = 1
if plotem == 1:
# Plotting vectors
fig1, ax1 = plt.subplots(2,2,figsize = (10,6.5))
fig2, ax2 = plt.subplots(len(sv),sharex=True,figsize = (10,6.5))
fig1.suptitle('Quantities of Interest',fontsize=16)
fig2.suptitle('Select Fourier Amplitudes',fontsize=16)
plotter1 = [Pvals,Mvals,PMvals,wpeak]
titles1 = ['CQ P', 'CQ M', r'$\omega_m$', r'$\omega_p$']
# CQ figure
ax1 = ax1.flatten()
for i in range(len(plotter1)):
ax1[i].plot(tvector,plotter1[i],'.',markersize =15)
ax1[i].set_title(titles1[i])
ax1[i].set_xlabel(r'$\chi$')
# Sideband figure
for k in range(len(sv)):
vp = sideband7[:,k]
ax2[k].plot(tvector,vp,'.',markersize=10)
ax2[k].set_ylabel('a'+ r'$_{'+str(svlab[k])+'}$')
# Fix configurations
fig1.tight_layout()
fig1.subplots_adjust(top=0.88)
fig2.tight_layout()
fig2.subplots_adjust(top=0.88)
if showplots =='yes':
plt.show()
# STEP 6: Get a fit of M
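# The fit assumes exponential decay M(chi) ~ M(0)*exp(-2*delta*chi); taking logs makes
# ln(M) linear in -2*chi, so the least-squares slope m found below is the damping rate delta.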
fitx = -2*tvector # The x vals of the fit
fity = np.log(Mvals) # The y vals of the fit
# Get the fit and define a new y vector
A = np.vstack([fitx, np.ones(len(fitx))]).T
m, b = np.linalg.lstsq(A, fity,rcond=-1)[0] # m is delta
fittedy = m*fitx+b
newy = np.exp(fittedy)
file = open(whichset+'SpecialVals.txt','r')
last = file.readlines(-1)
line =str(last[-1][:-3])
if line[0:5] != 'delta':
file.close()
file = open(whichset+'SpecialVals.txt','a')
file.write('delta, '+str(float(m))+'\n')
file.close()
# Get the error of the fit
error = np.sum(np.abs(newy-Mvals)**2)/len(newy)
# Plot results of the fit
fig3,ax3 = plt.subplots(2,1)
fig3.suptitle('August 2: Fitting ' r'$\mathcal{M}$'+' to find '+r'$\delta$')
plt.text(.7, .7,'y='+str(m)[1:7]+'x'+str(b)[1:6])
ax3[1].plot(fitx,fity,'.',label = 'Linearized values')
ax3[1].plot(fitx,fittedy, label ='Linear fit')
ax3[1].set_xlabel('-2'+r'$\chi$')
ax3[1].set_ylabel('ln('+r'$\mathcal{M}$'+')')
ax3[1].legend(loc='upper left')
ax3[1].text(-8e-05,-0.58,r'$\delta$'+' = '+str(float(m))[:9])
ax3[0].plot(tvector,Mvals,'.', label = 'Actual values')
ax3[0].plot(tvector,newy,label = 'Fit curve')
ax3[0].set_xlabel(r'$\chi$')
ax3[0].set_ylabel(r'$\mathcal{M}$')
ax3[0].legend(loc='upper right')
fig3.tight_layout()
fig3.subplots_adjust(top=0.88)
if showplots =='yes':
plt.savefig('fitM.png',dpi=500)
#plt.show()
def runsims(masterdir,num_o_times, per):
# Per controls the 3/16ths rule
# Define master directory
for whichset in ['Aug1Data/'+masterdir,'Aug2Data/'+masterdir,'JulyData/'+masterdir]:
w0,epsilon,Del = readsvals(whichset)
# Read in x and y data
IC = np.loadtxt(whichset+'NonDim Data/NDgauge2.out').view(complex)
xspace = IC[0]
u0 = IC[1]
# Read in time data
times = np.loadtxt(whichset+'chi.txt').view(float)
starttime = times[0]
stoptime = times[-1]
#num_o_times = 300
simtimes = np.linspace(starttime,stoptime,num_o_times)
# Save time data
np.savetxt(whichset+'/Simulations/SimTime.txt',simtimes.view(float))
# Set operator splitting parameters
L = xspace[-1]-xspace[0]+(xspace[1]-xspace[0])
gridnum = len(xspace)
k, expconsts = vdy.kvec(gridnum,L)
rk4steps = 1
# Perform operator splitting
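# Each model (NLS, dNLS, Dysthe, vDysthe, dGT) is advanced from the same initial envelope u0
# to every output time with a sixth-order splitting scheme; the number of sub-steps grows with
# the target time so the step size deltat stays roughly constant across output times.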
r_NLS = np.zeros((num_o_times,gridnum),dtype=complex)
r_dNLS = np.zeros((num_o_times,gridnum),dtype=complex)
r_Dysthe = np.zeros((num_o_times,gridnum),dtype=complex)
r_vDysthe = np.zeros((num_o_times,gridnum),dtype=complex)
r_dGT = np.zeros((num_o_times,gridnum),dtype=complex)
r_NLS[0,:]=u0
r_dNLS[0,:]=u0
r_Dysthe[0,:]=u0
r_vDysthe[0,:]=u0
r_dGT[0,:]=u0
xyz = 0
for t in range(1,num_o_times):
steps = t*1
endtime = simtimes[t]
deltat = (endtime-starttime)/steps
#print(steps, endtime, deltat)
sim_NLS = NLS.sixth(u0,deltat,steps,expconsts,per)
sim_dNLS = dNLS.sixth(u0,deltat,steps,expconsts,Del,per)
sim_Dysthe = dy.sixth(u0,deltat,steps,rk4steps,k,expconsts,epsilon,per)
sim_vDysthe = vdy.sixth(u0,deltat,steps,rk4steps,k,expconsts,epsilon,Del,3/4)
sim_dGT = dGT.sixth(u0,deltat,steps,rk4steps,k,expconsts,epsilon,Del,per)
if np.isnan(sim_vDysthe[0]):
if xyz == 0:
os.system('say "Simulation Overflow Error"')
xyz += 1
input('Continue? Press enter.')
r_NLS[t,:]=sim_NLS
r_dNLS[t,:]=sim_dNLS
r_Dysthe[t,:]=sim_Dysthe
r_vDysthe[t,:]=sim_vDysthe
r_dGT[t,:]=sim_dGT
#print('vdysthe',np.sum(abs(sim_dGT-sim_vDysthe)**2))
#print('dnls',np.sum(abs(sim_dGT-sim_dNLS)**2))
# Save data
for s in range(num_o_times):
if s < 10:
ss = '00'+str(s)
elif 9<s<100:
ss='0'+str(s)
else:
ss = str(s)
np.savetxt(whichset+'Simulations/NLS Sim/simNLS'+ss+'.txt',r_NLS[s].view(float))
np.savetxt(whichset+'Simulations/dNLS Sim/simdNLS'+ss+'.txt',r_dNLS[s].view(float))
np.savetxt(whichset+'Simulations/Dysthe Sim/simDysthe'+ss+'.txt',r_Dysthe[s].view(float))
np.savetxt(whichset+'Simulations/vDysthe Sim/simvDysthe'+ss+'.txt',r_vDysthe[s].view(float))
np.savetxt(whichset+'Simulations/dGT Sim/simdGT'+ss+'.txt',r_dGT[s].view(float))
def simspecialvals(masterdir):
# Define something that will list directories that are not hidden
def listdirNH(path):
return glob.glob(os.path.join(path, '*'))
### STEP 1: Load in simulation data
# Load time values
for whichset in ['Aug1Data/'+masterdir,'Aug2Data/'+masterdir,'JulyData/'+masterdir]:
tvector = np.loadtxt(whichset+'Simulations/SimTime.txt').view(float)
# Choose the name of the file the data will be pulled from
masterdir1 = whichset+'Simulations/'
dir = ['dGT Sim','dNLS Sim', 'Dysthe Sim','NLS Sim','vDysthe Sim']
NLSd = {}
dNLSd = {}
Dysthed = {}
vDysthed = {}
dGTd = {}
Dictionaries = [dGTd,dNLSd,Dysthed,NLSd,vDysthed] # Alphabetized
# Read in the initial data
IC = np.loadtxt(whichset+'NonDim Data/NDgauge2.out').view(complex)
x = IC[0]
y = IC[1]
h = 0
for m in dir:
dict = Dictionaries[h]
dirnames = listdirNH(masterdir1+m)
dirlength = len(dirnames)
kk = 0
for name in dirnames:
if os.path.isfile(name) == True:
title = tvector[kk]
vdatavals = np.loadtxt(name).view(complex)
N = len(vdatavals)
dict[title]=np.append([x],[vdatavals],axis=0)
kk=kk+1
h=h+1
### STEP 2: Find the sideband values and the carrier wave location
# Perform an FFT of the y values
yhat =fft(y) # Fourier amplitudes
yhat1 = 1/N*np.abs(yhat) # Normalized fourier amplitudes
# Define some constants/constant vectors
L = x[-1]-x[0]+(x[1]-x[0])
k=NLS.kvec(N)
#sv = np.array([-3,-2,-1,0,1,2,3]) # The sideband vector
sv = [] # the sideband vector
for j in range(len(yhat1)):
if yhat1[j]>0.00000000001:
sv.append(j)
lll = len(sv)
# Find max Fourier amplitude and location
mt = max(yhat1) # The max amplitude (m)
i = np.where(yhat1 == mt)[0][0] ################################
carrier = 1/L*k[i]
### STEP 3: Find P, M, omega_m, omega_p, and the sidebands at each gauge location (each xi)
NLSCQ = {}
dNLSCQ = {}
DystheCQ = {}
vDystheCQ = {}
dGTCQ = {}
CQDict = [dGTCQ,dNLSCQ, DystheCQ, NLSCQ, vDystheCQ]
keys = ['P', 'M', 'PM', 'wp', 'sb']
dname = ['dGT CQ','dNLS CQ','Dysthe CQ','NLS CQ','vDysthe CQ']
cid = 0
for dict in Dictionaries:
Pvals = np.zeros(len(tvector))
Mvals = np.zeros(len(tvector))
PMvals = np.zeros(len(tvector))
wpeak = np.zeros(len(tvector))
sideband7 = np.zeros((len(tvector),lll))
j=0
CQs = CQDict[cid]
for n in dict:
x=dict[n][0]
y=dict[n][1]
# Perform an FFT of the y values
yhat =fft(y) # Fourier amplitudes
yhat1 = 1/N*np.abs(yhat) # Normalized fourier amplitudes
# Find max Fourier amplitude and location
m = max(yhat1) # The max amplitude (m)
i = np.where(yhat1 == m)
carrier = 1/L*k[i] # omega_p
P = NLS.CQ_P1(y,L,k) #P
M = NLS.CQ_M(y,L) # M
PM = P/M # omega_m
sidebands = yhat1[sv] # sidebands
Pvals[j] = np.real(P)
Mvals[j] = np.real(M)
PMvals[j] = np.real(PM)
wpeak[j] = np.real(carrier[0])
sideband7[j]=sidebands
j=j+1
valuevect = [Pvals,Mvals,PMvals,wpeak,sideband7]
for val in range(len(keys)):
CQs[keys[val]] = valuevect[val]
# Get the number on each sideband for later labeling purposes
svlabp = []
iop = 0
for gg in range(len(sv)):
if sv[gg] <N//2:
svlabp.append(iop)
iop +=1
svlabn = []
iop = 1
for gg in range(len(sv)):
if np.flip(sv,axis =0)[gg] >N//2:
svlabn.append(-iop)
iop +=1
svlab = np.append(svlabp,np.flip(svlabn,axis=0),axis=0)
### STEP 4: Plot the results
plotem = 1
if plotem == 1:
tn = dname[cid]
# Plotting vectors
fig1, ax1 = plt.subplots(2,2,figsize = (10,6.5))
fig1.suptitle(tn+' Quantities of Interest',fontsize=16)
plotter1 = [Pvals,Mvals,PMvals,wpeak]
titles1 = ['CQ P', 'CQ M', r'$\omega_m$', r'$\omega_p$']
ax1 = ax1.flatten()
for i in range(len(plotter1)):
ax1[i].plot(tvector,plotter1[i])
ax1[i].set_title(titles1[i])
ax1[i].set_xlabel(r'$\chi$')
fig1.tight_layout()
fig1.subplots_adjust(top=0.88)
plt.savefig(whichset+'Simulations/CQ Figs/CQ '+tn+'.png')
fig2, ax2 = plt.subplots(lll,sharex=True,figsize = (7,1.625*lll))
fig2.suptitle(tn+' Select Fourier Amplitudes',fontsize=16)
for po in range(lll):
vp = sideband7[:,po]
ax2[po].plot(tvector,vp)
ax2[po].set_ylabel('a'+ r'$_{'+str(svlab[po])+'}$')
fig2.tight_layout()
fig2.subplots_adjust(top=0.97)
plt.savefig(whichset+'Simulations/CQ Figs/Sidebands '+tn+'.png')
cid = cid +1
plt.close('all')
### STEP 5: Save the Data
dval = 0
for dict in CQDict:
dctnm = dname[dval]
m=0
for cons in dict:
filename = whichset+'Simulations/Conserved Quantities/'+dctnm+'/'+cons+'.txt'
consval = dict[cons]
if consval.shape==tvector.shape:
savedata = np.append([tvector],[consval],axis=0)
np.savetxt(filename, np.transpose(savedata).view(float))
else:
savedata = np.insert(consval, 0, tvector, axis=1)
np.savetxt(filename, savedata.view(float))
m=m+1
dval = dval +1
def redim(masterdir):
# Define something that will list directories that are not hidden
def listdirNH(path):
return glob.glob(os.path.join(path, '*'))
### STEP 1: READ IN THE EXPERIMENTAL DATA FILES
for whichset in ['Aug1Data/'+masterdir,'Aug2Data/'+masterdir,'JulyData/'+masterdir]:
# Define the dictionaries
P = {}
M = {}
PM = {}
sb = {}
wp = {}
MasterDict = [M,P,PM,sb,wp]
# Start reading in the data
dir = whichset+'Data CQs/NonDim CQ Values'
files = listdirNH(dir)
key1 = 'Data'
l = 0
for k in files:
Dict = MasterDict[l]
data = np.transpose(np.loadtxt(k).view(float))
Dict[key1]=data
l=l+1
### STEP 2: READ IN THE SIMULATION DATA
dir = whichset+'Simulations/Conserved Quantities'
key2 = ['dGT CQ','dNLS CQ', 'Dysthe CQ', 'NLS CQ', 'vDysthe CQ']
dk = 0
for subdir in key2:
files = listdirNH(dir+'/'+subdir)
l = 0
for k in files:
Dict = MasterDict[l]
data = np.transpose(np.loadtxt(k).view(float))
Dict[key2[dk]]=data
l=l+1
dk = dk+1
### STEP 3: DIMENSIONALIZE THE DATA
# Define dimensionalization constants
g = 9.81
w0,epsilon,Del = readsvals(whichset)
k0 = w0**2/g
# Dim P
dim_P = {}
for key in P:
ent = P[key]
xi = ent[0]
p = ent[1]
x = xi/(epsilon**2*k0)
dim_p = epsilon**3*w0/k0**2*p
dim_P[key] =
|
np.append([x],[dim_p],axis = 0)
|
numpy.append
|
"""
Code for Phase Retrieval Experiments (Section 6.3) of the paper:
Convex-Concave Backtracking for Inertial Bregman Proximal Gradient
Algorithms in Non-Convex Optimization
Paper authors: <NAME>, <NAME>,
<NAME> and <NAME>.
Other related codes at:
https://github.com/mmahesh/cocain-bpg-escapes-spurious-stationary-points
https://github.com/mmahesh/cocain-bpg-matrix-factorization
Algorithms Implemented:
BPG: Bregman Proximal Gradient
CoCaIn BPG: Convex Concave Inertial (CoCaIn) BPG
BPG-WB: BPG with Backtracking
IBPM-LS: Inexact Bregman Proximal Minimization Line Search Algorithm
References:
CoCaIn BPG paper: https://arxiv.org/abs/1904.03537
BPG paper: https://arxiv.org/abs/1706.06461
IBPM-LS paper: https://arxiv.org/abs/1707.02278
Contact: <NAME> (<EMAIL>)
"""
import numpy as np
from my_functions import *
import argparse
parser = argparse.ArgumentParser(description='Phase Retrieval Experiments')
parser.add_argument('--lam', '--regularization-parameter',
default=1e-1, type=float, dest='lam')
parser.add_argument('--algo', '--algorithm', default=1, type=int, dest='algo')
parser.add_argument('--max_iter', '--max_iter',
default=1000, type=int, dest='max_iter')
parser.add_argument('--fun_num', '--fun_num', default=1,
type=int, dest='fun_num')
parser.add_argument('--abs_fun_num', '--abs_fun_num',
default=1, type=int, dest='abs_fun_num')
parser.add_argument('--breg_num', '--breg_num',
default=1, type=int, dest='breg_num')
args = parser.parse_args()
# some backward compatibility and initialization
lam = args.lam
algo = args.algo
fun_num = args.fun_num
abs_fun_num = args.abs_fun_num
breg_num = args.breg_num
# for forward backward splitting only
np.random.seed(0)
max_iter = 1000
dim = 10 # dimension 10
temp_Alist = []
temp_blist = []
global_L = 0
for i in range(100):
temp_A = np.random.rand(dim, 1)
A = temp_A*temp_A.T
temp_Alist = temp_Alist + [A]
temp_b = np.random.rand(1)[0]
global_L = global_L + 3*(np.linalg.norm(A)**2) + \
np.linalg.norm(A)*abs(temp_b)
temp_blist = temp_blist + [temp_b]
A = temp_Alist
b = temp_blist
if fun_num == 1:
# fun_num = 1 for L1 Regularization
del_val = 0.15
eps_val = 0.00001
uL_est = 10
lL_est = 1e-4*uL_est
init_U = uL_est
U = np.ones(dim)
lam = 1e-1
prev_U = U
if fun_num == 2:
# fun_num = 2 for L2 Regularization
del_val = 0.15
eps_val = 0.00001
uL_est = 10
lL_est = 1e-4*uL_est
U = np.ones(dim)
init_U = uL_est
lam = 1e-1
prev_U = U
lL_est_main = lL_est
def prox_onto_cube(x):
return np.maximum(-1.0,np.minimum(1.0,x))
def prox_L1(x, tau):
return np.maximum(0, np.abs(x)-tau)*np.sign(x)
def prox_squared_L2(x, tau):
return x/(1+tau)
def make_update_new(y, grad, uL_est, option=0):
# option=0 => L1 Regularization
# option=1 => L2 Regularization
# Bregman Proximal Mapping with L1 Regularization
# Probably requires PDHG algorithm/ADMM (and depends on its parameters)
# global variable lam is used.
# y is actually x^k
# Subproblem objective
# TODO: Check once more
def internal_objective(x,y, tau):
# Model around y
return abs_func(A, b, x, y, lam, abs_fun_num=abs_fun_num, fun_num=fun_num)\
+ ((0.5/tau)*breg(x, y, breg_num=breg_num))
# Constructing K.
# TODO: Here I am using 100 as the default setting; make it general
K = np.zeros((len(y), 100)) # TODO: Should be K^T
temp_b_2 = np.zeros_like(b) # tilde b
count = 0
for item in A:
temp_x_val = 2*item.dot(y)*(y.T.dot(item.dot(y)) - b[count])
K[:, count] = temp_x_val
temp_b_2[count] = (y.T.dot(item.dot(y)) - b[count])**2 - \
((temp_x_val.dot(y))*(y.T.dot(item.dot(y)) - b[count]))
count+=1
K = K.T
# param value
L_pdhg = np.linalg.norm(K)
tau = 0.1/L_pdhg
sigma = 0.99/(L_pdhg*0.1)
c_1 = tau*uL_est*1.1
c_2 = c_1 + 1
# primal variables
x_1 = y.copy()
x_hat = x_1.copy()
# dual variables
p = np.zeros_like(b)
max_sub_iter = 1000
for iter in range(max_sub_iter):
def del_val(x,y):
del_val = c_1*((np.linalg.norm(x)**2)+1)*x + y
return del_val
# dual update step
vec_2 = sigma*K.dot(2*x_1 - x_hat)
p = prox_onto_cube(p+vec_2+sigma*temp_b_2)
x_hat = x_1.copy()
if option == 0:
# primal update step
c_3 = del_val(x_1 - tau*K.T.dot(p), y)
c_3 = prox_L1(c_3, lam*tau)
temp_pnorm = np.linalg.norm(c_3)**2
coeff = [temp_pnorm*c_1, 0, c_2, -1]
temp_y = np.roots(coeff)[-1].real
x_1 = temp_y*c_3
else:
x_1 = prox_squared_L2(0.5*(x_1 - tau*K.T.dot(p)) + 0.5*(y), lam*tau)
# print('Objective ' + str(internal_objective(x_1, y, (0.9/uL_est))) +
# ' tau ' + str((0.9/uL_est)))
# TODO: Internal objective not giving zero objective when
return x_1
def make_update(y, grad, uL_est):
# Bregman Proximal Mapping with L1 Regularization
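# Closed-form Bregman step: soft-threshold p = grad_h(y) - (1/uL_est)*grad, where
# grad_h(y) = (||y||^2 + 1)*y is the gradient of the kernel h(x) = 0.25*||x||^4 + 0.5*||x||^2,
# then rescale by the real root t of ||p||^2*t^3 + t - 1 = 0 so that grad_h(t*p) = p.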
temp_p = (1/uL_est)*grad - (y.T.dot(y))*y - y
temp_p = np.maximum(0, np.abs(temp_p)-lam*(1/uL_est))*np.sign(-temp_p)
temp_pnorm = np.linalg.norm(temp_p)**2
coeff = [temp_pnorm, 0, 1, -1]
temp_y = np.roots(coeff)[-1].real
print('temp_y L1 is ' + str(temp_y))
return temp_y*temp_p
def make_update1(y, grad, uL_est):
# Bregman Proximal Mapping with L2 Regularization
temp_p = (1/uL_est)*grad - (y.T.dot(y))*y - y
temp_pnorm = np.linalg.norm(temp_p)**2
coeff = [temp_pnorm, 0, (2*lam*(1/uL_est) + 1), 1]
temp_y = np.roots(coeff)[-1].real
print('temp_y L2 is '+ str(temp_y))
return temp_y*temp_p
def find_gamma(A, b, U, prev_U, uL_est, lL_est):
# Finding the inertial parameter gamma
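# gamma starts at 1 and is shrunk by a factor of 0.9 until the Bregman distance of the
# extrapolated point y_U from U is at most kappa times the distance between the last two
# iterates, which is the backtracking condition used to pick the inertia in CoCaIn BPG.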
gamma = 1
kappa = (del_val - eps_val)*(uL_est/(uL_est+lL_est))
y_U = U + gamma*(U-prev_U)
while (kappa*breg(prev_U, U, breg_num=breg_num, A=A, b=b, lam=lam) \
< breg(U, y_U, breg_num=breg_num, A=A, b=b, lam=lam)):
gamma = gamma*0.9
y_U = U + gamma*(U-prev_U)
return y_U, gamma
# def find_closed_gamma(A, b, U, prev_U, uL_est, lL_est):
# # Finding the inertial parameter gamma
# kappa = (del_val - eps_val)*(uL_est/(uL_est+lL_est))
# Delta_val = np.linalg.norm(U-prev_U)**2
# print(Delta_val)
# if Delta_val <0:
# y_U = U
# gamma = 0
# else:
# temp_var = (1.5*Delta_val + (7/4) )*(np.linalg.norm(U)**2)
# gamma = np.sqrt(kappa*breg(prev_U, U,\
# breg_num=breg_num, A=A, b=b, lam=lam)/temp_var)
# y_U = U + gamma*(U-prev_U)
# return y_U, gamma
def find_closed_gamma(A, b, U, prev_U, uL_est, lL_est):
# Finding the inertial parameter gamma
kappa = (del_val - eps_val)*(uL_est/(uL_est+lL_est))
Delta_val = np.linalg.norm(U-prev_U)**2
print(Delta_val)
if Delta_val <=0:
y_U = U
gamma = 0
else:
temp_var = (3*(np.linalg.norm(U)**2) + (7/2) )*Delta_val
gamma = np.sqrt(kappa*breg(prev_U, U,\
breg_num=breg_num, A=A, b=b, lam=lam)/temp_var)
y_U = U + gamma*(U-prev_U)
return y_U, gamma
def do_lb_search(A, b, U, U1, lam, uL_est, lL_est, closed_form=0):
# Lower bound backtracking
if closed_form==0:
y_U, gamma = find_gamma(A, b, U, U1, uL_est, lL_est)
else:
y_U, gamma = find_closed_gamma(A, b, U, U1, uL_est, lL_est)
while((abs_func(A, b, U, y_U, lam, abs_fun_num=abs_fun_num, fun_num=fun_num)
- main_func(A, b, U, lam, fun_num=fun_num)
- (lL_est*breg(U, y_U,breg_num=breg_num, A=A, b=b, lam=lam))) > 1e-7):
lL_est = (2)*lL_est
if closed_form==0:
y_U, gamma = find_gamma(A, b, U, U1, uL_est, lL_est)
else:
y_U, gamma = find_closed_gamma(A, b, U, U1, uL_est, lL_est)
return lL_est, y_U, gamma
def do_ub_search(A, b, y_U, uL_est):
# compute gradients
grad_u = grad(A, b, y_U, lam, fun_num=fun_num)
# make update step
if fun_num == 1:
x_U = make_update(y_U, grad_u, uL_est)
elif fun_num == 2:
x_U = make_update1(y_U, grad_u, uL_est)
else:
raise
delta_new = (abs_func(A, b, x_U, y_U, lam, abs_fun_num=abs_fun_num, fun_num=fun_num)
- main_func(A, b, x_U, lam, fun_num=fun_num)
+ (uL_est*breg(x_U, y_U, breg_num=breg_num, A=A, b=b, lam=lam)))
print('Delta is ' + str(delta_new))
while((delta_new < -1e-7)) :
delta_prev = delta_new
delta_new = (abs_func(A, b, x_U, y_U, lam, abs_fun_num=abs_fun_num, fun_num=fun_num)
- main_func(A, b, x_U, lam, fun_num=fun_num)
+ (uL_est*breg(x_U, y_U, breg_num=breg_num, A=A, b=b, lam=lam)))
print('Delta is '+ str(delta_new))
uL_est = (2)*uL_est
print('uL_est is '+ str(uL_est))
# make update step
if fun_num == 1:
x_U = make_update(y_U, grad_u, uL_est)
elif fun_num == 2:
x_U = make_update1(y_U, grad_u, uL_est)
else:
raise
return uL_est, x_U
def obtain_delta(A, b, y_U, uL_est):
grad_u = grad(A, b, y_U, lam, fun_num=fun_num)
if fun_num == 1:
tx_U = make_update(y_U, grad_u, uL_est)
elif fun_num == 2:
tx_U = make_update1(y_U, grad_u, uL_est)
else:
raise
temp_delta = (abs_func(A, b, tx_U, y_U, lam, \
abs_fun_num=abs_fun_num, fun_num=fun_num)
- main_func(A, b, y_U, lam, fun_num=fun_num)
+ (uL_est*breg(tx_U, y_U, breg_num=breg_num)))
return temp_delta, tx_U
def line_search(y_U):
gm = 0.001
eta = 0.001
# here some gm, eta values can be unstable towards the end
# requires some tuning
# the above values work fine
delta, tx_U = obtain_delta(A, b, y_U, uL_est)
x_U = y_U + eta*(tx_U - y_U)
while(main_func(A, b, x_U, lam, fun_num=fun_num) \
- main_func(A, b, y_U, lam, fun_num=fun_num) \
- (eta*gm*delta) > 1e-7) and (delta > 0):
eta = eta*0.1
x_U = y_U + eta*(tx_U - y_U)
return x_U
if algo == 1:
# Implementation of CoCaIn BPG
gamma_vals = [0]
uL_est_vals = [uL_est]
lL_est_vals = [lL_est]
temp = main_func(A, b, U, lam, fun_num=fun_num)
print('temp is ' + str(temp))
func_vals = [temp]
lyapunov_vals = [temp]
U_vals = [init_U]
# U2_vals = []
import time
time_vals = np.zeros(max_iter+1)
time_vals[0] = 0
for i in range(max_iter):
st_time = time.time()
lL_est, y_U, gamma = do_lb_search(
A, b, U, prev_U, lam, uL_est, lL_est=lL_est_main)
prev_U = U
print("doing Lb " + str(lL_est))
print("doing Ub " + str(uL_est))
temp_ulest = uL_est
uL_est, U = do_ub_search(A, b, y_U, uL_est)
# print('funct value at '+ str(i) + ' is ')
print(main_func(A, b, U, lam, fun_num=fun_num))
uL_est_vals = uL_est_vals + [uL_est]
lL_est_vals = lL_est_vals + [lL_est]
gamma_vals = gamma_vals + [gamma]
U_vals = U_vals + [U]
temp = main_func(A, b, U, lam, fun_num=fun_num)
if
|
np.isnan(temp)
|
numpy.isnan
|
'''
Unit test for the high level vds interface for eiger
https://support.hdfgroup.org/HDF5/docNewFeatures/VDS/HDF5-VDS-requirements-use-cases-2014-12-10.pdf
'''
import numpy as np
from numpy.testing import assert_array_equal
import os
import os.path as osp
import shutil
import tempfile
import h5py as h5
from ..common import ut
from ..._hl.vds import vds_support
@ut.skipUnless(vds_support,
'VDS requires HDF5 >= 1.9.233')
class TestEigerHighLevel(ut.TestCase):
def setUp(self):
self.working_dir = tempfile.mkdtemp()
self.fname = ['raw_file_1.h5', 'raw_file_2.h5', 'raw_file_3.h5']
for k, outfile in enumerate(self.fname):
filename = osp.join(self.working_dir, outfile)
f = h5.File(filename, 'w')
f['data'] = np.ones((20, 200, 200)) * k
f.close()
f = h5.File(osp.join(self.working_dir, 'raw_file_4.h5'), 'w')
f['data'] = np.ones((18, 200, 200)) * 3
self.fname.append('raw_file_4.h5')
self.fname = [osp.join(self.working_dir, ix) for ix in self.fname]
f.close()
def test_eiger_high_level(self):
outfile = osp.join(self.working_dir, 'eiger.h5')
layout = h5.VirtualLayout(shape=(78, 200, 200), dtype=float)
M_minus_1 = 0
# Create the virtual dataset file
with h5.File(outfile, 'w', libver='latest') as f:
for foo in self.fname:
in_data = h5.File(foo, 'r')['data']
src_shape = in_data.shape
in_data.file.close()
M = M_minus_1 + src_shape[0]
vsource = h5.VirtualSource(foo, 'data', shape=src_shape)
layout[M_minus_1:M, :, :] = vsource
M_minus_1 = M
f.create_virtual_dataset('data', layout, fillvalue=45)
f = h5.File(outfile, 'r')['data']
self.assertEqual(f[10, 100, 10], 0.0)
self.assertEqual(f[30, 100, 100], 1.0)
self.assertEqual(f[50, 100, 100], 2.0)
self.assertEqual(f[70, 100, 100], 3.0)
f.file.close()
def tearDown(self):
shutil.rmtree(self.working_dir)
'''
Unit test for the high level vds interface for excalibur
https://support.hdfgroup.org/HDF5/docNewFeatures/VDS/HDF5-VDS-requirements-use-cases-2014-12-10.pdf
'''
class ExcaliburData(object):
FEM_PIXELS_PER_CHIP_X = 256
FEM_PIXELS_PER_CHIP_Y = 256
FEM_CHIPS_PER_STRIPE_X = 8
FEM_CHIPS_PER_STRIPE_Y = 1
FEM_STRIPES_PER_MODULE = 2
@property
def sensor_module_dimensions(self):
x_pixels = self.FEM_PIXELS_PER_CHIP_X * self.FEM_CHIPS_PER_STRIPE_X
y_pixels = self.FEM_PIXELS_PER_CHIP_Y * self.FEM_CHIPS_PER_STRIPE_Y * self.FEM_STRIPES_PER_MODULE
return y_pixels, x_pixels,
@property
def fem_stripe_dimensions(self):
x_pixels = self.FEM_PIXELS_PER_CHIP_X * self.FEM_CHIPS_PER_STRIPE_X
y_pixels = self.FEM_PIXELS_PER_CHIP_Y * self.FEM_CHIPS_PER_STRIPE_Y
return y_pixels, x_pixels,
def generate_sensor_module_image(self, value, dtype='uint16'):
dset = np.empty(shape=self.sensor_module_dimensions, dtype=dtype)
dset.fill(value)
return dset
def generate_fem_stripe_image(self, value, dtype='uint16'):
dset = np.empty(shape=self.fem_stripe_dimensions, dtype=dtype)
dset.fill(value)
return dset
@ut.skipUnless(vds_support,
'VDS requires HDF5 >= 1.9.233')
class TestExcaliburHighLevel(ut.TestCase):
def create_excalibur_fem_stripe_datafile(self, fname, nframes, excalibur_data,scale):
shape = (nframes,) + excalibur_data.fem_stripe_dimensions
max_shape = shape#(None,) + excalibur_data.fem_stripe_dimensions
chunk = (1,) + excalibur_data.fem_stripe_dimensions
with h5.File(fname, 'w', libver='latest') as f:
dset = f.create_dataset('data', shape=shape, maxshape=max_shape, chunks=chunk, dtype='uint16')
for data_value_index in np.arange(nframes):
dset[data_value_index] = excalibur_data.generate_fem_stripe_image(data_value_index*scale)
def setUp(self):
self.working_dir = tempfile.mkdtemp()
self.fname = ["stripe_%d.h5" % stripe for stripe in range(1,7)]
self.fname = [osp.join(self.working_dir, f) for f in self.fname]
nframes = 5
self.edata = ExcaliburData()
for k, raw_file in enumerate(self.fname):
self.create_excalibur_fem_stripe_datafile(raw_file, nframes, self.edata,k)
def test_excalibur_high_level(self):
outfile = osp.join(self.working_dir, 'excalibur.h5')
f = h5.File(outfile,'w',libver='latest') # create an output file.
in_key = 'data' # where is the data at the input?
in_sh = h5.File(self.fname[0],'r')[in_key].shape # get the input shape
dtype = h5.File(self.fname[0],'r')[in_key].dtype # get the datatype
# now generate the output shape
vertical_gap = 10 # pixels spacing in the vertical
nfiles = len(self.fname)
nframes = in_sh[0]
width = in_sh[2]
height = (in_sh[1]*nfiles) + (vertical_gap*(nfiles-1))
out_sh = (nframes, height, width)
# Virtual layout is a representation of the output dataset
layout = h5.VirtualLayout(shape=out_sh, dtype=dtype)
offset = 0 # initial offset
for i, filename in enumerate(self.fname):
# A representation of the input dataset
vsource = h5.VirtualSource(filename, in_key, shape=in_sh)
layout[:, offset:(offset + in_sh[1]), :] = vsource # map them with indexing
offset += in_sh[1] + vertical_gap # increment the offset
# pass the fill value and list of maps
f.create_virtual_dataset('data', layout, fillvalue=0x1)
f.close()
f = h5.File(outfile,'r')['data']
self.assertEqual(f[3,100,0], 0.0)
self.assertEqual(f[3,260,0], 1.0)
self.assertEqual(f[3,350,0], 3.0)
self.assertEqual(f[3,650,0], 6.0)
self.assertEqual(f[3,900,0], 9.0)
self.assertEqual(f[3,1150,0], 12.0)
self.assertEqual(f[3,1450,0], 15.0)
f.file.close()
def tearDown(self):
shutil.rmtree(self.working_dir)
'''
Unit test for the high level vds interface for percival
https://support.hdfgroup.org/HDF5/docNewFeatures/VDS/HDF5-VDS-requirements-use-cases-2014-12-10.pdf
'''
@ut.skipUnless(vds_support,
'VDS requires HDF5 >= 1.9.233')
class TestPercivalHighLevel(ut.TestCase):
def setUp(self):
self.working_dir = tempfile.mkdtemp()
self.fname = ['raw_file_1.h5','raw_file_2.h5','raw_file_3.h5']
k = 0
for outfile in self.fname:
filename = osp.join(self.working_dir, outfile)
f = h5.File(filename,'w')
f['data'] = np.ones((20,200,200))*k
k +=1
f.close()
f = h5.File(osp.join(self.working_dir, 'raw_file_4.h5'), 'w')
f['data'] = np.ones((19,200,200))*3
self.fname.append('raw_file_4.h5')
self.fname = [osp.join(self.working_dir, ix) for ix in self.fname]
f.close()
def test_percival_high_level(self):
outfile = osp.join(self.working_dir, 'percival.h5')
# Virtual layout is a representation of the output dataset
layout = h5.VirtualLayout(shape=(79, 200, 200), dtype=float)
for k, filename in enumerate(self.fname):
dim1 = 19 if k == 3 else 20
vsource = h5.VirtualSource(filename, 'data',shape=(dim1, 200, 200))
layout[k:79:4, :, :] = vsource[:, :, :]
# Create the virtual dataset file
with h5.File(outfile, 'w', libver='latest') as f:
f.create_virtual_dataset('data', layout, fillvalue=-5)
foo = np.array(2 * list(range(4)))
with h5.File(outfile,'r') as f:
ds = f['data']
line = ds[:8,100,100]
self.assertEqual(ds.shape, (79,200,200),)
assert_array_equal(line, foo)
def test_percival_source_from_dataset(self):
outfile = osp.join(self.working_dir, 'percival.h5')
# Virtual layout is a representation of the output dataset
layout = h5.VirtualLayout(shape=(79, 200, 200), dtype=float)
for k, filename in enumerate(self.fname):
with h5.File(filename, 'r') as f:
vsource = h5.VirtualSource(f['data'])
layout[k:79:4, :, :] = vsource
# Create the virtual dataset file
with h5.File(outfile, 'w', libver='latest') as f:
f.create_virtual_dataset('data', layout, fillvalue=-5)
foo = np.array(2 * list(range(4)))
with h5.File(outfile,'r') as f:
ds = f['data']
line = ds[:8,100,100]
self.assertEqual(ds.shape, (79,200,200),)
assert_array_equal(line, foo)
def tearDown(self):
shutil.rmtree(self.working_dir)
@ut.skipUnless(vds_support,
'VDS requires HDF5 >= 1.9.233')
class SlicingTestCase(ut.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
# Create source files (1.h5 to 4.h5)
for n in range(1, 5):
with h5.File(osp.join(self.tmpdir, '{}.h5'.format(n)), 'w') as f:
d = f.create_dataset('data', (100,), 'i4')
d[:] = np.arange(100) + n
def make_virtual_ds(self):
# Assemble virtual dataset
layout = h5.VirtualLayout((4, 100), 'i4', maxshape=(4, None))
for n in range(1, 5):
filename = osp.join(self.tmpdir, "{}.h5".format(n))
vsource = h5.VirtualSource(filename, 'data', shape=(100,))
# Fill the first half with positions 0, 2, 4... from the source
layout[n - 1, :50] = vsource[0:100:2]
# Fill the second half with places 1, 3, 5... from the source
layout[n - 1, 50:] = vsource[1:100:2]
outfile = osp.join(self.tmpdir, 'VDS.h5')
# Add virtual dataset to output file
with h5.File(outfile, 'w', libver='latest') as f:
f.create_virtual_dataset('/group/data', layout, fillvalue=-5)
return outfile
def test_slice_source(self):
outfile = self.make_virtual_ds()
with h5.File(outfile, 'r') as f:
assert_array_equal(f['/group/data'][0][:3], [1, 3, 5])
assert_array_equal(f['/group/data'][0][50:53], [2, 4, 6])
assert_array_equal(f['/group/data'][3][:3], [4, 6, 8])
assert_array_equal(f['/group/data'][3][50:53], [5, 7, 9])
def test_inspection(self):
with h5.File(osp.join(self.tmpdir, '1.h5'), 'r') as f:
assert not f['data'].is_virtual
outfile = self.make_virtual_ds()
with h5.File(outfile, 'r') as f:
ds = f['/group/data']
assert ds.is_virtual
src_files = {osp.join(self.tmpdir, '{}.h5'.format(n))
for n in range(1, 5)}
assert {s.file_name for s in ds.virtual_sources()} == src_files
def tearDown(self):
shutil.rmtree(self.tmpdir)
@ut.skipUnless(vds_support,
'VDS requires HDF5 >= 1.9.233')
class IndexingTestCase(ut.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
# Create source file (1.h5)
with h5.File(osp.join(self.tmpdir, '1.h5'), 'w') as f:
d = f.create_dataset('data', (10,), 'i4')
d[:] = np.arange(10)*10
def test_index_layout(self):
# Assemble virtual dataset (indexing target)
layout = h5.VirtualLayout((100,), 'i4')
inds = [3,6,20,25,33,47,70,75,96,98]
filename = osp.join(self.tmpdir, "1.h5")
vsource = h5.VirtualSource(filename, 'data', shape=(10,))
layout[inds] = vsource
outfile = osp.join(self.tmpdir, 'VDS.h5')
# Assembly virtual dataset (indexing source)
layout2 = h5.VirtualLayout((6,), 'i4')
inds2 = [0,1,4,5,8]
layout2[1:] = vsource[inds2]
# Add virtual datasets to output file and close
with h5.File(outfile, 'w', libver='latest') as f:
f.create_virtual_dataset('/data', layout, fillvalue=-5)
f.create_virtual_dataset('/data2', layout2, fillvalue=-3)
# Read data from virtual datasets
with h5.File(outfile, 'r') as f:
data = f['/data'][()]
data2 = f['/data2'][()]
# Verify
assert_array_equal(data[inds], np.arange(10)*10)
assert_array_equal(data2[1:], [0,10,40,50,80])
mask = np.zeros(100)
mask[inds] = 1
self.assertEqual(data[mask == 0].min(), -5)
self.assertEqual(data[mask == 0].max(), -5)
self.assertEqual(data2[0], -3)
def tearDown(self):
shutil.rmtree(self.tmpdir)
@ut.skipUnless(vds_support,
'VDS requires HDF5 >= 1.9.233')
class RelativeLinkTestCase(ut.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.f1 = osp.join(self.tmpdir, 'testfile1.h5')
self.f2 = osp.join(self.tmpdir, 'testfile2.h5')
self.data1 = np.arange(10)
self.data2 = np.arange(10) * -1
with h5.File(self.f1, 'w') as f:
# dataset
ds = f.create_dataset('data', (10,), 'f4')
ds[:] = self.data1
with h5.File(self.f2, 'w') as f:
# dataset
ds = f.create_dataset('data', (10,), 'f4')
ds[:] = self.data2
# virtual dataset
layout = h5.VirtualLayout((2, 10), 'f4')
vsource1 = h5.VirtualSource(self.f1, 'data', shape=(10,))
vsource2 = h5.VirtualSource(self.f2, 'data', shape=(10,))
layout[0] = vsource1
layout[1] = vsource2
f.create_virtual_dataset('virtual', layout)
def test_relative_vds(self):
with h5.File(self.f2) as f:
data = f['virtual'][:]
assert (data[0] == self.data1).all()
assert (data[1] == self.data2).all()
# move f2 -> f3
f3 = osp.join(self.tmpdir, 'testfile3.h5')
os.rename(self.f2, f3)
with h5.File(f3) as f:
data = f['virtual'][:]
assert data.dtype == 'f4'
assert (data[0] == self.data1).all()
assert (data[1] == self.data2).all()
# moving other file
f4 = osp.join(self.tmpdir, 'testfile4.h5')
os.rename(self.f1, f4)
with h5.File(f3) as f:
data = f['virtual'][:]
assert data.dtype == 'f4'
# unavailable data is silently converted to default value
assert (data[0] == 0).all()
assert (data[1] == self.data2).all()
def tearDown(self):
shutil.rmtree(self.tmpdir)
@ut.skipUnless(vds_support,
'VDS requires HDF5 >= 1.9.233')
class VDSUnlimitedTestCase(ut.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.path = osp.join(self.tmpdir, "resize.h5")
with h5.File(self.path, "w") as f:
source_dset = f.create_dataset(
"source",
data=np.arange(20),
shape=(10, 2),
maxshape=(None, 2),
chunks=(10, 1),
fillvalue=-1
)
self.layout = h5.VirtualLayout((10, 1), int, maxshape=(None, 1))
layout_source = h5.VirtualSource(source_dset)
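# h5py.UNLIMITED in the slices below maps the resizable (maxshape=None) axis of the source
# onto the layout, so the virtual dataset keeps tracking the source as it is resized.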
self.layout[:h5.UNLIMITED, 0] = layout_source[:h5.UNLIMITED, 1]
f.create_virtual_dataset("virtual", self.layout)
def test_unlimited_axis(self):
comp1 = np.arange(1, 20, 2).reshape(10, 1)
comp2 = np.vstack((
comp1,
|
np.full(shape=(10, 1), fill_value=-1)
|
numpy.full
|
def read():
import sys
return sys.stdin.buffer.read()
def read_ints():
*ints, = map(
int,
read().split(),
)
return ints
class Algebra:
...
class Modular(Algebra):
def __init__(
self,
mod=10**9+7,
**kwargs):
super(Modular, self).__init__(
**kwargs,
)
self.mod = mod
def inverse(self, n: int):
p = self.mod
return self.pow(n, p-2)
def cumprod(self, a):
import numpy as np
l = len(a)
n = int(np.sqrt(l) + 1)
a =
|
np.resize(a, (n, n))
|
numpy.resize
|
"""Thermal grid models module."""
import itertools
from multimethod import multimethod
import numpy as np
import pandas as pd
import scipy.constants
import scipy.sparse as sp
import scipy.sparse.linalg
import typing
import mesmo.config
import mesmo.data_interface
import mesmo.der_models
import mesmo.solutions
import mesmo.utils
logger = mesmo.config.get_logger(__name__)
class ThermalGridModel(mesmo.utils.ObjectBase):
"""Thermal grid model object."""
timesteps: pd.Index
node_names: pd.Index
line_names: pd.Index
der_names: pd.Index
der_types: pd.Index
nodes: pd.Index
branches: pd.Index
branch_loops: pd.Index
ders: pd.Index
branch_incidence_1_matrix: sp.spmatrix
branch_incidence_2_matrix: sp.spmatrix
branch_incidence_matrix: sp.spmatrix
branch_incidence_matrix_no_source_no_loop: sp.spmatrix
branch_incidence_matrix_no_source_loop: sp.spmatrix
branch_loop_incidence_matrix: sp.spmatrix
der_node_incidence_matrix: sp.spmatrix
der_thermal_power_vector_reference: np.ndarray
branch_flow_vector_reference: np.ndarray
node_head_vector_reference: np.ndarray
# TODO: Revise / reduce use of parameter attributes if possible.
line_parameters: pd.DataFrame
energy_transfer_station_head_loss: float
enthalpy_difference_distribution_water: float
distribution_pump_efficiency: float
source_der_model: mesmo.der_models.DERModel
plant_efficiency: float
def __init__(self, scenario_name: str):
# Obtain thermal grid data.
thermal_grid_data = mesmo.data_interface.ThermalGridData(scenario_name)
# Obtain index set for time steps.
# - This is needed for optimization problem definitions within linear thermal grid models.
self.timesteps = thermal_grid_data.scenario_data.timesteps
# Obtain node / line / DER names.
self.node_names = pd.Index(thermal_grid_data.thermal_grid_nodes["node_name"])
self.line_names = pd.Index(thermal_grid_data.thermal_grid_lines["line_name"])
self.der_names = pd.Index(thermal_grid_data.thermal_grid_ders["der_name"])
self.der_types = pd.Index(thermal_grid_data.thermal_grid_ders["der_type"]).unique()
# Obtain node / branch / DER index set.
nodes = pd.concat(
[
thermal_grid_data.thermal_grid_nodes.loc[:, "node_name"]
.apply(
# Obtain `node_type` column.
lambda value: "source"
if value == thermal_grid_data.thermal_grid.at["source_node_name"]
else "no_source"
)
.rename("node_type"),
thermal_grid_data.thermal_grid_nodes.loc[:, "node_name"],
],
axis="columns",
)
self.nodes = pd.MultiIndex.from_frame(nodes)
self.branches = pd.MultiIndex.from_product([self.line_names, ["no_loop"]], names=["branch_name", "loop_type"])
self.branch_loops = pd.MultiIndex.from_tuples([], names=["loop_id", "branch_name"]) # Values are filled below.
self.ders = pd.MultiIndex.from_frame(thermal_grid_data.thermal_grid_ders[["der_type", "der_name"]])
# Instantiate branch-to-node incidence matrices.
self.branch_incidence_1_matrix = sp.dok_matrix((len(self.branches), len(self.nodes)), dtype=int)
self.branch_incidence_2_matrix = sp.dok_matrix((len(self.branches), len(self.nodes)), dtype=int)
# Add lines to branch incidence matrices and identify any loops.
# - Uses temporary node tree variable to track construction of the network and identify any loops / cycles.
branches_loops = self.branches.to_frame()
node_trees = []
for line_index, line in thermal_grid_data.thermal_grid_lines.iterrows():
# Obtain indexes for positioning the line in the incidence matrices.
node_index_1 = mesmo.utils.get_index(self.nodes, node_name=line["node_1_name"])
node_index_2 = mesmo.utils.get_index(self.nodes, node_name=line["node_2_name"])
branch_index = mesmo.utils.get_index(self.branches, branch_name=line["line_name"])
# Insert connection indicators into incidence matrices.
self.branch_incidence_1_matrix[np.ix_(branch_index, node_index_1)] += 1
self.branch_incidence_2_matrix[np.ix_(branch_index, node_index_2)] += 1
# Check if node 1 or node 2 are in any node trees.
node_tree_index_1 = None
node_tree_index_2 = None
for node_tree_index, node_tree in enumerate(node_trees):
if line["node_1_name"] in node_tree:
node_tree_index_1 = node_tree_index
if line["node_2_name"] in node_tree:
node_tree_index_2 = node_tree_index
if (node_tree_index_1 is None) and (node_tree_index_2 is None):
# Create new tree, if neither node is on any tree.
node_trees.append([line["node_1_name"], line["node_2_name"]])
elif (node_tree_index_1 is not None) and (node_tree_index_2 is None):
# Add node to tree, if other node is on any tree.
node_trees[node_tree_index_1].append(line["node_2_name"])
elif (node_tree_index_1 is None) and (node_tree_index_2 is not None):
# Add node to tree, if other node is on any tree.
node_trees[node_tree_index_2].append(line["node_1_name"])
else:
if node_tree_index_1 == node_tree_index_2:
# Mark branch as loop, if both nodes are in the same tree.
branches_loops.at[self.branches[branch_index], "loop_type"] = "loop"
else:
# Merge trees, if the branch connects nodes on different trees.
node_trees[node_tree_index_1].extend(node_trees[node_tree_index_2])
node_trees[node_tree_index_2] = []
# Update branch / loop indexes.
self.branches = pd.MultiIndex.from_frame(branches_loops)
self.branch_loops = pd.MultiIndex.from_frame(
pd.concat(
[
pd.Series(range(sum(branches_loops.loc[:, "loop_type"] == "loop")), name="loop_id", dtype=int),
branches_loops.loc[branches_loops.loc[:, "loop_type"] == "loop", "branch_name"].reset_index(
drop=True
),
],
axis="columns",
)
)
# Raise errors on invalid network configurations.
node_trees = [node_tree for node_tree in node_trees if len(node_tree) > 0]
if len(node_trees) > 1:
raise ValueError(
f"The thermal grid contains disjoint sections of nodes:"
+ "".join(
[
f"\nSection {node_tree_index}: {node_tree}"
for node_tree_index, node_tree in enumerate(node_trees)
]
)
)
elif len(node_trees[0]) != len(self.node_names):
raise ValueError(
f"The thermal grid contains disconnected nodes:\n"
f"{[node_name for node_name in self.node_names if node_name not in node_trees[0]]}"
)
# Obtain combined branch incidence matrix.
self.branch_incidence_matrix = self.branch_incidence_1_matrix - self.branch_incidence_2_matrix
# Convert DOK matrices to CSR matrices.
self.branch_incidence_1_matrix = self.branch_incidence_1_matrix.tocsr()
self.branch_incidence_2_matrix = self.branch_incidence_2_matrix.tocsr()
self.branch_incidence_matrix = self.branch_incidence_matrix.tocsr()
# Obtain shorthand definitions.
self.branch_incidence_matrix_no_source_no_loop = self.branch_incidence_matrix[
np.ix_(
mesmo.utils.get_index(self.branches, loop_type="no_loop"),
mesmo.utils.get_index(self.nodes, node_type="no_source"),
)
]
self.branch_incidence_matrix_no_source_loop = self.branch_incidence_matrix[
np.ix_(
mesmo.utils.get_index(self.branches, loop_type="loop", raise_empty_index_error=False),
mesmo.utils.get_index(self.nodes, node_type="no_source"),
)
]
# Obtain branch-to-loop incidence matrix.
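# The flows on the spanning-tree ('no_loop') branches are determined by the loop-branch flows
# through node balance, giving the block -inv(B_tree^T) @ B_loop^T; stacking it on an identity
# block for the loop branches themselves yields the branch-to-loop incidence matrix.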
self.branch_loop_incidence_matrix = sp.vstack(
[
-1.0 * sp.linalg.inv(self.branch_incidence_matrix_no_source_no_loop.transpose())
# Using `sp.linalg.inv()` instead of `sp.linalg.spsolve()` to preserve dimensions in all cases.
@ self.branch_incidence_matrix_no_source_loop.transpose(),
sp.eye(len(self.branch_loops)),
]
).tocsr()
# Instantiate DER-to-node incidence matrix.
self.der_node_incidence_matrix = sp.dok_matrix((len(self.nodes), len(self.ders)), dtype=int)
# Add DERs into DER incidence matrix.
for der_name, der in thermal_grid_data.thermal_grid_ders.iterrows():
# Obtain indexes for positioning the DER in the incidence matrix.
node_index = mesmo.utils.get_index(self.nodes, node_name=der["node_name"])
der_index = mesmo.utils.get_index(self.ders, der_name=der["der_name"])
# Insert connection indicator into incidence matrices.
self.der_node_incidence_matrix[node_index, der_index] = 1
# Convert DOK matrices to CSR matrices.
self.der_node_incidence_matrix = self.der_node_incidence_matrix.tocsr()
# Obtain DER nominal thermal power vector.
self.der_thermal_power_vector_reference = thermal_grid_data.thermal_grid_ders.loc[
:, "thermal_power_nominal"
].values
# Obtain nominal branch flow vector.
self.branch_flow_vector_reference = (
np.pi
* (thermal_grid_data.thermal_grid_lines.loc[:, "diameter"].values / 2) ** 2
* thermal_grid_data.thermal_grid_lines.loc[:, "maximum_velocity"].values
)
# Obtain nominal node head vector.
# TODO: Define proper node head reference vector.
self.node_head_vector_reference = np.ones(len(self.nodes))
# Obtain line parameters.
self.line_parameters = thermal_grid_data.thermal_grid_lines.loc[:, ["length", "diameter", "absolute_roughness"]]
# Obtain other system parameters.
self.energy_transfer_station_head_loss = float(
thermal_grid_data.thermal_grid["energy_transfer_station_head_loss"]
)
self.enthalpy_difference_distribution_water = float(
thermal_grid_data.thermal_grid["enthalpy_difference_distribution_water"]
)
self.distribution_pump_efficiency = float(thermal_grid_data.thermal_grid["distribution_pump_efficiency"])
# Obtain DER model source node.
# TODO: Use state space model for simulation / optimization.
self.source_der_model = mesmo.der_models.make_der_model(
thermal_grid_data.thermal_grid.at["source_der_model_name"], thermal_grid_data.der_data, is_standalone=True
)
# TODO: Remove temporary workaround: Obtain efficiency factors.
if thermal_grid_data.thermal_grid.at["source_der_type"] == "cooling_plant":
self.plant_efficiency = self.source_der_model.cooling_plant_efficiency
elif thermal_grid_data.thermal_grid.at["source_der_type"] == "heating_plant":
self.plant_efficiency = self.source_der_model.thermal_efficiency
else:
raise ValueError(f"Incompatible der model type: {thermal_grid_data.thermal_grid.at['source_der_type']}")
# Define shorthands for no-source / source variables.
# TODO: Add in class documentation.
# TODO: Replace local variables in power flow / linear models.
node_incidence_matrix = sp.identity(len(self.nodes)).tocsr()
self.node_incidence_matrix_no_source = node_incidence_matrix[
np.ix_(range(len(self.nodes)), mesmo.utils.get_index(self.nodes, node_type="no_source"))
]
self.node_incidence_matrix_source = node_incidence_matrix[
np.ix_(range(len(self.nodes)), mesmo.utils.get_index(self.nodes, node_type="source"))
]
self.der_node_incidence_matrix_no_source = self.der_node_incidence_matrix[
np.ix_(mesmo.utils.get_index(self.nodes, node_type="no_source"), range(len(self.ders)))
]
self.branch_incidence_matrix_no_source = self.branch_incidence_matrix[
np.ix_(range(len(self.branches)), mesmo.utils.get_index(self.nodes, node_type="no_source"))
]
self.branch_incidence_matrix_source = self.branch_incidence_matrix[
np.ix_(range(len(self.branches)), mesmo.utils.get_index(self.nodes, node_type="source"))
]
self.node_head_vector_reference_no_source = self.node_head_vector_reference[
mesmo.utils.get_index(self.nodes, node_type="no_source")
]
self.node_head_vector_reference_source = self.node_head_vector_reference[
mesmo.utils.get_index(self.nodes, node_type="source")
]
def get_branch_loss_coefficient_vector(self, branch_flow_vector: np.ndarray):
# Obtain branch velocity vector.
branch_velocity_vector = (
4.0 * branch_flow_vector / (np.pi * self.line_parameters.loc[:, "diameter"].values ** 2)
)
# Obtain branch Reynolds coefficient vector.
branch_reynold_vector = (
np.abs(branch_velocity_vector)
* self.line_parameters.loc[:, "diameter"].values
/ mesmo.config.water_kinematic_viscosity
)
# Obtain branch friction factor vector.
@np.vectorize
def branch_friction_factor_vector(reynold, absolute_roughness, diameter):
# No flow.
if reynold == 0:
friction_factor = 0
# Laminar Flow, based on Hagen-Poiseuille velocity profile, analytical correlation.
elif 0 < reynold < 4000:
friction_factor = 64 / reynold
# Turbulent flow, Swamee-Jain formula, approximating correlation of Colebrook-White equation.
elif 4000 <= reynold:
if not (reynold <= 100000000 and 0.000001 <= ((absolute_roughness / 1000) / diameter) <= 0.01):
logger.warning(
"Exceeding validity range of Swamee-Jain formula for calculation of friction factor."
)
friction_factor = (
1.325 / (np.log((absolute_roughness / 1000) / (3.7 * diameter) + 5.74 / (reynold**0.9))) ** 2
)
else:
raise ValueError(f"Invalid Reynolds coefficient: {reynold}")
# Convert from 1/m to 1/km.
friction_factor *= 1.0e3
return friction_factor
# Obtain branch head loss coefficient vector.
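# This is the Darcy-Weisbach coefficient 8*f*L / (g*pi^2*D^5) written for volumetric flow,
# so that the head loss along a branch is coefficient * Q * |Q|.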
branch_loss_coefficient_vector = (
branch_friction_factor_vector(
branch_reynold_vector,
self.line_parameters.loc[:, "absolute_roughness"].values,
self.line_parameters.loc[:, "diameter"].values,
)
* 8.0
* self.line_parameters.loc[:, "length"].values
/ (
mesmo.config.gravitational_acceleration
* self.line_parameters.loc[:, "diameter"].values ** 5
* np.pi**2
)
)
return branch_loss_coefficient_vector
class ThermalGridDEROperationResults(mesmo.utils.ResultsBase):
der_thermal_power_vector: pd.DataFrame
der_thermal_power_vector_per_unit: pd.DataFrame
class ThermalGridOperationResults(ThermalGridDEROperationResults):
thermal_grid_model: ThermalGridModel
node_head_vector: pd.DataFrame
node_head_vector_per_unit: pd.DataFrame
branch_flow_vector: pd.DataFrame
branch_flow_vector_per_unit: pd.DataFrame
pump_power: pd.DataFrame
class ThermalGridDLMPResults(mesmo.utils.ResultsBase):
thermal_grid_energy_dlmp_node_thermal_power: pd.DataFrame
thermal_grid_head_dlmp_node_thermal_power: pd.DataFrame
thermal_grid_congestion_dlmp_node_thermal_power: pd.DataFrame
thermal_grid_pump_dlmp_node_thermal_power: pd.DataFrame
thermal_grid_total_dlmp_node_thermal_power: pd.DataFrame
thermal_grid_energy_dlmp_der_thermal_power: pd.DataFrame
thermal_grid_head_dlmp_der_thermal_power: pd.DataFrame
thermal_grid_congestion_dlmp_der_thermal_power: pd.DataFrame
thermal_grid_pump_dlmp_der_thermal_power: pd.DataFrame
thermal_grid_total_dlmp_der_thermal_power: pd.DataFrame
thermal_grid_total_dlmp_price_timeseries: pd.DataFrame
class ThermalPowerFlowSolutionBase(mesmo.utils.ObjectBase):
"""Thermal grid power flow solution object."""
der_thermal_power_vector: np.ndarray
node_head_vector: np.ndarray
branch_flow_vector: np.ndarray
pump_power: float
@multimethod
def __init__(self, scenario_name: str):
# Obtain thermal grid model.
thermal_grid_model = ThermalGridModel(scenario_name)
self.__init__(thermal_grid_model)
@multimethod
def __init__(self, thermal_grid_model: ThermalGridModel):
# Obtain DER thermal power vector.
der_thermal_power_vector = thermal_grid_model.der_thermal_power_vector_reference
self.__init__(thermal_grid_model, der_thermal_power_vector)
@multimethod
def __init__(self, thermal_grid_model: ThermalGridModel, der_thermal_power_vector: np.ndarray):
raise NotImplementedError
class ThermalPowerFlowSolutionExplicit(ThermalPowerFlowSolutionBase):
# Enable calls to `__init__` method definitions in parent class.
@multimethod
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@multimethod
def __init__(self, thermal_grid_model: ThermalGridModel, der_thermal_power_vector: np.ndarray):
# Obtain DER thermal power vector.
self.der_thermal_power_vector = der_thermal_power_vector.ravel()
# Define shorthand for DER volume flow vector.
der_flow_vector = (
self.der_thermal_power_vector
/ mesmo.config.water_density
/ thermal_grid_model.enthalpy_difference_distribution_water
)
# Obtain branch volume flow vector.
self.branch_flow_vector = (
scipy.sparse.linalg.spsolve(
thermal_grid_model.branch_incidence_matrix[
:, mesmo.utils.get_index(thermal_grid_model.nodes, node_type="no_source")
].transpose(),
thermal_grid_model.der_node_incidence_matrix[
mesmo.utils.get_index(thermal_grid_model.nodes, node_type="no_source"), :
]
@ np.transpose([der_flow_vector]),
)
).ravel()
# Obtain node head vector.
self.node_head_vector = np.zeros(len(thermal_grid_model.nodes), dtype=float)
self.node_head_vector[
mesmo.utils.get_index(thermal_grid_model.nodes, node_type="no_source")
] = scipy.sparse.linalg.spsolve(
thermal_grid_model.branch_incidence_matrix[
:, mesmo.utils.get_index(thermal_grid_model.nodes, node_type="no_source")
].tocsc(),
(
thermal_grid_model.get_branch_loss_coefficient_vector(self.branch_flow_vector)
* self.branch_flow_vector
* np.abs(self.branch_flow_vector) # TODO: Check if absolute value needed.
),
)
# Obtain pump power loss.
self.pump_power = (
(2.0 * np.max(np.abs(self.node_head_vector)) + thermal_grid_model.energy_transfer_station_head_loss)
* -1.0
* np.sum(der_flow_vector) # Source volume flow.
* mesmo.config.water_density
* mesmo.config.gravitational_acceleration
/ thermal_grid_model.distribution_pump_efficiency
)
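# A toy sketch of the same incidence-matrix solve on a two-pipe radial feeder
# (source -> node 1 -> node 2). The +1 from-node / -1 to-node orientation and the
# demand signs below are assumptions made for this illustration only and are not
# necessarily the conventions used by the model classes above.
def _radial_branch_flow_sketch():
    import numpy as np
    import scipy.sparse
    import scipy.sparse.linalg
    # Branch-by-node incidence matrix; columns ordered [source, node 1, node 2].
    branch_incidence_matrix = scipy.sparse.csr_matrix(
        [[1.0, -1.0, 0.0], [0.0, 1.0, -1.0]]
    )
    # Withdrawals at the two non-source nodes (m^3/s, made-up values).
    node_demand = np.array([0.3, 0.2])
    # Mass balance at the non-source nodes: A_no_source.T @ flow = -demand.
    branch_flow = scipy.sparse.linalg.spsolve(
        branch_incidence_matrix[:, 1:].transpose().tocsc(), -node_demand
    )
    return branch_flow  # -> [0.5, 0.2]: pipe 1 carries both demands, pipe 2 only node 2's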
class ThermalPowerFlowSolutionNewtonRaphson(ThermalPowerFlowSolutionBase):
# Enable calls to `__init__` method definitions in parent class.
@multimethod
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@multimethod
def __init__(
self,
thermal_grid_model: ThermalGridModel,
der_thermal_power_vector: np.ndarray,
head_iteration_limit=100,
head_tolerance=1e-2,
):
# Obtain DER thermal power vector.
self.der_thermal_power_vector = der_thermal_power_vector.ravel()
# Define shorthand for DER volume flow vector.
der_flow_vector = (
self.der_thermal_power_vector
/ mesmo.config.water_density
/ thermal_grid_model.enthalpy_difference_distribution_water
)
# Obtain nodal power vector.
node_flow_vector_no_source = (
thermal_grid_model.der_node_incidence_matrix_no_source @ np.transpose([der_thermal_power_vector])
).ravel()
# Obtain initial nodal flow and head vectors, assuming no load and no injection.
# TODO: Enable passing previous solution for initialization.
node_flow_vector_initial_no_source = np.zeros(node_flow_vector_no_source.shape)
node_head_vector_initial_no_source = thermal_grid_model.node_head_vector_reference_no_source.copy()
branch_flow_vector_initial = thermal_grid_model.branch_flow_vector_reference.copy()
branch_loss_coefficient_vector_initial = thermal_grid_model.get_branch_loss_coefficient_vector(
branch_flow_vector_initial
)
# Define nodal power vector candidate to the desired nodal power vector.
node_flow_vector_candidate_no_source = node_flow_vector_initial_no_source.copy()
# Instantiate Newton-Raphson iteration variables.
head_iteration = 0
head_change = np.inf
# Run Newton-Raphson iterations.
while (head_iteration < head_iteration_limit) & (head_change > head_tolerance):
node_head_vector_estimate_no_source = scipy.sparse.linalg.spsolve(
(
np.transpose(thermal_grid_model.branch_incidence_matrix_no_source)
@ (
0.5
* sp.diags(branch_flow_vector_initial**-1)
@ sp.diags(branch_loss_coefficient_vector_initial**-1)
)
@ thermal_grid_model.branch_incidence_matrix_no_source
),
(
np.transpose(thermal_grid_model.branch_incidence_matrix_no_source)
@ (0.5 * (branch_flow_vector_initial**-1))
- np.transpose(thermal_grid_model.branch_incidence_matrix_no_source)
@ (
0.5
* sp.diags(branch_flow_vector_initial**-1)
@ sp.diags(branch_loss_coefficient_vector_initial**-1)
)
@ thermal_grid_model.branch_incidence_matrix_source
@ thermal_grid_model.node_head_vector_reference_source
+ node_flow_vector_candidate_no_source
),
)
node_head_vector_estimate = (
thermal_grid_model.node_incidence_matrix_no_source @ node_head_vector_estimate_no_source
+ thermal_grid_model.node_incidence_matrix_source @ thermal_grid_model.node_head_vector_reference_source
)
branch_flow_vector_estimate = (
0.5 * branch_flow_vector_initial
- (
0.5
* sp.diags(branch_flow_vector_initial**-1)
@ sp.diags(branch_loss_coefficient_vector_initial**-1)
)
@ thermal_grid_model.branch_incidence_matrix
@ node_head_vector_estimate
)
head_change = np.max(np.abs(node_head_vector_estimate_no_source - node_head_vector_initial_no_source))
node_head_vector_initial_no_source = node_head_vector_estimate_no_source.copy()
branch_flow_vector_initial = branch_flow_vector_estimate.copy()
branch_loss_coefficient_vector_initial = thermal_grid_model.get_branch_loss_coefficient_vector(
branch_flow_vector_initial
)
head_iteration += 1
# Reaching the iteration limit is considered undesired and triggers a warning.
if head_iteration >= head_iteration_limit:
logger.warning(
"Newton-Raphson solution algorithm reached " f"maximum limit of {head_iteration_limit} iterations."
)
# Obtain node head vector.
self.node_head_vector = node_head_vector_estimate
# Obtain branch volume flow vector.
self.branch_flow_vector = branch_flow_vector_estimate
# Obtain pump power loss.
self.pump_power = (
(2.0 * np.max(np.abs(self.node_head_vector)) + thermal_grid_model.energy_transfer_station_head_loss)
* -1.0
* np.sum(der_flow_vector) # Source volume flow.
* mesmo.config.water_density
* mesmo.config.gravitational_acceleration
/ thermal_grid_model.distribution_pump_efficiency
)
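# Up to the network sign conventions, for a single pipe the flow update above
# reduces to the classical Newton step for K * Q * |Q| = dH, namely
# Q_new = 0.5 * Q_old + dH / (2 * K * Q_old). A scalar sketch with made-up loss
# coefficient and head difference, converging to sqrt(dH / K):
def _single_pipe_newton_sketch(loss_coefficient=2.0, head_difference=8.0, iterations=20):
    flow = 1.0  # initial guess
    for _ in range(iterations):
        flow = 0.5 * flow + head_difference / (2.0 * loss_coefficient * flow)
    return flow  # -> 2.0, since sqrt(8.0 / 2.0) = 2.0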
class ThermalPowerFlowSolution(ThermalPowerFlowSolutionBase):
"""Thermal grid power flow solution object."""
# Enable calls to `__init__` method definitions in parent class.
@multimethod
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@multimethod
def __init__(self, thermal_grid_model: ThermalGridModel, der_thermal_power_vector: np.ndarray):
# Select power flow solution method, depending on whether network is radial or meshed.
if len(thermal_grid_model.branch_loops) == 0:
# Use explicit thermal power flow solution method.
ThermalPowerFlowSolutionExplicit.__init__(self, thermal_grid_model, der_thermal_power_vector)
else:
raise NotImplementedError("Thermal power flow solution for meshed networks has not yet been implemented.")
class ThermalPowerFlowSolutionSet(mesmo.utils.ObjectBase):
power_flow_solutions: typing.Dict[pd.Timestamp, ThermalPowerFlowSolution]
thermal_grid_model: ThermalGridModel
der_thermal_power_vector: pd.DataFrame
timesteps: pd.Index
@multimethod
def __init__(
self, thermal_grid_model: ThermalGridModel, der_operation_results: ThermalGridDEROperationResults, **kwargs
):
der_thermal_power_vector = der_operation_results.der_thermal_power_vector
self.__init__(thermal_grid_model, der_thermal_power_vector, **kwargs)
@multimethod
def __init__(
self,
thermal_grid_model: ThermalGridModel,
der_thermal_power_vector: pd.DataFrame,
power_flow_solution_method=ThermalPowerFlowSolution,
):
# Store attributes.
self.thermal_grid_model = thermal_grid_model
self.der_thermal_power_vector = der_thermal_power_vector
self.timesteps = self.thermal_grid_model.timesteps
# Obtain power flow solutions.
power_flow_solutions = mesmo.utils.starmap(
power_flow_solution_method, zip(itertools.repeat(self.thermal_grid_model), der_thermal_power_vector.values)
)
self.power_flow_solutions = dict(zip(self.timesteps, power_flow_solutions))
def get_results(self) -> ThermalGridOperationResults:
raise NotImplementedError
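# A minimal usage sketch of the classes above. The scenario name is an assumption
# and must refer to a scenario available in the underlying MESMO data; the sketch
# builds the thermal grid model, runs one reference power flow and reads out the
# resulting pump power.
def _thermal_power_flow_usage_sketch(scenario_name="example_scenario"):
    thermal_grid_model = ThermalGridModel(scenario_name)
    power_flow_solution = ThermalPowerFlowSolution(thermal_grid_model)
    return power_flow_solution.pump_power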
class LinearThermalGridModel(mesmo.utils.ObjectBase):
"""Linear thermal grid model object."""
thermal_grid_model: ThermalGridModel
thermal_power_flow_solution: ThermalPowerFlowSolution
sensitivity_branch_flow_by_node_power: sp.spmatrix
sensitivity_branch_flow_by_der_power: sp.spmatrix
sensitivity_node_head_by_node_power: sp.spmatrix
sensitivity_node_head_by_der_power: sp.spmatrix
sensitivity_pump_power_by_node_power: np.array
sensitivity_pump_power_by_der_power: np.array
@multimethod
def __init__(
self,
scenario_name: str,
):
# Obtain thermal grid model.
thermal_grid_model = ThermalGridModel(scenario_name)
# Obtain DER power vector.
der_thermal_power_vector = thermal_grid_model.der_thermal_power_vector_reference
# Obtain thermal power flow solution.
thermal_power_flow_solution = ThermalPowerFlowSolution(thermal_grid_model, der_thermal_power_vector)
self.__init__(thermal_grid_model, thermal_power_flow_solution)
@multimethod
def __init__(
self,
thermal_grid_model: ThermalGridModel,
thermal_power_flow_solution: ThermalPowerFlowSolution,
):
# Store thermal grid model.
self.thermal_grid_model = thermal_grid_model
# Store thermal power flow solution.
self.thermal_power_flow_solution = thermal_power_flow_solution
# Obtain inverse / transpose incidence matrices.
node_index_no_source = mesmo.utils.get_index(
self.thermal_grid_model.nodes, node_type="no_source"
) # Define shorthand.
branch_node_incidence_matrix_inverse = sp.dok_matrix(
(len(self.thermal_grid_model.branches), len(self.thermal_grid_model.nodes)), dtype=float
)
branch_node_incidence_matrix_inverse[
np.ix_(range(len(self.thermal_grid_model.branches)), node_index_no_source)
] = scipy.sparse.linalg.inv(
self.thermal_grid_model.branch_incidence_matrix[:, node_index_no_source].transpose()
)
branch_node_incidence_matrix_inverse = branch_node_incidence_matrix_inverse.tocsr()
branch_node_incidence_matrix_transpose_inverse = sp.dok_matrix(
(len(self.thermal_grid_model.nodes), len(self.thermal_grid_model.branches)), dtype=float
)
branch_node_incidence_matrix_transpose_inverse[
np.ix_(node_index_no_source, range(len(self.thermal_grid_model.branches)))
] = scipy.sparse.linalg.inv(self.thermal_grid_model.branch_incidence_matrix[:, node_index_no_source].tocsc())
branch_node_incidence_matrix_transpose_inverse = branch_node_incidence_matrix_transpose_inverse.tocsr()
der_node_incidence_matrix_transpose = np.transpose(self.thermal_grid_model.der_node_incidence_matrix)
# Obtain sensitivity matrices.
self.sensitivity_node_power_by_der_power = self.thermal_grid_model.der_node_incidence_matrix
self.sensitivity_branch_flow_by_node_power = (
branch_node_incidence_matrix_inverse
/ mesmo.config.water_density
/ self.thermal_grid_model.enthalpy_difference_distribution_water
)
self.sensitivity_branch_flow_by_der_power = (
self.sensitivity_branch_flow_by_node_power @ self.sensitivity_node_power_by_der_power
)
self.sensitivity_node_head_by_node_power = (
branch_node_incidence_matrix_transpose_inverse
@ sp.diags(
np.abs(thermal_power_flow_solution.branch_flow_vector)
* thermal_grid_model.get_branch_loss_coefficient_vector(thermal_power_flow_solution.branch_flow_vector)
)
@ self.sensitivity_branch_flow_by_node_power
)
self.sensitivity_node_head_by_der_power = (
self.sensitivity_node_head_by_node_power @ self.sensitivity_node_power_by_der_power
)
self.sensitivity_pump_power_by_node_power = (
(
-1.0
* thermal_power_flow_solution.der_thermal_power_vector
/ mesmo.config.water_density
/ thermal_grid_model.enthalpy_difference_distribution_water
) # DER volume flow vector.
@ (-2.0 * der_node_incidence_matrix_transpose)
@ self.sensitivity_node_head_by_node_power
* mesmo.config.water_density
* mesmo.config.gravitational_acceleration
/ self.thermal_grid_model.distribution_pump_efficiency
) + (
-1.0
* self.thermal_grid_model.energy_transfer_station_head_loss
* mesmo.config.gravitational_acceleration
/ self.thermal_grid_model.enthalpy_difference_distribution_water
/ self.thermal_grid_model.distribution_pump_efficiency
)
self.sensitivity_pump_power_by_der_power = np.array(
[self.sensitivity_pump_power_by_node_power @ self.sensitivity_node_power_by_der_power]
)
# TODO: Split global / local approximation methods.
LinearThermalGridModelGlobal = LinearThermalGridModel
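# A minimal sketch of how a linearised grid model of this kind is typically used:
# predict the node head vector for a DER power perturbation around the
# linearisation point. All matrices and vectors below are made-up toy values, not
# outputs of LinearThermalGridModel.
def _linear_head_prediction_sketch():
    import numpy as np
    sensitivity_node_head_by_der_power = np.array([[0.02, 0.01], [0.01, 0.03]])  # m/W (toy)
    node_head_reference = np.array([-1.5, -2.0])   # m (toy linearisation point)
    der_power_change = np.array([100.0, -50.0])    # W (toy perturbation)
    return node_head_reference + sensitivity_node_head_by_der_power @ der_power_change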
class LinearThermalGridModelSet(mesmo.utils.ObjectBase):
linear_thermal_grid_models: typing.Dict[pd.Timestamp, LinearThermalGridModel]
thermal_grid_model: ThermalGridModel
timesteps: pd.Index
@multimethod
def __init__(
self,
thermal_grid_model: ThermalGridModel,
thermal_power_flow_solution_set: ThermalPowerFlowSolutionSet,
linear_thermal_grid_model_method: typing.Type[LinearThermalGridModel] = LinearThermalGridModelGlobal,
):
self.check_linear_thermal_grid_model_method(linear_thermal_grid_model_method)
# Obtain linear thermal grid models.
linear_thermal_grid_models = mesmo.utils.starmap(
linear_thermal_grid_model_method,
zip(itertools.repeat(thermal_grid_model), thermal_power_flow_solution_set.power_flow_solutions.values()),
)
linear_thermal_grid_models = dict(zip(thermal_grid_model.timesteps, linear_thermal_grid_models))
self.__init__(thermal_grid_model, linear_thermal_grid_models)
@multimethod
def __init__(
self,
thermal_grid_model: ThermalGridModel,
thermal_power_flow_solution: ThermalPowerFlowSolution,
linear_thermal_grid_model_method: typing.Type[LinearThermalGridModel] = LinearThermalGridModelGlobal,
):
self.check_linear_thermal_grid_model_method(linear_thermal_grid_model_method)
# Obtain linear thermal grid models.
linear_thermal_grid_model = LinearThermalGridModelGlobal(thermal_grid_model, thermal_power_flow_solution)
linear_thermal_grid_models = dict(
zip(thermal_grid_model.timesteps, itertools.repeat(linear_thermal_grid_model))
)
self.__init__(thermal_grid_model, linear_thermal_grid_models)
@multimethod
def __init__(
self,
thermal_grid_model: ThermalGridModel,
linear_thermal_grid_models: typing.Dict[pd.Timestamp, LinearThermalGridModel],
):
# Store attributes.
self.thermal_grid_model = thermal_grid_model
self.timesteps = self.thermal_grid_model.timesteps
self.linear_thermal_grid_models = linear_thermal_grid_models
@staticmethod
def check_linear_thermal_grid_model_method(linear_thermal_grid_model_method):
if not issubclass(linear_thermal_grid_model_method, LinearThermalGridModel):
raise ValueError(f"Invalid linear thermal grid model method: {linear_thermal_grid_model_method}")
def define_optimization_problem(
self,
optimization_problem: mesmo.solutions.OptimizationProblem,
price_data: mesmo.data_interface.PriceData,
scenarios: typing.Union[list, pd.Index] = None,
**kwargs,
):
# Define the optimization problem through the respective sub-methods.
self.define_optimization_variables(optimization_problem, scenarios=scenarios)
self.define_optimization_parameters(optimization_problem, price_data, scenarios=scenarios, **kwargs)
self.define_optimization_constraints(optimization_problem, scenarios=scenarios)
self.define_optimization_objective(optimization_problem, scenarios=scenarios)
def define_optimization_variables(
self, optimization_problem: mesmo.solutions.OptimizationProblem, scenarios: typing.Union[list, pd.Index] = None
):
# If no scenarios given, obtain default value.
if scenarios is None:
scenarios = [None]
# Define DER power vector variables.
optimization_problem.define_variable(
"der_thermal_power_vector", scenario=scenarios, timestep=self.timesteps, der=self.thermal_grid_model.ders
)
# Define node head, branch flow and pump power variables.
optimization_problem.define_variable(
"node_head_vector", scenario=scenarios, timestep=self.timesteps, node=self.thermal_grid_model.nodes
)
optimization_problem.define_variable(
"branch_flow_vector", scenario=scenarios, timestep=self.timesteps, branch=self.thermal_grid_model.branches
)
optimization_problem.define_variable("pump_power", scenario=scenarios, timestep=self.timesteps)
def define_optimization_parameters(
self,
optimization_problem: mesmo.solutions.OptimizationProblem,
price_data: mesmo.data_interface.PriceData,
node_head_vector_minimum: np.ndarray = None,
branch_flow_vector_maximum: np.ndarray = None,
scenarios: typing.Union[list, pd.Index] = None,
):
# If no scenarios given, obtain default value.
if scenarios is None:
scenarios = [None]
# Obtain timestep interval in hours, for conversion of power to energy.
timestep_interval_hours = (self.timesteps[1] - self.timesteps[0]) / pd.Timedelta("1h")
# Define head variable term.
optimization_problem.define_parameter(
"head_variable",
sp.block_diag(
[
sp.diags(linear_thermal_grid_model.thermal_grid_model.node_head_vector_reference**-1)
@ linear_thermal_grid_model.sensitivity_node_head_by_der_power
@ sp.diags(linear_thermal_grid_model.thermal_grid_model.der_thermal_power_vector_reference)
for linear_thermal_grid_model in self.linear_thermal_grid_models.values()
]
),
)
# Define head constant term.
optimization_problem.define_parameter(
"head_constant",
np.concatenate(
[
sp.diags(linear_thermal_grid_model.thermal_grid_model.node_head_vector_reference**-1)
@ (
np.transpose([linear_thermal_grid_model.thermal_power_flow_solution.node_head_vector])
- linear_thermal_grid_model.sensitivity_node_head_by_der_power
@ np.transpose([linear_thermal_grid_model.thermal_power_flow_solution.der_thermal_power_vector])
)
for linear_thermal_grid_model in self.linear_thermal_grid_models.values()
]
),
)
# Define branch flow variable term.
optimization_problem.define_parameter(
"branch_flow_variable",
sp.block_diag(
[
sp.diags(linear_thermal_grid_model.thermal_grid_model.branch_flow_vector_reference**-1)
@ linear_thermal_grid_model.sensitivity_branch_flow_by_der_power
@ sp.diags(linear_thermal_grid_model.thermal_grid_model.der_thermal_power_vector_reference)
for linear_thermal_grid_model in self.linear_thermal_grid_models.values()
]
),
)
# Define branch flow constant term.
optimization_problem.define_parameter(
"branch_flow_constant",
np.concatenate(
[
sp.diags(linear_thermal_grid_model.thermal_grid_model.branch_flow_vector_reference**-1)
@ (
|
np.transpose([linear_thermal_grid_model.thermal_power_flow_solution.branch_flow_vector])
|
numpy.transpose
|
import numpy as np
import cv2
def rgb2grayscale(rgb):
return rgb[:, :, 0] * 0.2989 + rgb[:, :, 1] * 0.587 + rgb[:, :, 2] * 0.114
class DenseToSparse:
def __init__(self):
pass
def dense_to_sparse(self, rgb, depth):
pass
def __repr__(self):
pass
class UniformSampling(DenseToSparse):
name = "uar"
def __init__(self, num_samples, max_depth=np.inf):
DenseToSparse.__init__(self)
self.num_samples = num_samples
self.max_depth = max_depth
def __repr__(self):
return "%s{ns=%d,md=%f}" % (self.name, self.num_samples, self.max_depth)
def dense_to_sparse(self, rgb, depth):
"""
Samples pixels with `num_samples`/#pixels probability in `depth`.
Only pixels with a maximum depth of `max_depth` are considered.
If no `max_depth` is given, samples from all pixels with valid (non-zero) depth.
"""
mask_keep = depth > 0
if self.max_depth is not np.inf:
mask_keep = np.bitwise_and(mask_keep, depth <= self.max_depth)
n_keep = np.count_nonzero(mask_keep)
if n_keep == 0:
return mask_keep
else:
prob = float(self.num_samples) / n_keep
return np.bitwise_and(mask_keep, np.random.uniform(0, 1, depth.shape) < prob)
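# A minimal usage sketch with synthetic inputs (shapes and values are assumptions
# chosen only for illustration): draw a sparse sampling mask from a random depth
# map; the number of True entries is roughly `num_samples` in expectation.
def _uniform_sampling_sketch():
    rng = np.random.default_rng(0)
    rgb = rng.random((8, 8, 3))
    depth = rng.uniform(0.5, 10.0, size=(8, 8))
    sampler = UniformSampling(num_samples=16, max_depth=8.0)
    mask = sampler.dense_to_sparse(rgb, depth)
    return int(mask.sum())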
class SimulatedStereo(DenseToSparse):
name = "sim_stereo"
def __init__(self, num_samples, max_depth=np.inf, dilate_kernel=3, dilate_iterations=1):
DenseToSparse.__init__(self)
self.num_samples = num_samples
self.max_depth = max_depth
self.dilate_kernel = dilate_kernel
self.dilate_iterations = dilate_iterations
def __repr__(self):
return "%s{ns=%d,md=%f,dil=%d.%d}" % \
(self.name, self.num_samples, self.max_depth, self.dilate_kernel, self.dilate_iterations)
# We do not use cv2.Canny, since that applies non-max suppression.
# Instead we simply:
# convert RGB to intensities,
# smooth with a Gaussian,
# take simple Sobel gradients,
# threshold the edge gradient magnitude,
# and dilate.
def dense_to_sparse(self, rgb, depth):
gray = rgb2grayscale(rgb)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
gx = cv2.Sobel(blurred, cv2.CV_64F, 1, 0, ksize=5)
gy = cv2.Sobel(blurred, cv2.CV_64F, 0, 1, ksize=5)
depth_mask = np.bitwise_and(depth != 0.0, depth <= self.max_depth)
edge_fraction = float(self.num_samples) / np.size(depth)
mag = cv2.magnitude(gx, gy)
min_mag = np.percentile(mag[depth_mask], 100 * (1.0 - edge_fraction))
mag_mask = mag >= min_mag
if self.dilate_iterations >= 0:
    kernel = np.ones((self.dilate_kernel, self.dilate_kernel), dtype=np.uint8)
    # assign the result back so the dilation actually takes effect
    mag_mask = cv2.dilate(mag_mask.astype(np.uint8), kernel, iterations=self.dilate_iterations).astype(bool)
mask =
|
np.bitwise_and(mag_mask, depth_mask)
|
numpy.bitwise_and
|
"""
@package ion_functions.test.adcp_functions
@file ion_functions/test/test_adcp_functions.py
@author <NAME>, <NAME>, <NAME>
@brief Unit tests for adcp_functions module
"""
from nose.plugins.attrib import attr
from ion_functions.test.base_test import BaseUnitTestCase
import numpy as np
from ion_functions.data import adcp_functions as af
from ion_functions.data.adcp_functions import ADCP_FILLVALUE
from ion_functions.data.generic_functions import SYSTEM_FILLVALUE
@attr('UNIT', group='func')
class TestADCPFunctionsUnit(BaseUnitTestCase):
def setUp(self):
"""
Implemented by:
2014-02-06: <NAME>. Initial Code.
2015-06-12: <NAME>. Changed raw beam data to type int. This
change did not affect any previously written unit tests.
"""
# set test inputs -- values from DPS
self.b1 = np.array([[-0.0300, -0.2950, -0.5140, -0.2340, -0.1880,
0.2030, -0.3250, 0.3050, -0.2040, -0.2940]]) * 1000
self.b2 = np.array([[0.1800, -0.1320, 0.2130, 0.3090, 0.2910,
0.0490, 0.1880, 0.3730, -0.0020, 0.1720]]) * 1000
self.b3 = np.array([[-0.3980, -0.4360, -0.1310, -0.4730, -0.4430,
0.1880, -0.1680, 0.2910, -0.1790, 0.0080]]) * 1000
self.b4 = np.array([[-0.2160, -0.6050, -0.0920, -0.0580, 0.4840,
-0.0050, 0.3380, 0.1750, -0.0800, -0.5490]]) * 1000
# the data type of the raw beam velocities is int;
# set b1-b4 to int so that fill replacement can be tested.
self.b1 = self.b1.astype(int)
self.b2 = self.b2.astype(int)
self.b3 = self.b3.astype(int)
self.b4 = self.b4.astype(int)
#
self.echo = np.array([[0, 25, 50, 75, 100, 125, 150, 175, 200, 225, 250]])
self.sfactor = 0.45
# units of compass data are in centidegrees.
self.heading = 9841
self.pitch = 69
self.roll = -254
self.orient = 1
self.lat = 50.0000
self.lon = -145.0000
self.depth = 0.0
self.ntp = 3545769600.0 # May 12, 2012
# set expected results -- velocity profiles in earth coordinates
# (values in DPS)
self.uu = np.array([[0.2175, -0.2814, -0.1002, 0.4831, 1.2380,
-0.2455, 0.6218, -0.1807, 0.0992, -0.9063]])
self.vv = np.array([[-0.3367, -0.1815, -1.0522, -0.8676, -0.8919,
0.2585, -0.8497, -0.0873, -0.3073, -0.5461]])
self.ww = np.array([[0.1401, 0.3977, 0.1870, 0.1637, 0.0091,
-0.1290, 0.0334, -0.3017, 0.1384, 0.1966]])
# set expected results -- magnetic variation correction applied
# (computed in Matlab using above values and mag_var.m)
self.uu_cor = np.array([[0.1099, -0.3221, -0.4025, 0.2092, 0.9243,
-0.1595, 0.3471, -0.1983, 0.0053, -1.0261]])
self.vv_cor = np.array([[-0.3855, -0.0916, -0.9773, -0.9707, -1.2140,
0.3188, -0.9940, -0.0308, -0.3229, -0.2582]])
# set the expected results -- error velocity
self.ee = np.array([[0.789762, 0.634704, -0.080630, 0.626434, 0.064090,
0.071326, -0.317352, 0.219148, 0.054787, 0.433129]])
# set the expected results -- echo intensity conversion from counts to dB
self.dB = np.array([[0.00, 11.25, 22.50, 33.75, 45.00, 56.25, 67.50,
78.75, 90.00, 101.25, 112.50]])
def test_adcp_beam(self):
"""
Directly tests DPA functions adcp_beam_eastward, adcp_beam_northward,
adcp_beam_vertical, and adcp_beam_error.
Tests adcp_beam2ins, adcp_ins2earth and magnetic_correction functions
for ADCPs that output data in beam coordinates. All three functions
must return the correct output for the final test cases to work.
Values based on those defined in DPS:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
Implemented by:
2013-04-10: <NAME>. Initial code.
2014-02-06: <NAME>. Added tests to confirm arrays of
arrays can be processed (in other words, vectorized the
code).
2015-06-23: <NAME>. Revised documentation. Added unit test
for the function adcp_beam_error.
Notes:
The original suite of tests within this function did not provide a
test for adcp_beam_error. However, adcp_beam_error and vadcp_beam_error
are identical functions, and vadcp_beam_error is implicitly tested in the
test_vadcp_beam function when the 4th output argument of adcp_beam2ins
is tested. Therefore values to directly test adcp_beam_error were
then derived from the function itself and included as part of the unit
test within this code (test_adcp_beam).
"""
# single record case
got_uu_cor = af.adcp_beam_eastward(self.b1, self.b2, self.b3, self.b4,
self.heading, self.pitch, self.roll, self.orient,
self.lat, self.lon, self.depth, self.ntp)
got_vv_cor = af.adcp_beam_northward(self.b1, self.b2, self.b3, self.b4,
self.heading, self.pitch, self.roll, self.orient,
self.lat, self.lon, self.depth, self.ntp)
got_ww = af.adcp_beam_vertical(self.b1, self.b2, self.b3, self.b4,
self.heading, self.pitch, self.roll, self.orient)
got_ee = af.adcp_beam_error(self.b1, self.b2, self.b3, self.b4)
# test results
np.testing.assert_array_almost_equal(got_uu_cor, self.uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, self.vv_cor, 4)
np.testing.assert_array_almost_equal(got_ww, self.ww, 4)
np.testing.assert_array_almost_equal(got_ee, self.ee, 4)
# reset the test inputs for multiple records
b1 = np.tile(self.b1, (24, 1))
b2 = np.tile(self.b2, (24, 1))
b3 = np.tile(self.b3, (24, 1))
b4 = np.tile(self.b4, (24, 1))
heading = np.ones(24, dtype=int) * self.heading
pitch = np.ones(24, dtype=int) * self.pitch
roll = np.ones(24, dtype=int) * self.roll
orient = np.ones(24, dtype=int) * self.orient
lat = np.ones(24) * self.lat
lon = np.ones(24) * self.lon
depth = np.ones(24) * self.depth
ntp = np.ones(24) * self.ntp
# reset outputs for multiple records
uu_cor = np.tile(self.uu_cor, (24, 1))
vv_cor = np.tile(self.vv_cor, (24, 1))
ww = np.tile(self.ww, (24, 1))
ee = np.tile(self.ee, (24, 1))
# multiple record case
got_uu_cor = af.adcp_beam_eastward(b1, b2, b3, b4,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
got_vv_cor = af.adcp_beam_northward(b1, b2, b3, b4,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
got_ww = af.adcp_beam_vertical(b1, b2, b3, b4,
heading, pitch, roll, orient)
got_ee = af.adcp_beam_error(b1, b2, b3, b4)
# test results
np.testing.assert_array_almost_equal(got_uu_cor, uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, vv_cor, 4)
np.testing.assert_array_almost_equal(got_ww, ww, 4)
np.testing.assert_array_almost_equal(got_ee, ee, 4)
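# A generic sketch of the standard four-beam ADCP beam-to-instrument coordinate
# transform that adcp_beam2ins implements conceptually. The 20 degree beam angle
# and the convex-head sign convention below are assumptions made for illustration
# and may differ from the constants used inside ion_functions.
def _beam2ins_sketch(b1, b2, b3, b4, beam_angle_deg=20.0):
    theta = np.radians(beam_angle_deg)
    a = 1.0 / (2.0 * np.sin(theta))
    b = 1.0 / (4.0 * np.cos(theta))
    d = a / np.sqrt(2.0)
    u = a * (b1 - b2)                 # instrument x
    v = a * (b4 - b3)                 # instrument y
    w = b * (b1 + b2 + b3 + b4)       # instrument z
    e = d * (b1 + b2 - b3 - b4)       # error velocity
    return u, v, w, e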
def test_adcp_beam_with_fill(self):
"""
Directly tests DPA functions adcp_beam_eastward, adcp_beam_northward,
adcp_beam_vertical, and adcp_beam_error when system fill values and
ADCP fill values (bad value sentinels) are present in the data stream.
Non-fill values are based on those used in test_adcp_beam in this module.
Implemented by:
2013-06-24: <NAME>. Initial code.
Notes:
"""
# for convenience
sfill = SYSTEM_FILLVALUE
afill = ADCP_FILLVALUE
### set input data
# units of compass data are in centidegrees.
heading = np.array([9841])
pitch = np.array([69])
roll = np.array([-254])
missingroll = np.array([sfill])
orient = np.array([1])
lat = np.array([50.0000])
lon = np.array([-145.0000])
depth = np.array([0.0])
ntp = np.array([3545769600.0]) # May 12, 2012
###
# for positional clarity, input beam and expected velocities will be explicitly
# enumerated for each single time record test case.
###
### single time record case; missing roll data
## the ADCP does not use its bad flag sentinel for compass data, only beam data.
## however, it is possible that CI could supply the system fillvalue for missing compass data.
# input data
# beam velocity units are mm/s
b1_x1 = np.array([[-30, -295, -514, -234, -188, 203, -325, 305, -204, -294]])
b2_x1 = np.array([[180, -132, 213, 309, 291, 49, 188, 373, -2, 172]])
b3_x1 = np.array([[-398, -436, -131, -473, -443, 188, -168, 291, -179, 8]])
b4_x1 = np.array([[-216, -605, -92, -58, 484, -5, 338, 175, -80, -549]])
# expected results if all good beam and compass data
# these will be used later in the multiple time record test
uu_x0 = np.array([[0.1099, -0.3221, -0.4025, 0.2092, 0.9243,
-0.1595, 0.3471, -0.1983, 0.0053, -1.0261]])
vv_x0 = np.array([[-0.3855, -0.0916, -0.9773, -0.9707, -1.2140,
0.3188, -0.9940, -0.0308, -0.3229, -0.2582]])
ww_x0 = np.array([[0.1401, 0.3977, 0.1870, 0.1637, 0.0091,
-0.1290, 0.0334, -0.3017, 0.1384, 0.1966]])
ee_x0 = np.array([[0.789762, 0.634704, -0.080630, 0.626434, 0.064090,
0.071326, -0.317352, 0.219148, 0.054787, 0.433129]])
# expected results for all good beam data, missing roll data;
# nans for all results except for the error velocity, which does not depend on the compass
uu_x1 = uu_x0 * np.nan
vv_x1 = vv_x0 * np.nan
ww_x1 = ww_x0 * np.nan
ee_x1 = np.copy(ee_x0)
uu_calc = af.adcp_beam_eastward(b1_x1, b2_x1, b3_x1, b4_x1, heading, pitch,
missingroll,
orient, lat, lon, depth, ntp)
vv_calc = af.adcp_beam_northward(b1_x1, b2_x1, b3_x1, b4_x1, heading, pitch,
missingroll,
orient, lat, lon, depth, ntp)
ww_calc = af.adcp_beam_vertical(b1_x1, b2_x1, b3_x1, b4_x1, heading, pitch,
missingroll,
orient)
ee_calc = af.adcp_beam_error(b1_x1, b2_x1, b3_x1, b4_x1)
# test results
np.testing.assert_array_almost_equal(uu_calc, uu_x1, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_x1, 4)
np.testing.assert_array_almost_equal(ww_calc, ww_x1, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_x1, 4)
### single time record case; missing and bad-flagged beam data, good compass data
# input data
b1_x2 = np.array([[sfill, -295, -514, -234, -188, 203, -325, afill, -204, -294]])
b2_x2 = np.array([[sfill, -132, 213, 309, 291, 49, 188, afill, -2, sfill]])
b3_x2 = np.array([[sfill, -436, -131, -473, -443, 188, -168, afill, -179, 8]])
b4_x2 = np.array([[sfill, -605, -92, -58, afill, -5, 338, afill, -80, -549]])
# expected
uu_x2 = np.array([[np.nan, -0.3221, -0.4025, 0.2092, np.nan,
-0.1595, 0.3471, np.nan, 0.0053, np.nan]])
vv_x2 = np.array([[np.nan, -0.0916, -0.9773, -0.9707, np.nan,
0.3188, -0.9940, np.nan, -0.3229, np.nan]])
ww_x2 = np.array([[np.nan, 0.3977, 0.1870, 0.1637, np.nan,
-0.1290, 0.0334, np.nan, 0.1384, np.nan]])
ee_x2 = np.array([[np.nan, 0.634704, -0.080630, 0.626434, np.nan,
0.071326, -0.317352, np.nan, 0.054787, np.nan]])
# calculated
uu_calc = af.adcp_beam_eastward(b1_x2, b2_x2, b3_x2, b4_x2,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
vv_calc = af.adcp_beam_northward(b1_x2, b2_x2, b3_x2, b4_x2,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
ww_calc = af.adcp_beam_vertical(b1_x2, b2_x2, b3_x2, b4_x2,
heading, pitch, roll, orient)
ee_calc = af.adcp_beam_error(b1_x2, b2_x2, b3_x2, b4_x2)
# test results
np.testing.assert_array_almost_equal(uu_calc, uu_x2, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_x2, 4)
np.testing.assert_array_almost_equal(ww_calc, ww_x2, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_x2, 4)
### multiple (5) record case
## reset the test inputs for 5 time records
# 1st time record is the bad/missing beam data case above
# 2nd time record is a missing heading data case
# 3rd time record is all good data
# 4th time record is bad/missing beam and missing pitch data.
# 5th time record is missing orientation data
b1 = np.vstack((b1_x2, b1_x1, b1_x1, b1_x2, b1_x1))
b2 = np.vstack((b2_x2, b2_x1, b2_x1, b2_x2, b2_x1))
b3 = np.vstack((b3_x2, b3_x1, b3_x1, b3_x2, b3_x1))
b4 = np.vstack((b4_x2, b4_x1, b4_x1, b4_x2, b4_x1))
heading = np.hstack((heading, sfill, heading, heading, heading))
pitch = np.hstack((pitch, pitch, pitch, sfill, pitch))
roll = np.tile(roll, 5)
orient = np.hstack((orient, orient, orient, orient, sfill))
lat = np.tile(lat, 5)
lon = np.tile(lon, 5)
depth = np.tile(depth, 5)
ntp = np.tile(ntp, 5)
# set expected outputs for these 5 records
# notes:
# (1) heading is not used in the calculation of vertical velocity,
# therefore the second entry to ww_xpctd is good data out (ww_x0),
# not nans as resulted from the missingroll test.
# (2) pitch is not used in the calculation of error velocity, so that
# in the mixed case (time record 4) the error velocity should be
# the same as that for the pure bad/missing beam case (ee_x2, 1st
# and 4th entries in ee_xpctd).
# (3) the orientation argument affects the roll calculation, so that
# when its value is missing (5th time record) the expected result
# would be the same as if the roll value were missing. therefore
# the 5th column entries are all x1 results.
uu_xpctd = np.vstack((uu_x2, uu_x1, uu_x0, uu_x1, uu_x1))
vv_xpctd = np.vstack((vv_x2, vv_x1, vv_x0, vv_x1, vv_x1))
ww_xpctd = np.vstack((ww_x2, ww_x0, ww_x0, ww_x1, ww_x1))
ee_xpctd = np.vstack((ee_x2, ee_x1, ee_x0, ee_x2, ee_x1))
# calculated
uu_calc = af.adcp_beam_eastward(b1, b2, b3, b4,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
vv_calc = af.adcp_beam_northward(b1, b2, b3, b4,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
ww_calc = af.adcp_beam_vertical(b1, b2, b3, b4,
heading, pitch, roll, orient)
ee_calc = af.adcp_beam_error(b1, b2, b3, b4)
# test results
np.testing.assert_array_almost_equal(uu_calc, uu_xpctd, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_xpctd, 4)
np.testing.assert_array_almost_equal(ww_calc, ww_xpctd, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_xpctd, 4)
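# A minimal sketch of the sentinel-replacement behaviour exercised above: cast
# the raw integer data to float and substitute NaN wherever either fill value
# occurs. This only illustrates the expected behaviour; it is not the library's
# own implementation.
def _replace_fill_with_nan(raw, fills=(SYSTEM_FILLVALUE, ADCP_FILLVALUE)):
    out = np.asarray(raw, dtype=float)
    out[np.isin(out, fills)] = np.nan
    return out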
def test_adcp_earth(self):
"""
Tests magnetic_correction function for ADCPs set to output data in the
Earth Coordinate system.
Values were not defined in DPS, were recreated using test values above:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
Implemented by:
2014-02-06: <NAME>. Initial code.
2015-06-10: <NAME>.
Changed adcp_ins2earth to require the units of the compass
data to be in centidegrees.
"""
# set the test data
u, v, w, e = af.adcp_beam2ins(self.b1, self.b2, self.b3, self.b4)
### old adcp_ins2earth returned 3 variables (CEW)
# adcp_ins2earth now requires compass data in units of centidegrees (RAD)
uu, vv, ww = af.adcp_ins2earth(u, v, w, self.heading, self.pitch,
self.roll, self.orient)
# test the magnetic variation correction
got_uu_cor = af.adcp_earth_eastward(uu, vv, self.depth, self.lat, self.lon, self.ntp)
got_vv_cor = af.adcp_earth_northward(uu, vv, self.depth, self.lat, self.lon, self.ntp)
np.testing.assert_array_almost_equal(got_uu_cor, self.uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, self.vv_cor, 4)
# reset the test inputs for multiple records using the integer inputs.
uu = np.tile(uu, (24, 1))
vv = np.tile(vv, (24, 1))
depth = np.ones(24) * self.depth
lat = np.ones(24) * self.lat
lon = np.ones(24) * self.lon
ntp = np.ones(24) * self.ntp
# reset expected results for multiple records
uu_cor = np.tile(self.uu_cor, (24, 1))
vv_cor = np.tile(self.vv_cor, (24, 1))
# compute the results for multiple records
got_uu_cor = af.adcp_earth_eastward(uu, vv, depth, lat, lon, ntp)
got_vv_cor = af.adcp_earth_northward(uu, vv, depth, lat, lon, ntp)
# test the magnetic variation correction
np.testing.assert_array_almost_equal(got_uu_cor, uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, vv_cor, 4)
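# A generic sketch of the magnetic-variation rotation that the
# adcp_earth_eastward / adcp_earth_northward corrections apply conceptually:
# rotate the horizontal velocity components from magnetic to true north by the
# local declination. The declination value below is an assumption for
# illustration only; the library obtains it from a geomagnetic model for the
# given position and time.
def _magnetic_rotation_sketch(u, v, declination_deg=17.0):
    theta = np.radians(declination_deg)
    u_true = u * np.cos(theta) + v * np.sin(theta)
    v_true = -u * np.sin(theta) + v * np.cos(theta)
    return u_true, v_true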
def test_adcp_earth_int_input_velocity_data(self):
"""
Tests adcp_earth_eastward and adcp_earth_northward using int type raw velocity data,
as will be supplied by CI. Also tests the almost trivial functions adcp_earth_vertical
and adcp_earth_error (unit change).
Input raw velocity values were derived from the float unit test in test_adcp_earth
by rounding the uu and vv float output from adcp_ins2earth. These int inputs failed
the assert_array_almost_equal unit tests (decimals=4) in test_adcp_earth because of
round-off error but passed when the agreement precision was relaxed to decimals=3.
This is taken as justification to more precisely calculate the expected values for
unit tests in the current module from adcp_earth_eastward and adcp_earth_northward
themselves (the very modules being tested), using as input the type int raw velocity
data. Because these DPA functions were used to derive their own check data, the
original (float type input velocity data) unit tests are retained in the
test_adcp_earth function.
The tests in this module will be used to derive unit tests checking the replacement
of ADCP int bad value sentinels (-32768) with Nans; these tests require that the
raw velocity data be of type int.
Implemented by:
2014-06-16: <NAME>. Initial code.
"""
# set the input test data [mm/sec]
uu = np.array([[218, -281, -100, 483, 1238, -245, 622, -181, 99, -906]])
vv = np.array([[-337, -182, -1052, -868, -892, 258, -850, -87, -307, -546]])
ww = np.array([[140, 398, 187, 164, 9, -129, 33, -302, 138, 197]])
ee = np.array([[790, 635, 81, 626, 64, 71, -317, 219, 55, 433]])
# expected values, calculated using adcp_earth_eastward and adcp_earth_northward
uu_cor = np.array([[0.11031103, -0.32184604, -0.40227939, 0.20903718, 0.92426103,
-0.15916447, 0.34724837, -0.19849871, 0.00522179, -1.02580274]])
vv_cor = np.array([[-0.38590734, -0.09219615, -0.97717720, -0.97109035, -1.21410442,
0.31820696, -0.99438552, -0.03046741, -0.32252555, -0.25822614]])
# expected values, calculated by changing units from mm/s to m/s
ww_vel = ww / 1000.0
ee_vel = ee / 1000.0
# test the magnetic variation correction using type integer inputs for the velocities.
got_uu_cor = af.adcp_earth_eastward(uu, vv, self.depth, self.lat, self.lon, self.ntp)
got_vv_cor = af.adcp_earth_northward(uu, vv, self.depth, self.lat, self.lon, self.ntp)
# and the unit change functions
got_ww_vel = af.adcp_earth_vertical(ww)
got_ee_vel = af.adcp_earth_error(ee)
np.testing.assert_array_almost_equal(got_uu_cor, uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, vv_cor, 4)
np.testing.assert_array_almost_equal(got_ww_vel, ww_vel, 4)
np.testing.assert_array_almost_equal(got_ee_vel, ee_vel, 4)
# reset the test inputs for multiple records using the integer inputs.
uu = np.tile(uu, (24, 1))
vv = np.tile(vv, (24, 1))
ww = np.tile(ww, (24, 1))
ee = np.tile(ee, (24, 1))
depth = np.ones(24) * self.depth
lat = np.ones(24) * self.lat
lon = np.ones(24) * self.lon
ntp = np.ones(24) * self.ntp
# reset expected results for multiple records
uu_cor = np.tile(uu_cor, (24, 1))
vv_cor = np.tile(vv_cor, (24, 1))
ww_vel = np.tile(ww_vel, (24, 1))
ee_vel = np.tile(ee_vel, (24, 1))
# compute the results for multiple records
got_uu_cor = af.adcp_earth_eastward(uu, vv, depth, lat, lon, ntp)
got_vv_cor = af.adcp_earth_northward(uu, vv, depth, lat, lon, ntp)
got_ww_vel = af.adcp_earth_vertical(ww)
got_ee_vel = af.adcp_earth_error(ee)
# test the magnetic variation correction
np.testing.assert_array_almost_equal(got_uu_cor, uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, vv_cor, 4)
# and the unit change functions
np.testing.assert_array_almost_equal(got_ww_vel, ww_vel, 4)
np.testing.assert_array_almost_equal(got_ee_vel, ee_vel, 4)
def test_adcp_earth_with_fill(self):
"""
Tests adcp_earth_eastward, adcp_earth_northward, adcp_earth_vertical and
adcp_earth_error when system fill values and ADCP fill values (bad value
sentinels) are present in the data stream.
Non-fill test values come from the function test_adcp_earth_int_input_velocity_data
in this module.
Implemented by:
2014-06-25: <NAME>. Initial code.
"""
# for convenience
sfill = SYSTEM_FILLVALUE
afill = ADCP_FILLVALUE
### scalar time case
# set the input test data
lat = np.array([50.0000])
lon = np.array([-145.0000])
depth = np.array([0.0])
ntp = np.array([3545769600.0]) # May 12, 2012
# input velocities [mm/sec]
uu_in0 = np.array([[218, sfill, -100, 483, afill, -245]])
vv_in0 = np.array([[sfill, -182, -1052, -868, -892, afill]])
ww_in0 = np.array([[sfill, 398, afill, 164, 9, -129]])
ee_in0 = np.array([[afill, 635, 81, 626, sfill, 71]])
# expected values [m/sec]
uu_x0 = np.array([[np.nan, np.nan, -0.40227, 0.20903, np.nan, np.nan]])
vv_x0 = np.array([[np.nan, np.nan, -0.97717, -0.97109, np.nan, np.nan]])
ww_x0 = np.array([[np.nan, 0.398, np.nan, 0.164, 0.009, -0.129]])
ee_x0 = np.array([[np.nan, 0.635, 0.081, 0.626, np.nan, 0.071]])
# calculated
uu_calc = af.adcp_earth_eastward(uu_in0, vv_in0, depth, lat, lon, ntp)
vv_calc = af.adcp_earth_northward(uu_in0, vv_in0, depth, lat, lon, ntp)
ww_calc = af.adcp_earth_vertical(ww_in0)
ee_calc = af.adcp_earth_error(ee_in0)
# test
np.testing.assert_array_almost_equal(uu_calc, uu_x0, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_x0, 4)
np.testing.assert_array_almost_equal(ww_calc, ww_x0, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_x0, 4)
### multiple time record case
# set the input test data
lat = np.tile(lat, 5)
lon = np.tile(lon, 5)
depth = np.tile(depth, 5)
ntp = np.tile(ntp, 5)
uu_in0 = np.tile(uu_in0, (5, 1))
vv_in0 = np.tile(vv_in0, (5, 1))
ww_in0 = np.tile(ww_in0, (5, 1))
ee_in0 = np.tile(ee_in0, (5, 1))
# expected
uu_x0 = np.tile(uu_x0, (5, 1))
vv_x0 = np.tile(vv_x0, (5, 1))
ww_x0 = np.tile(ww_x0, (5, 1))
ee_x0 = np.tile(ee_x0, (5, 1))
# calculated
uu_calc = af.adcp_earth_eastward(uu_in0, vv_in0, depth, lat, lon, ntp)
vv_calc = af.adcp_earth_northward(uu_in0, vv_in0, depth, lat, lon, ntp)
ww_calc = af.adcp_earth_vertical(ww_in0)
ee_calc = af.adcp_earth_error(ee_in0)
# test
np.testing.assert_array_almost_equal(uu_calc, uu_x0, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_x0, 4)
np.testing.assert_array_almost_equal(ww_calc, ww_x0, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_x0, 4)
def test_adcp_backscatter(self):
"""
Tests echo intensity scaling function (adcp_backscatter) for ADCPs
in order to convert from echo intensity in counts to dB.
Values were not defined in DPS, were created using test values above:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
Implemented by <NAME>, 2014-02-06
<NAME>, 2015-06-25. Added tests for fill values.
"""
# the single record case
got = af.adcp_backscatter(self.echo, self.sfactor)
np.testing.assert_array_almost_equal(got, self.dB, 4)
# the multi-record case -- inputs
raw = np.tile(self.echo, (24, 1))
sf = np.ones(24) * self.sfactor
# the multi-record case -- outputs
dB = np.tile(self.dB, (24, 1))
got = af.adcp_backscatter(raw, sf)
np.testing.assert_array_almost_equal(got, dB, 4)
### test fill value replacement with nan
# for convenience
sfill = SYSTEM_FILLVALUE
# the adcp bad sentinel fillvalue (requires 2 bytes) is not used for echo
# intensity, which is stored in 1 byte.
# the single time record case
echo_with_fill, xpctd = np.copy(self.echo), np.copy(self.dB)
echo_with_fill[0, 3], xpctd[0, 3] = sfill, np.nan
echo_with_fill[0, 6], xpctd[0, 6] = sfill, np.nan
echo_with_fill[0, 7], xpctd[0, 7] = sfill, np.nan
got = af.adcp_backscatter(echo_with_fill, self.sfactor)
np.testing.assert_array_almost_equal(got, xpctd, 4)
# the multiple time record case
echo_with_fill = np.vstack((echo_with_fill, self.echo, echo_with_fill))
xpctd = np.vstack((xpctd, self.dB, xpctd))
sfactor = np.tile(self.sfactor, (3, 1))
got = af.adcp_backscatter(echo_with_fill, sfactor)
np.testing.assert_array_almost_equal(got, xpctd, 4)
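# The counts-to-dB conversion exercised above is a linear scaling,
# dB = counts * sfactor (e.g. 25 counts * 0.45 dB/count = 11.25 dB, matching
# self.dB in setUp). A one-line sketch of that scaling only; the real
# adcp_backscatter additionally handles fill-value replacement as tested above.
def _backscatter_sketch(counts, sfactor=0.45):
    return np.asarray(counts, dtype=float) * sfactor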
def test_vadcp_beam(self):
"""
Indirectly tests vadcp_beam_eastward, vadcp_beam_northward,
vadcp_beam_vertical_est, and vadcp_beam_vertical_true functions (which
call adcp_beam2ins and adcp_ins2earth) and vadcp_beam_error (which only
calls adcp_beam2ins) for the specialized 5-beam ADCP. Application of
the magnetic correction and conversion from mm/s to m/s is not applied.
Values based on those defined in DPS:
OOI (2012). Data Product Specification for Turbulent Velocity Profile
and Echo Intensity. Document Control Number 1341-00760.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00760_Data_Product_SPEC_VELTURB_OOI.pdf)
Implemented by:
2014-07-24: <NAME>. Initial code.
2015-06-10: <NAME>.
adcp_ins2earth now requires the units of the compass
data to be in centidegrees.
"""
# test inputs
b1 = np.ones((10, 10)).astype(int) * -325
b2 =
|
np.ones((10, 10))
|
numpy.ones
|
"""
First created on Mon Aug 13 10:01:03 2018
Module used for analysis of the image created from Zernike analysis
Versions:
Oct 31, 2019: 0.22 -> 0.22b introduced verbosity
Mar 10, 2020: 0.22b -> 0.23 if std too small, disregard error calculation
Apr 01, 2020: 0.23 -> 0.24 added options to create_basic_comparison_plot
Apr 29, 2020: 0.24 -> 0.24a added check for image for both side of defocus in create_solution
Jun 17, 2020: 0.24a -> 0.24b cleaned the STAMPS_FOLDER specification
Jun 25, 2020: 0.24b -> 0.25 improved create_res_data
Jul 03, 2020: 0.25 -> 0.26 included multi analysis
Jul 15, 2020: 0.26 -> 0.26a modified to include PSF_DIRECTORY
Sep 08, 2020: 0.26a -> 0.26b small changed around create_chains functions
Dec 07, 2020: 0.26b -> 0.26c added dataset=6
Feb 04, 2021: 0.26c -> 0.26d finalAr_Feb2020.pkl to finalAr_Feb2020
Feb 25, 2021: 0.26d -> 0.26e different folder for dataset=6 for tiger
Mar 10, 2021: 0.26e -> 0.26f added mask options for create_basic_comparison_plot
Mar 24, 2021: 0.26f -> 0.26g updated create_res_data and find_centroid
Apr 02, 2021: 0.26g -> 0.26h added option to save in create_basic_comparison_plot
Apr 21, 2021: 0.26h -> 0.26i expanded support for Tiger
Jul 26, 2021: 0.26i -> 0.26j changed default directory on local machine to point to Saturn_USA
Sep 28. 2021: 0.26j -> 0.26k modified parameters in plot_1D_residual
Nov 20. 2021: 0.26k -> 0.26l Hilo modifications
@author: <NAME>
@contact: <EMAIL>
@web: www.ncaplar.com
"""
########################################
# standard library imports
from __future__ import absolute_import, division, print_function
import os
# import time
# import sys
# import math
import socket
# os.environ["MKL_NUM_THREADS"] = "1"
# os.environ["NUMEXPR_NUM_THREADS"] = "1"
# os.environ["OMP_NUM_THREADS"] = "1"
import numpy as np
# print(np.__config__)
# from multiprocessing import current_process
# from functools import lru_cache
# from tqdm import tqdm
# import pyfftw
# import pandas as pd
########################################
# Related third party imports
# none at the moment
########################################
# Local application/library specific imports
# galsim
import galsim
# astropy
# import astropy
# import astropy.convolution
# from astropy.convolution import Gaussian2DKernel
from astropy.stats import bootstrap
# scipy and skimage
import scipy.misc
# import skimage.transform
import scipy.optimize as optimize
# from scipy.ndimage.filters import gaussian_filter
# pickle
import pickle
# lmfit
# import lmfit
# matplotlib
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# pandas
import pandas as pd
# needed for resizing routines
from typing import Tuple, Iterable
np.set_printoptions(suppress=True)
np.seterr(divide='ignore', invalid='ignore')
galsim.GSParams.maximum_fft_size = 12000
########################################
__all__ = ['Zernike_Analysis', 'Zernike_result_analysis', 'create_mask', 'resize', 'create_res_data']
__version__ = "0.26l"
############################################################
# name your directory where you want to have files!
if socket.gethostname() == 'IapetusUSA':
PSF_DIRECTORY = '/Volumes/Saturn_USA/PFS/'
elif socket.gethostname() == 'pfsa-usr01-gb.subaru.nao.ac.jp' or \
socket.gethostname() == 'pfsa-usr02-gb.subaru.nao.ac.jp':
PSF_DIRECTORY = '/work/ncaplar/'
else:
PSF_DIRECTORY = '/tigress/ncaplar/'
############################################################
TESTING_FOLDER = PSF_DIRECTORY + 'Testing/'
TESTING_PUPIL_IMAGES_FOLDER = TESTING_FOLDER + 'Pupil_Images/'
TESTING_WAVEFRONT_IMAGES_FOLDER = TESTING_FOLDER + 'Wavefront_Images/'
TESTING_FINAL_IMAGES_FOLDER = TESTING_FOLDER + 'Final_Images/'
class Zernike_Analysis(object):
"""Class for analysing results of the cluster run
"""
def __init__(self, date, obs, single_number, eps, arc=None, dataset=None,
multi_var=False, list_of_defocuses=None, verbosity=1):
"""!
@param[in] date date
@param[in] obs observation
@param[in] single_number single number determining which spot we are analyzing
@param[in] eps analysis parameter
@param[in] arc arc-lamp used
@param[in] dataset dataset number
@param[in] multi_var is this multi analysis
@param[in] list_of_defocuses at which defocuses we are analyzing
"""
############
# initializing
###########
if arc is None:
arc = ''
self.date = date
self.obs = obs
self.single_number = single_number
self.eps = eps
self.arc = arc
self.multi_var = multi_var
self.list_of_defocuses = list_of_defocuses
method = 'P'
self.method = method
self.verbosity = verbosity
#############
# where are poststamps of spots located
if dataset == 0:
STAMPS_FOLDER = PSF_DIRECTORY+"Data_Nov_14/Stamps_cleaned/"
if dataset == 1:
STAMPS_FOLDER = PSF_DIRECTORY+"ReducedData/Data_Feb_5/Stamps_cleaned/"
if dataset == 2:
STAMPS_FOLDER = PSF_DIRECTORY+"ReducedData/Data_May_28/Stamps_cleaned/"
if dataset == 3:
STAMPS_FOLDER = PSF_DIRECTORY+"ReducedData/Data_Jun_25/Stamps_cleaned/"
if dataset == 4 or dataset == 5:
STAMPS_FOLDER = PSF_DIRECTORY+"ReducedData/Data_Aug_14/Stamps_cleaned/"
if dataset == 6:
if socket.gethostname() == 'IapetusUSA':
STAMPS_FOLDER = PSF_DIRECTORY+"ReducedData/Data_Nov_20_2020/Stamps_cleaned/"
elif socket.gethostname() == 'pfsa-usr01-gb.subaru.nao.ac.jp' or \
socket.gethostname() == 'pfsa-usr02-gb.subaru.nao.ac.jp':
STAMPS_FOLDER = '/work/ncaplar/ReducedData/Data_Nov_20/Stamps_cleaned/'
else:
STAMPS_FOLDER = PSF_DIRECTORY+"ReducedData/Data_Nov_20/Stamps_cleaned/"
if dataset == 7:
STAMPS_FOLDER = PSF_DIRECTORY+"ReducedData/Data_May_21_2021/Stamps_cleaned/"
if dataset == 8:
if socket.gethostname() == 'IapetusUSA':
STAMPS_FOLDER = '/Volumes/Saturn_USA/PFS/'+"ReducedData/Data_May_21/Stamps_cleaned/"
elif socket.gethostname() == 'pfsa-usr01-gb.subaru.nao.ac.jp' or \
socket.gethostname() == 'pfsa-usr02-gb.subaru.nao.ac.jp':
STAMPS_FOLDER = '/work/ncaplar/ReducedData/Data_May_25_2021/Stamps_cleaned/'
else:
STAMPS_FOLDER = '/tigress/ncaplar/ReducedData/Data_May_25_2021/Stamps_cleaned/'
print('STAMPS_FOLDER: '+str(STAMPS_FOLDER))
# which observation numbers associated with each dataset
if dataset == 0:
if arc is not None:
if arc == "HgAr":
single_number_focus = 8603
elif arc == "Ne":
single_number_focus = 8693
if dataset == 1:
# F/3.4 stop
if arc is not None:
if arc == "HgAr":
single_number_focus = 11748
obs_possibilites = np.array([11796, 11790, 11784, 11778, 11772, 11766, 11760, 11754,
11748, 11748, 11694, 11700, 11706, 11712, 11718, 11724,
11730, 11736])
elif arc == "Ne":
single_number_focus = 11748+607
obs_possibilites = np.array([12403, 12397, 12391, 12385, 12379, 12373,
12367, 12361, 12355, 12355, 12349, 12343,
12337, 12331, 12325, 12319, 12313, 12307])
if dataset == 2:
# F/2.8 stop
if arc is not None:
if arc == "HgAr":
single_number_focus = 17017+54
obs_possibilites = np.array([17023, 17023+6, 17023+12, 17023+18, 17023+24, 17023+30,
17023+36, 17023+42, -99, 17023+48, 17023+54, 17023+60,
17023+66, 17023+72, 17023+78, 17023+84, 17023+90, 17023+96,
17023+48])
if arc == "Ne":
single_number_focus = 16292
obs_possibilites = np.array([16238+6, 16238+12, 16238+18,
16238+24, 16238+30, 16238+36,
16238+42, 16238+48, -99, 16238+54,
16238+60, 16238+66, 16238+72,
16238+78, 16238+84, 16238+90,
16238+96, 16238+102, 16238+54])
if arc == "Kr":
single_number_focus = 17310+54
obs_possibilites = np.array([17310+6, 17310+12, 17310+18,
17310+24, 17310+30, 17310+36,
17310+42, 17310+48, -99, 17310+54,
17310+60, 17310+66, 17310+72,
17310+78, 17310+84, 17310+90,
17310+96, 17310+102, 17310+54])
if dataset == 3:
# F/2.5 stop
if arc is not None:
if arc == "HgAr":
single_number_focus = 19238+54
obs_possibilites = np.array([19238, 19238+6, 19238+12,
19238+18, 19238+24, 19238+30,
19238+36, 19238+42, -99, 19238+48,
19238+54, 19238+60, 19238+66,
19238+72, 19238+78, 19238+84,
19238+90, 19238+96, 19238+48])
elif arc == "Ne":
single_number_focus = 19472
obs_possibilites = np.array([19472+6, 19472+12, 19472+18,
19472+24, 19472+30, 19472+36,
19472+42, 19472+48, -99, 19472+54,
19472+60, 19472+66, 19472+72,
19472+78, 19472+84, 19472+90,
19472+96, 19472+102, 19472+54])
if dataset == 4:
# F/2.8 stop, July LAM data, full defocus
if arc is not None:
if arc == "HgAr":
single_number_focus = 21346+54
obs_possibilites = np.array([21346+6, 21346+12, 21346+18,
21346+24, 21346+30, 21346+36,
21346+42, 21346+48, -99, 21346+54,
21346+60, 21346+66, 21346+72,
21346+78, 21346+84, 21346+90,
21346+96, 21346+102, 21346+48])
if arc == "Ne":
single_number_focus = 21550+54
obs_possibilites = np.array([21550+6, 21550+12, 21550+18,
21550+24, 21550+30, 21550+36,
21550+42, 21550+48, -99, 21550+54,
21550+60, 21550+66, 21550+72,
21550+78, 21550+84, 21550+90,
21550+96, 21550+102, 21550+54])
if str(arc) == "Kr":
single_number_focus = 21754+54
obs_possibilites = np.array([21754+6, 21754+12, 21754+18,
21754+24, 21754+30, 21754+36,
21754+42, 21754+48, -99, 21754+54,
21754+60, 21754+66, 21754+72,
21754+78, 21754+84, 21754+90,
21754+96, 21754+102, 21754+54])
if dataset == 5:
# F/2.8 stop, July LAM data, fine defocus
if arc == 'HgAr':
obs_possibilites = np.arange(21280, 21280+11*6, 6)
if arc == 'Ne':
obs_possibilites = np.arange(21484, 21484+11*6, 6)
if arc == 'Kr':
obs_possibilites = np.arange(21688, 21688+11*6, 6)
if dataset == 6:
if arc == 'Ar':
single_number_focus = 34341+48
obs_possibilites = np.array([34341, 34341+6, 34341+12,
34341+18, 34341+24, 34341+30,
34341+36, 34341+42, 34341+48, 34341+48,
34341+54, 34341+60, 34341+66,
34341+72, 34341+78, 34341+84,
34341+90, 34341+96, 21346+48])
if arc == 'Ne':
single_number_focus = 34217+48
obs_possibilites = np.array([34217, 34217+6, 34217+12,
34217+18, 34217+24, 34217+30,
34217+36, 34217+42, 34217+48, 34217+48,
34217+54, 34217+60, 34217+66,
34217+72, 34217+78, 34217+84,
34217+90, 34217+96, 34217+48])
if arc == 'Kr':
single_number_focus = 34561+48
obs_possibilites = np.array([34561, 34561+6, 34561+12,
34561+18, 34561+24, 34561+30,
34561+36, 34561+42, 34561+48, 34561+48,
34561+54, 34561+60, 34561+66,
34561+72, 34561+78, 34561+84,
34561+90, 34561+96, 34561+48])
if dataset == 7:
# if str(arc) == "Ar":
# single_number_focus=34341+48
if str(arc) == "Ne":
single_number_focus = 27677
if multi_var is True:
obs_multi = 27719
obs_possibilites = np.array([27713, -999, 27683,
-999, -999, -999, -999,
-999, 27677, -999,
-999, -999, -999,
-999, -999, 27698,
-999, 27719, -999])
if dataset == 8:
if arc == 'Ar':
single_number_focus = 51485+8*12
obs_possibilites = np.array([51485, 51485+12, 51485+2*12,
51485+3*12, 51485+4*12, 51485+5*12,
51485+6*12, 51485+7*12, 51485+8*12,
52085+8*12, 51485+9*12, 51485+10*12,
51485+11*12, 51485+12*12, 51485+13*12,
51485+14*12, 51485+15*12, 51485+16*12, 51485+8*12])
if arc == 'Ne':
single_number_focus = 59655+8*12
obs_possibilites = np.array([59655, 59655+12, 59655+2*12,
59655+3*12, 59655+4*12, 59655+5*12,
59655+6*12, 59655+7*12, 59655+8*12,
52085+8*12, 59655+9*12, 59655+10*12,
59655+11*12, 59655+12*12, 59655+13*12,
59655+14*12, 59655+15*12, 59655+16*12, 59655+8*12])
if arc == 'Kr':
single_number_focus = 52085+8*12
obs_possibilites = np.array([52085, 52085+12, 52085+2*12,
52085+3*12, 52085+4*12, 52085+5*12,
52085+6*12, 52085+7*12, 52085+8*12,
52085+8*12, 52085+9*12, 52085+10*12,
52085+11*12, 52085+12*12, 52085+13*12,
52085+14*12, 52085+15*12, 52085+16*12, 52085+8*12])
# elif str(arc)=="Kr":
# single_number_focus=34561+48
# if multi ??
if multi_var is True and dataset < 7:
obs_multi = single_number_focus + 48
# if multi ??
if multi_var is True and dataset == 8:
obs_multi = single_number_focus + 96
if multi_var is True:
self.obs_multi = obs_multi
obs_single = obs
self.obs_single = obs_single
label = ['m4', 'm35', 'm3',
'm25', 'm2', 'm15',
'm1', 'm05', '0', '0d',
'p05', 'p1', 'p15',
'p2', 'p25', 'p3',
'p35', 'p4', '0p']
label_fine_defocus = ['m05ff', 'm04ff', 'm03ff',
'm02ff', 'm01ff', '0ff',
'p01ff', 'p02ff', 'p03ff',
'p04ff', 'p05ff']
if type(obs) == str:
labelInput = obs
obs = obs_possibilites[label.index(labelInput)]
obs_int = int(obs)
if dataset in [0, 1, 2, 3, 4, 6, 7]:
labelInput = label[list(obs_possibilites).index(obs_int)]
if dataset in [5]:
labelInput = label_fine_defocus[list(obs_possibilites).index(obs_int)]
if multi_var is True:
if self.verbosity == 1:
print('labelInput: ' + str(labelInput))
print('self.single_number: '+str(self.single_number))
index_of_single_image_in_list_of_images = self.list_of_defocuses.index(labelInput)
self.index_of_single_image_in_list_of_images = index_of_single_image_in_list_of_images
list_of_obs = []
if multi_var is True:
for labelInput in self.list_of_defocuses:
if dataset in [0, 1, 2, 3, 4, 6, 7, 8]:
obs_single = obs_possibilites[label.index(labelInput)]
if dataset in [5]:
obs_single = obs_possibilites[label_fine_defocus.index(labelInput)]
list_of_obs.append(obs_single)
else:
list_of_obs.append(obs_single)
##########################
# import data
##########################
if multi_var is True:
list_of_sci_images = []
list_of_mask_images = []
list_of_var_images = []
if self.verbosity == 1:
print('list_of_defocuses: ' + str(self.list_of_defocuses))
print('list_of_obs: ' + str(list_of_obs))
# for obs_v in list_of_obs:
# if obs_v>0:
# sci_image =np.load(STAMPS_FOLDER+'sci'+str(obs_v)+str(single_number)+
# str(arc)+'_Stacked.npy')
# mask_image =np.load(STAMPS_FOLDER+'mask'+str(obs_v)+str(single_number)+
# str(arc)+'_Stacked.npy')
# var_image =np.load(STAMPS_FOLDER+'var'+str(obs_v)+str(single_number)+
# str(arc)+'_Stacked.npy')
# else:
# # if the image is not avaliable (has obs_v negative) make some dummy images
# sci_image=np.ones((20,20))
# mask_image=np.ones((20,20))
# var_image=np.ones((20,20))
for obs_v in list_of_obs:
try:
sci_image = np.load(STAMPS_FOLDER+'sci'+str(obs_v)+
str(single_number)+str(arc)+'_Stacked.npy')
mask_image = np.load(STAMPS_FOLDER+'mask'+str(obs_v)+
str(single_number)+str(arc)+'_Stacked.npy')
var_image = np.load(STAMPS_FOLDER+'var'+str(obs_v)+
str(single_number)+str(arc)+'_Stacked.npy')
except:
                    # if the image is not available (has obs_v negative) make some dummy images
sci_image = np.ones((20,20))
mask_image = np.ones((20,20))
var_image = np.ones((20,20))
list_of_sci_images.append(sci_image)
list_of_mask_images.append(mask_image)
list_of_var_images.append(var_image)
sci_image =np.load(STAMPS_FOLDER+'sci'+str(obs)+str(single_number)+str(arc)+'_Stacked.npy')
mask_image =np.load(STAMPS_FOLDER+'mask'+str(obs)+str(single_number)+str(arc)+'_Stacked.npy')
var_image =np.load(STAMPS_FOLDER+'var'+str(obs)+str(single_number)+str(arc)+'_Stacked.npy')
try:
sci_image_focus_large =np.load(STAMPS_FOLDER+'sci'+str(single_number_focus)+str(single_number)+str(arc)+'_Stacked_large.npy')
var_image_focus_large =np.load(STAMPS_FOLDER+'var'+str(single_number_focus)+str(single_number)+str(arc)+'_Stacked_large.npy')
except:
pass
self.list_of_sci_images=list_of_sci_images
self.list_of_mask_images=list_of_mask_images
self.list_of_var_images=list_of_var_images
self.sci_image=sci_image
self.var_image=var_image
self.mask_image=mask_image
self.STAMPS_FOLDER=STAMPS_FOLDER
if dataset==1:
if arc=="HgAr":
finalArc=finalHgAr_Feb2019
elif arc=="Ne":
finalArc=finalNe_Feb2019
else:
print("Not recognized arc-line")
if dataset==2:
with open(PSF_DIRECTORY+'ReducedData/Data_May_28/Dataframes/finalNe_May2019.pkl', 'rb') as f:
finalNe_May2019=pickle.load(f)
with open(PSF_DIRECTORY+'ReducedData/Data_May_28/Dataframes/finalHgAr_May2019.pkl', 'rb') as f:
finalHgAr_May2019=pickle.load(f)
with open(PSF_DIRECTORY+'ReducedData/Data_May_28/Dataframes/finalKr_May2019.pkl', 'rb') as f:
finalKr_May2019=pickle.load(f)
if arc=="HgAr":
finalArc=finalHgAr_May2019
elif arc=="Ne":
finalArc=finalNe_May2019
elif arc=="Kr":
finalArc=finalKr_May2019
else:
print("Not recognized arc-line")
if dataset==3:
with open(PSF_DIRECTORY+'ReducedData/Data_Jun_25/Dataframes/finalNe_May2019.pkl', 'rb') as f:
finalNe_May2019=pickle.load(f)
with open(PSF_DIRECTORY+'ReducedData/Data_Jun_25/Dataframes/finalHgAr_May2019.pkl', 'rb') as f:
finalHgAr_May2019=pickle.load(f)
with open(PSF_DIRECTORY+'ReducedData/Data_Jun_25/Dataframes/finalKr_May2019.pkl', 'rb') as f:
finalKr_May2019=pickle.load(f)
if arc=="HgAr":
finalArc=finalHgAr_May2019
elif arc=="Ne":
finalArc=finalNe_May2019
else:
print("Not recognized arc-line")
if dataset==4 or dataset==5:
with open(PSF_DIRECTORY+'ReducedData/Data_Aug_14/Dataframes/finalHgAr_Feb2020', 'rb') as f:
print(f)
finalHgAr_Feb2020_dataset=pickle.load(f)
with open(PSF_DIRECTORY+'ReducedData/Data_Aug_14/Dataframes/finalNe_Feb2020', 'rb') as f:
finalNe_Feb2020_dataset=pickle.load(f)
with open(PSF_DIRECTORY+'ReducedData/Data_Aug_14/Dataframes/finalKr_Feb2020', 'rb') as f:
finalKr_Feb2020_dataset=pickle.load(f)
if arc=="HgAr":
finalArc=finalHgAr_Feb2020_dataset
elif arc=="Ne":
finalArc=finalNe_Feb2020_dataset
elif arc=="Kr":
finalArc=finalKr_Feb2020_dataset
else:
print("Not recognized arc-line")
if dataset==6 or dataset==7:
if socket.gethostname()=='IapetusUSA':
with open(PSF_DIRECTORY+'ReducedData/Data_Nov_20_2020/Dataframes/finalHgAr_Feb2020', 'rb') as f:
finalHgAr_Feb2020_dataset=pickle.load(f)
with open(PSF_DIRECTORY+'ReducedData/Data_Nov_20_2020/Dataframes/finalNe_Feb2020', 'rb') as f:
finalNe_Feb2020_dataset=pickle.load(f)
with open(PSF_DIRECTORY+'ReducedData/Data_Nov_20_2020/Dataframes/finalKr_Feb2020', 'rb') as f:
finalKr_Feb2020_dataset=pickle.load(f)
with open(PSF_DIRECTORY+'ReducedData/Data_Nov_20_2020/Dataframes/finalAr_Feb2020', 'rb') as f:
finalAr_Feb2020_dataset=pickle.load(f)
else:
#with open(PSF_DIRECTORY+'ReducedData/Data_Nov_20/Dataframes/finalHgAr_Feb2020', 'rb') as f:
# finalHgAr_Feb2020_dataset=pickle.load(f)
#with open(PSF_DIRECTORY+'ReducedData/Data_Nov_20/Dataframes/finalNe_Feb2020', 'rb') as f:
# finalNe_Feb2020_dataset=pickle.load(f)
#with open(PSF_DIRECTORY+'ReducedData/Data_Nov_20/Dataframes/finalKr_Feb2020', 'rb') as f:
# finalKr_Feb2020_dataset=pickle.load(f)
#with open(PSF_DIRECTORY+'ReducedData/Data_Nov_20/Dataframes/finalAr_Feb2020', 'rb') as f:
# finalAr_Feb2020_dataset=pickle.load(f)
finalHgAr_Feb2020_dataset = np.load(PSF_DIRECTORY
+ 'ReducedData/Data_Nov_20/Dataframes/finalHgAr_Feb2020',
allow_pickle=True)
finalNe_Feb2020_dataset = np.load(PSF_DIRECTORY
+ 'ReducedData/Data_Nov_20/Dataframes/finalNe_Feb2020',
allow_pickle=True)
finalKr_Feb2020_dataset = np.load(PSF_DIRECTORY
+ 'ReducedData/Data_Nov_20/Dataframes/finalKr_Feb2020',
allow_pickle=True)
finalAr_Feb2020_dataset = np.load(PSF_DIRECTORY
+ 'ReducedData/Data_Nov_20/Dataframes/finalAr_Feb2020',
allow_pickle=True)
if arc=="HgAr":
finalArc=finalHgAr_Feb2020_dataset
elif arc=="Ne":
finalArc=finalNe_Feb2020_dataset
elif arc=="Kr":
finalArc=finalKr_Feb2020_dataset
elif arc=="Ar":
finalArc=finalAr_Feb2020_dataset
else:
print("Not recognized arc-line")
if dataset==8:
if socket.gethostname()=='IapetusUSA':
with open('/Volumes/Saturn_USA/PFS/'+'ReducedData/Data_May_21/DataFrames/finalNe_Jul2021', 'rb') as f:
finalArc=pickle.load(f)
with open('/Volumes/Saturn_USA/PFS/'+'ReducedData/Data_May_21/DataFrames/finalKr_Jul2021', 'rb') as f:
finalArc=pickle.load(f)
with open('/Volumes/Saturn_USA/PFS/'+'ReducedData/Data_May_21/DataFrames/finalAr_Jul2021', 'rb') as f:
finalArc=pickle.load(f)
else:
finalNe_Feb2020_dataset=np.load(PSF_DIRECTORY+'ReducedData/Data_May_25_2021/Dataframes/finalNe_Jul2021')
finalKr_Feb2020_dataset=np.load(PSF_DIRECTORY+'ReducedData/Data_May_25_2021/Dataframes/finalKr_Jul2021')
finalAr_Feb2020_dataset=np.load(PSF_DIRECTORY+'ReducedData/Data_May_25_2021/Dataframes/finalAr_Jul2021')
if arc=="Ne":
finalArc=finalNe_Feb2020_dataset
elif arc=="Kr":
finalArc=finalKr_Feb2020_dataset
elif arc=="Ar":
finalArc=finalAr_Feb2020_dataset
else:
print("Not recognized arc-line")
##########################
# import column names
##########################
"""
columns=['z4','z5','z6','z7','z8','z9','z10','z11',
'hscFrac','strutFrac','dxFocal','dyFocal','slitFrac','slitFrac_dy',
'radiometricEffect','radiometricExponent',
'x_ilum','y_ilum',
'x_fiber','y_fiber','effective_ilum_radius','frd_sigma','det_vert','slitHolder_frac_dx',
'grating_lines','scattering_radius','scattering_slope','scattering_amplitude',
'pixel_effect','fiber_r','flux']
columns22=['z4','z5','z6','z7','z8','z9','z10','z11',
'z12','z13','z14','z15','z16','z17','z18','z19','z20','z21','z22',
'hscFrac','strutFrac','dxFocal','dyFocal','slitFrac','slitFrac_dy',
'radiometricEffect','radiometricExponent','x_ilum','y_ilum',
'x_fiber','y_fiber','effective_radius_illumination',
'frd_sigma','frd_lorentz_factor','det_vert','slitHolder_frac_dx',
'grating_lines','scattering_slope','scattering_amplitude',
'pixel_effect','fiber_r','flux']
"""
columns=['z4','z5','z6','z7','z8','z9','z10','z11',
'hscFrac','strutFrac','dxFocal','dyFocal','slitFrac','slitFrac_dy',
'wide_0','wide_23','wide_43','misalign',
'x_fiber','y_fiber','effective_ilum_radius','frd_sigma','det_vert','slitHolder_frac_dx',
'grating_lines','scattering_radius','scattering_slope','scattering_amplitude',
'pixel_effect','fiber_r','flux']
columns22=['z4','z5','z6','z7','z8','z9','z10','z11',
'z12','z13','z14','z15','z16','z17','z18','z19','z20','z21','z22',
'hscFrac','strutFrac','dxFocal','dyFocal','slitFrac','slitFrac_dy',
'wide_0','wide_23','wide_43','misalign',
'x_fiber','y_fiber','effective_radius_illumination',
'frd_sigma','frd_lorentz_factor','det_vert','slitHolder_frac_dx',
'grating_lines','scattering_slope','scattering_amplitude',
'pixel_effect','fiber_r','flux']
columns22_analysis=columns22+['chi2','chi2max']
self.columns=columns
self.columns22=columns22
self.columns22_analysis=columns22_analysis
##########################
# where are results from Tiger placed
##########################
############################################################
# name your directory where you want to have files!
if socket.gethostname()=='IapetusUSA':
RESULT_FOLDER=PSF_DIRECTORY+'TigerAnalysis/ResultsFromTiger/'+date+'/'
if os.path.exists(RESULT_FOLDER):
pass
else:
RESULT_FOLDER='/Volumes/My Passport for Mac/Old_Files/PFS/TigerAnalysis/ResultsFromTiger/'+date+'/'
if os.path.exists(RESULT_FOLDER):
pass
else:
RESULT_FOLDER='/Volumes/Saturn_USA/PFS/TigerAnalysis/ResultsFromTiger/'+date+'/'
else:
# if the analysis is done on Tiger
RESULT_FOLDER='/tigress/ncaplar/Results/'
self.RESULT_FOLDER=RESULT_FOLDER
############################################################
IMAGES_FOLDER=PSF_DIRECTORY+'/Images/'+date+'/'
if not os.path.exists(IMAGES_FOLDER):
os.makedirs(IMAGES_FOLDER)
self.IMAGES_FOLDER=IMAGES_FOLDER
#print('finalArc[close].loc[int(single_number)]'+str(finalArc['close'].loc[int(single_number)]))
if finalArc['close'].loc[int(single_number)]=='1' or finalArc['close'].loc[int(single_number)]==1:
double_sources=False
else:
double_sources=True
#print('double_sources'+str(double_sources))
self.double_sources=double_sources
double_sources_positions_ratios=finalArc.loc[int(single_number)][['second_offset','second_ratio']].values
self.double_sources_positions_ratios=double_sources_positions_ratios
if self.verbosity==1:
print('analyzing label: '+str(obs))
print('double_sources_positions_ratios for this spot is: '+str(double_sources_positions_ratios))
def return_double_sources(self):
return self.double_sources,self.double_sources_positions_ratios
def return_lists_of_images(self):
assert self.multi_var==True
return self.list_of_sci_images,self.list_of_var_images,self.list_of_mask_images
def return_index_of_single_image_in_list_of_images(self):
return self.index_of_single_image_in_list_of_images
def return_columns(self):
return self.columns,self.columns22,self.columns22_analysis
def create_list_of_var_or_ln_sums(self,sigma_offset=0):
"""
gives likelihood for chi**2 =1
"""
list_of_var_sums=[]
for i in range(len(self.list_of_var_images)):
# taking from create_chi_2_almost function in LN_PFS_single
mask_image=self.list_of_mask_images[i]
var_image=self.list_of_var_images[i]
# array that has True for values which are good and False for bad values
inverted_mask=~mask_image.astype(bool)
#
var_image_masked=var_image*inverted_mask
var_image_masked_without_nan = var_image_masked.ravel()[var_image_masked.ravel()>0]
var_sum=-(1/2)*(len(var_image_masked_without_nan)*sigma_offset+np.sum(np.log(2*np.pi*var_image_masked_without_nan)))
list_of_var_sums.append(var_sum)
array_of_var_sums=np.array(list_of_var_sums)
return array_of_var_sums
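    # Illustrative sketch (toy numbers, not part of the original pipeline): the per-image
    # term computed above is the Gaussian log-likelihood with the chi**2 part replaced by
    # N_pix * sigma_offset, i.e. the log-likelihood the model would reach if chi**2 per
    # unmasked pixel were exactly sigma_offset:
    #
    #     var = np.full((20, 20), 4.0)   # toy variance image with no masked pixels
    #     n_pix = var.size
    #     for sigma_offset in (0, 1):
    #         ln_like = -0.5 * (n_pix * sigma_offset + np.sum(np.log(2 * np.pi * var)))
    #         print(sigma_offset, ln_like)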
def create_likelihood(self):
if self.multi_var is True:
self.obs = self.obs_multi
# self.len_of_chains()
# Swarm1
# likechain_Swarm1=np.load(self.RESULT_FOLDER+'likechain'+str(self.date)+'_Single_'+str(self.method)+
# '_'+str(self.obs)+str(self.single_number)+str(self.eps)+str(self.arc)+'Swarm1.npy')
likechain_Swarm1 = self.likechain_Swarm1
like_min_swarm1 = []
for i in range(likechain_Swarm1.shape[0]):
like_min_swarm1.append(np.min(np.abs(likechain_Swarm1[i])))
#
if self.chain_Emcee2 is not None:
# Emcee1
# likechain_Emcee2=np.load(self.RESULT_FOLDER+'likechain'+str(self.date)+'_Single_'+
# str(self.method)+'_'+str(self.obs)+str(self.single_number)+str(self.eps)+
# str(self.arc)+'Emcee2.npy')
likechain_Emcee1 = self.likechain_Emcee1
like_min_Emcee1 = []
for i in range(likechain_Emcee1.shape[1]):
like_min_Emcee1.append(np.min(np.abs(likechain_Emcee1[:, i])))
# Swarm2
            likechain_Swarm2 = self.likechain_Swarm2
like_min_swarm2 = []
for i in range(likechain_Swarm2.shape[0]):
like_min_swarm2.append(np.min(np.abs(likechain_Swarm2[i])))
# Emcee 2
# chain_Emcee3=np.load(self.RESULT_FOLDER+'chain'+str(self.date)+'_Single_'+str(self.method)+
# '_'+str(self.obs)+str(self.single_number)+str(self.eps)+str(self.arc)+'Emcee3.npy')
# likechain_Emcee3=np.load(self.RESULT_FOLDER+'likechain'+str(self.date)+'_Single_'+
# str(self.method)+'_'+str(self.obs)+str(self.single_number)+str(self.eps)+str(self.arc)+'Emcee3.npy')
chain_Emcee2 = self.chain_Emcee2
likechain_Emcee2 = self.likechain_Emcee2
            # get chain number 0, which has the lowest temperature
# if len(likechain_Emcee3)<=4:
# likechain0_Emcee3=likechain_Emcee3[0]
# chain0_Emcee3=chain_Emcee3[0]
# else:
# likechain0_Emcee3=likechain_Emcee3
# chain0_Emcee3=chain_Emcee3
# check the shape of the chain (number of walkers, number of steps, number of parameters)
if self.verbosity == 1:
print('(number of walkers, number of steps, number of parameters for Emcee): ' +
str(chain_Emcee2.shape))
# see the best chain
minchain = chain_Emcee2[np.abs(likechain_Emcee2) == np.min(np.abs(likechain_Emcee2))][0]
# print(minchain)
self.minchain = minchain
like_min_Emcee2 = []
for i in range(likechain_Emcee2.shape[1]):
like_min_Emcee2.append(np.min(np.abs(likechain_Emcee2[:, i])))
like_min = like_min_swarm1 + like_min_Emcee1+like_min_swarm2 + like_min_Emcee2
else:
# see the best chain
minchain = self.chain_Swarm1[np.abs(self.likechain_Swarm1) ==
np.min(np.abs(self.likechain_Swarm1))][0]
# print(minchain)
self.minchain = minchain
like_min = like_min_swarm1
list_of_var_sums = self.create_list_of_var_or_ln_sums(0)
# print('list_of_var_sums: '+str(list_of_var_sums))
array_of_var_sum = np.array(list_of_var_sums)
max_of_array_of_var_sum = np.max(array_of_var_sum)
renormalization_of_var_sum = array_of_var_sum/max_of_array_of_var_sum
zero_sigma_ln = np.mean(list_of_var_sums/renormalization_of_var_sum)
self.zero_sigma_ln = zero_sigma_ln
list_of_var_sums_1 = self.create_list_of_var_or_ln_sums(1)
one_sigma_ln = np.mean(list_of_var_sums_1/renormalization_of_var_sum)
self.one_sigma_ln = one_sigma_ln
# print(len(like_min))
if self.verbosity == 1:
print('minimal likelihood is: '+str(np.min(like_min)))
min_like_min = np.min(like_min)
self.min_like_min = min_like_min
# chi2 = (np.array(like_min)*(2)-np.sum(np.log(2*np.pi*self.var_image)))/(self.sci_image.shape[0])**2
# min_chi2 = -(min_like_min+zero_sigma_ln)/(one_sigma_ln-zero_sigma_ln)
# print('average chi2 reduced is: ' + str(min_chi2))
return minchain, like_min
def len_of_chains(self):
if self.multi_var==True:
self.obs=self.obs_multi
self.create_chains_Emcee_1()
self.create_chains_Emcee_2()
self.create_chains_swarm_1()
self.create_chains_swarm_2()
# (number of walkers, number of steps, number of parameters) for Emcee
# (number of steps, number of walkers, number of parameters) for Swarm
if self.chain_Emcee2 is None:
print(self.chain_Swarm1.shape)
return [len(self.chain_Swarm1),0,0,0]
else:
            print(self.chain_Swarm1.shape, self.chain_Emcee1.shape, self.chain_Swarm2.shape, self.chain_Emcee2.shape)
            return [len(self.chain_Swarm1), (self.chain_Emcee1).shape[1], len(self.chain_Swarm2), (self.chain_Emcee2).shape[1]]
def create_chains(self):
#chain_Emcee1=np.load(self.RESULT_FOLDER+'chain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+str(self.single_number)+str(self.eps)+'Emcee1.npy')
#likechain_Emcee1=np.load(self.RESULT_FOLDER+'likechain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+str(self.single_number)+str(self.eps)+'Emcee1.npy')
# get chain number 0, which is has lowest temperature
#likechain0_Emcee1=likechain_Emcee1[0]
#chain0_Emcee1=chain_Emcee1[0]
#chain_Emcee2=np.load(self.RESULT_FOLDER+'chain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+str(self.single_number)+str(self.eps)+'Emcee2.npy')
#likechain_Emcee2=np.load(self.RESULT_FOLDER+'likechain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+str(self.single_number)+str(self.eps)+'Emcee2.npy')
#likechain0_Emcee2=likechain_Emcee2[0]
#chain0_Emcee2=chain_Emcee2[0]
if self.multi_var==True:
self.obs=self.obs_multi
chain_Emcee3=np.load(self.RESULT_FOLDER+'chain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+str(self.single_number)+str(self.eps)+str(self.arc)+'Emcee3.npy')
likechain_Emcee3=np.load(self.RESULT_FOLDER+'likechain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+str(self.single_number)+str(self.eps)+str(self.arc)+'Emcee3.npy')
        # get chain number 0, which has the lowest temperature
likechain0_Emcee3=likechain_Emcee3
chain0_Emcee3=chain_Emcee3
self.chain0_Emcee3=chain0_Emcee3
self.likechain0_Emcee3=likechain0_Emcee3
return chain0_Emcee3,likechain0_Emcee3
def create_chains_Emcee_1(self):
"""
        get chain and likelihood chain for the first run of Emcee
        the file is named ``Emcee2`` for historical reasons
Returns
-------
chain0_Emcee1 : chain
likechain0_Emcee1 : likelihood chain
"""
if self.multi_var==True:
self.obs=self.obs_multi
try:
chain_Emcee1=np.load(self.RESULT_FOLDER+'chain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+\
str(self.single_number)+str(self.eps)+str(self.arc)+'Emcee2.npy')
likechain_Emcee1=np.load(self.RESULT_FOLDER+'likechain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+\
str(self.single_number)+str(self.eps)+str(self.arc)+'Emcee2.npy')
self.chain_Emcee1=chain_Emcee1
self.likechain_Emcee1=likechain_Emcee1
except:
self.chain_Emcee1=None
self.likechain_Emcee1=None
return self.chain_Emcee1,self.likechain_Emcee1
def create_chains_Emcee_2(self):
"""
        get chain and likelihood chain for the second run of Emcee
        the file is named ``Emcee3`` for historical reasons
"""
if self.multi_var==True:
self.obs=self.obs_multi
try:
chain_Emcee2=np.load(self.RESULT_FOLDER+'chain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+\
str(self.single_number)+str(self.eps)+str(self.arc)+'Emcee3.npy')
likechain_Emcee2=np.load(self.RESULT_FOLDER+'likechain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+\
str(self.single_number)+str(self.eps)+str(self.arc)+'Emcee3.npy')
self.chain_Emcee2=chain_Emcee2
self.likechain_Emcee2=likechain_Emcee2
except:
self.chain_Emcee2=None
self.likechain_Emcee2=None
return self.chain_Emcee2,self.likechain_Emcee2
def create_Emcee2_stack(self):
if self.multi_var==True:
self.obs=self.obs_multi
chain0_Emcee2=np.load(self.RESULT_FOLDER+'chain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+\
str(self.single_number)+str(self.eps)+str(self.arc)+'Emcee3.npy')
likechain0_Emcee2=np.load(self.RESULT_FOLDER+'likechain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+\
str(self.single_number)+str(self.eps)+str(self.arc)+'Emcee3.npy')
for i in range(chain0_Emcee2.shape[1]):
if i==0:
chain0_Emcee2_reshaped=chain0_Emcee2[:,0]
likechain0_Emcee2_reshaped=likechain0_Emcee2[:,0]
else:
chain0_Emcee2_reshaped=np.vstack((chain0_Emcee2_reshaped,chain0_Emcee2[:,i]))
likechain0_Emcee2_reshaped=np.vstack((likechain0_Emcee2_reshaped,likechain0_Emcee2[:,i]))
chain0_stack=chain0_Emcee2_reshaped
likechain0_stack=likechain0_Emcee2_reshaped.ravel()
likechain0_stack=likechain0_stack-np.max(likechain0_stack)
return chain0_stack,likechain0_stack
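    # Note (illustrative equivalent only): for a chain of shape (n_walkers, n_steps, n_par),
    # the step-by-step np.vstack loop above is equivalent to
    #
    #     chain0_stack = chain0_Emcee2.transpose(1, 0, 2).reshape(-1, chain0_Emcee2.shape[2])
    #     likechain0_stack = likechain0_Emcee2.transpose(1, 0).ravel()
    #
    # i.e. all walkers at step 0, then all walkers at step 1, and so on.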
def create_chains_swarm_1(self):
"""get chain and likelihood chain from the swarm analysis
"""
if self.multi_var is True:
self.obs = self.obs_multi
try:
chain_Swarm1 = np.load(self.RESULT_FOLDER+'chain' + str(self.date) + '_Single_' +
str(self.method) + '_' + str(self.obs) +
str(self.single_number) + str(self.eps) + str(self.arc)+'Swarm1.npy')
likechain_Swarm1 = np.load(self.RESULT_FOLDER + 'likechain' + str(self.date) + '_Single_' +
str(self.method) + '_' + str(self.obs) +
str(self.single_number) + str(self.eps) + str(self.arc)+'Swarm1.npy')
print('create_chains_swarm_1: Swarm1 and likechainSwarm1 found')
print('Path searched was: ' + str(self.RESULT_FOLDER+'chain' + str(self.date) + '_Single_' +
str(self.method) + '_' + str(self.obs) +
str(self.single_number) + str(self.eps) + str(self.arc) +
'Swarm1.npy'))
except : # noqa
print('Swarm1 or likechainSwarm1 not found')
print('Path searched for chain was: ' +
str(self.RESULT_FOLDER + 'chain' + str(self.date) +
'_Single_' + str(self.method)+'_' + str(self.obs) +
str(self.single_number) + str(self.eps) + str(self.arc) + 'Swarm1.npy'))
print('Path searched for likechain was: ' +
str(self.RESULT_FOLDER + 'likechain' + str(self.date) +
'_Single_' + str(self.method)+'_' + str(self.obs) +
                  str(self.single_number) + str(self.eps) + str(self.arc) + 'Swarm1.npy'))
            chain_Swarm1 = None
            likechain_Swarm1 = None
self.chain_Swarm1 = chain_Swarm1
self.likechain_Swarm1 = likechain_Swarm1
return chain_Swarm1, likechain_Swarm1
def create_chains_swarm_2(self):
"""
get chain and likelihood chain for the second run of cosmoHammer optimizer
"""
if self.multi_var==True:
self.obs=self.obs_multi
try:
chain_Swarm2=np.load(self.RESULT_FOLDER+'chain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+\
str(self.single_number)+str(self.eps)+str(self.arc)+'Swarm2.npy')
likechain_Swarm2=np.load(self.RESULT_FOLDER+'likechain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+\
str(self.single_number)+str(self.eps)+str(self.arc)+'Swarm2.npy')
self.chain_Swarm2=chain_Swarm2
self.likechain_Swarm2=likechain_Swarm2
except:
self.chain_Swarm2=None
self.likechain_Swarm2=None
return self.chain_Swarm2,self.likechain_Swarm2
def create_allparameters_single(self,mm,array_of_polyfit_1_parameterizations,zmax=None):
"""
        copied from multi
        transforms linear fits of the parametrizations as a function of defocus into a form acceptable for creating single images
        workhorse function used by create_list_of_allparameters
        @param mm [float] defocus of the slit
        @param array_of_polyfit_1_parameterizations parameters describing the linear fit for the parameters as a function of focus
        @param zmax largest Zernike used
"""
        if zmax is None:
zmax=11
#for single case, up to z11
if zmax==11:
z_parametrizations=array_of_polyfit_1_parameterizations[:8]
g_parametrizations=array_of_polyfit_1_parameterizations[8:]
allparameters_proposal_single=np.zeros((8+len(g_parametrizations)))
for i in range(0,8,1):
allparameters_proposal_single[i]=self.value_at_defocus(mm,z_parametrizations[i][0],z_parametrizations[i][1])
for i in range(len(g_parametrizations)):
allparameters_proposal_single[i+8]=g_parametrizations[i][1]
if zmax==22:
z_parametrizations=array_of_polyfit_1_parameterizations[:19]
g_parametrizations=array_of_polyfit_1_parameterizations[19:]
allparameters_proposal_single=np.zeros((19+len(g_parametrizations)))
for i in range(0,19,1):
#print(str([i,mm,z_parametrizations[i]]))
allparameters_proposal_single[i]=self.value_at_defocus(mm,z_parametrizations[i][0],z_parametrizations[i][1])
for i in range(len(g_parametrizations)):
allparameters_proposal_single[19+i]=g_parametrizations[i][1]
return allparameters_proposal_single
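    # Assumed parametrization (illustrative sketch only): each entry of
    # array_of_polyfit_1_parameterizations is taken to hold [slope, intercept] of a linear
    # fit versus defocus, so value_at_defocus(mm, slope, intercept) is assumed to evaluate
    #
    #     value = slope * mm + intercept     # i.e. np.polyval([slope, intercept], mm)
    #
    # while the non-Zernike ("global") parameters are kept at their intercept value
    # (element [1] of each parametrization), as done above.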
def entrance_exit_pupil_plot(self):
ilum=np.load(TESTING_PUPIL_IMAGES_FOLDER+'ilum.npy')
radiometricEffectArray=np.load(TESTING_PUPIL_IMAGES_FOLDER+'radiometricEffectArray.npy')
ilum_radiometric=np.load(TESTING_PUPIL_IMAGES_FOLDER+'ilum_radiometric.npy')
plt.figure(figsize=(30,8))
plt.subplot(131)
plt.imshow(ilum,origin='lower',vmax=1,vmin=0)
plt.title('entrance pupil')
plt.colorbar()
plt.subplot(132)
plt.title('ent->exit pupil')
plt.imshow(radiometricEffectArray,origin='lower',vmax=1,vmin=0)
plt.colorbar()
plt.subplot(133)
plt.title('exit pupil')
plt.imshow(ilum_radiometric,origin='lower',vmax=1,vmin=0)
plt.colorbar()
def wavefront_plot(self):
wf_full = np.load(TESTING_WAVEFRONT_IMAGES_FOLDER + 'wf_full.npy')
plt.figure(figsize=(36, 6))
plt.subplot(141)
plt.imshow(wf_full)
plt.colorbar()
plt.subplot(142)
plt.imshow(np.real(np.exp(2j*np.pi * wf_full/800)))
plt.colorbar()
plt.subplot(143)
plt.imshow(np.imag(np.exp(2j*np.pi * wf_full/800)))
plt.colorbar()
def illumination_wavefront_plot(self,return_Images=False):
ilum=np.load(TESTING_PUPIL_IMAGES_FOLDER+'ilum.npy')
wf_full=np.load(TESTING_WAVEFRONT_IMAGES_FOLDER+'wf_full.npy')
wf_full_fake_0=np.load(TESTING_WAVEFRONT_IMAGES_FOLDER+'wf_full_fake_0.npy')
midpoint=int(len(ilum)/2)
ilum_zoom=ilum[int(midpoint-len(ilum)/4):int(midpoint+len(ilum)/4),int(midpoint-len(ilum)/4):int(midpoint+len(ilum)/4)]
plt.figure(figsize=(28,8))
plt.subplot(131)
plt.imshow(ilum_zoom,origin='lower',vmax=1,vmin=0)
plt.title('illumination of the pupil',fontsize=25)
plt.subplot(132)
ilum_1=np.copy(ilum)
ilum_1[ilum_1>0.01]=1
wavefront=ilum_1*wf_full
wavefront=wavefront/800
wavefront_zoom=wavefront[int(midpoint-len(ilum)/4):int(midpoint+len(ilum)/4),int(midpoint-len(ilum)/4):\
int(midpoint+len(ilum)/4)]
plt.imshow(wavefront_zoom,cmap=plt.get_cmap('bwr'),vmax=np.max(np.abs(wavefront))*0.75,vmin=-np.max(np.abs(wavefront))*0.75)
plt.colorbar(fraction=0.046, pad=0.04)
plt.title('wavefront [units of waves]',fontsize=25)
plt.subplot(133)
ilum_1=np.copy(ilum)
ilum_1[ilum_1>0.01]=1
wavefront=ilum_1*wf_full_fake_0
wavefront=wavefront/800
wavefront_0_zoom=wavefront[int(midpoint-len(ilum)/4):int(midpoint+len(ilum)/4),int(midpoint-len(ilum)/4):int(midpoint+len(ilum)/4)]
plt.imshow(wavefront_0_zoom,cmap=plt.get_cmap('bwr'),vmax=np.max(np.abs(wavefront))*0.75,vmin=-np.max(np.abs(wavefront))*0.75)
plt.colorbar(fraction=0.046, pad=0.04)
plt.title('wavefront w.o. defocus [u. of waves]',fontsize=25)
if return_Images==True:
return ilum_zoom,wavefront_zoom,wavefront_0_zoom
def wavefront_gradient_plot(self):
wf_full=np.load(TESTING_WAVEFRONT_IMAGES_FOLDER+'wf_full.npy')
plt.figure(figsize=(30,8))
plt.subplot(131)
vgrad = np.gradient(wf_full)
fulgrad = np.sqrt(vgrad[0]**2 + vgrad[1]**2)
plt.title('gradient (magnitude)')
plt.imshow(fulgrad,cmap=plt.get_cmap('hot'), vmin = np.amin(fulgrad),vmax = np.amax(fulgrad))
plt.colorbar()
plt.subplot(132)
x, y = range(0, len(wf_full)), range(0,len(wf_full))
xi, yi = np.meshgrid(x, y)
plt.title('gradient (direction)')
plt.streamplot(xi, yi, vgrad[0], vgrad[1])
plt.subplot(133)
laplace_of_wf = scipy.ndimage.filters.laplace(wf_full)
plt.title('Laplacian')
plt.imshow(laplace_of_wf,cmap=plt.get_cmap('hot'), vmin = -1,vmax = 1)
plt.colorbar()
def create_basic_data_image(self,return_Images=False):
sci_image=self.sci_image
var_image=self.var_image
mask_image=self.mask_image
plt.figure(figsize=(30,8))
plt.subplot(131)
plt.imshow(sci_image,norm=LogNorm(vmin=1,vmax=np.max(sci_image)),origin='lower')
cbar=plt.colorbar(fraction=0.046, pad=0.04)
cbar.set_ticks([10,10**2,10**3,10**4,10**5])
plt.title('sci_image')
plt.subplot(132)
plt.imshow(var_image,norm=LogNorm(vmin=1,vmax=np.max(sci_image)),origin='lower')
cbar=plt.colorbar(fraction=0.046, pad=0.04)
cbar.set_ticks([10,10**2,10**3,10**4,10**5])
plt.title('var_image')
plt.subplot(133)
plt.imshow(sci_image,norm=LogNorm(vmin=1,vmax=np.max(sci_image)),origin='lower')
cbar=plt.colorbar(fraction=0.046, pad=0.04)
plt.imshow(mask_image,origin='lower',vmin=0,vmax=np.max(mask_image),alpha=0.2)
cbar.set_ticks([10,10**2,10**3,10**4,10**5])
plt.title('sci+mask_image')
if return_Images==True:
return sci_image,var_image,mask_image
def create_fitting_evolution_plot(self):
minchain, like_min = self.create_likelihood()
len_of_chains = self.len_of_chains()
# chain0_Emcee3,likechain0_Emcee3=self.create_chains()
# size=self.chain_swarm1.shape[1]
matplotlib.rcParams.update({'font.size': 18})
plt.figure(figsize=(24, 12))
plt.subplot(211)
plt.plot(np.linspace(0, len(like_min)-1, len(like_min)), like_min, 'blue', ls='-', marker='o')
plt.ylabel('likelihood')
plt.xlabel('steps')
plt.axvline(np.sum(len_of_chains[:1])+0.5, ls='--')
if np.min(like_min) < -self.zero_sigma_ln:
plt.ylim(np.min(like_min) * 0.95, 1.05 * np.max(like_min))
else:
plt.ylim(-self.zero_sigma_ln, 1.05 * np.max(like_min))
plt.axhline(self.min_like_min, ls='--')
# plt.axhline(-self.one_sigma_ln, ls='--', color='black')
plt.subplot(212)
plt.plot(np.linspace(0, len(like_min)-1, len(like_min)), np.log10(like_min),
'blue', ls='-', marker='o')
plt.ylabel('log10(likelihood)')
plt.xlabel('steps')
plt.axvline(np.sum(len_of_chains[:1])+0.5, ls='--')
def create_basic_comparison_plot(self, custom_model_image=None, custom_mask=None,
custom_sci_image=None, custom_var_image=None,
use_max_chi_scaling=False, use_max_flux_scaling=False,
show_flux_mask=False, show_impact_pixels_mask=False,
save=False, multi_background_factor=3):
if custom_model_image is None:
optPsf_cut_fiber_convolved_downsampled = np.load(TESTING_FINAL_IMAGES_FOLDER +
'optPsf_cut_fiber_convolved_downsampled.npy')
res_iapetus = optPsf_cut_fiber_convolved_downsampled
else:
res_iapetus = custom_model_image
if custom_sci_image is None:
sci_image = self.sci_image
else:
sci_image = custom_sci_image
if custom_var_image is None:
var_image = self.var_image
else:
var_image = custom_var_image
mean_value_of_background_via_var = np.mean([np.median(var_image[0]),
np.median(var_image[-1]),
np.median(var_image[:, 0]),
np.median(var_image[:, -1])]) *\
multi_background_factor
mean_value_of_background_via_sci = np.mean([np.median(sci_image[0]),
np.median(sci_image[-1]),
np.median(sci_image[:, 0]),
np.median(sci_image[:, -1])]) *\
multi_background_factor
mean_value_of_background = np.max([mean_value_of_background_via_var,
mean_value_of_background_via_sci])
        print(str(multi_background_factor) + 'x mean_value_of_background (max of sci- and var-based estimates) is: ' +
              str(mean_value_of_background))
if type(show_flux_mask) == bool:
flux_mask = sci_image > (mean_value_of_background)
else:
flux_mask = sci_image > (show_flux_mask)
show_flux_mask = True
size = sci_image.shape[0]
# if size==40:
# dithering=2
# else:
# dithering=1
# dithering=1
if size == 20:
x_center = find_centroid_of_flux(res_iapetus)[0]
else:
x_center = (size/2)
left_limit = np.round(x_center-3.5)+0.5
right_limit = np.round(x_center+3.5)-0.5
chi2_image = (sci_image-res_iapetus)**2/((1)*var_image)
if show_impact_pixels_mask is True:
mask_most_impactful_pixels = np.zeros(sci_image.shape)
mask_most_impactful_pixels[chi2_image > (np.quantile(chi2_image[flux_mask].ravel(), 0.99))] = 1
value_of_chi2_1 = np.sum(chi2_image[flux_mask].ravel()
[chi2_image[flux_mask].ravel() >
np.quantile(chi2_image[flux_mask].ravel(), 0.99)])
total_chi2 = np.sum(chi2_image[flux_mask].ravel())
print('fraction of chi2 due to 1% of pixels: ' + str(value_of_chi2_1/total_chi2))
plt.figure(figsize=(20, 20))
plt.subplot(221)
plt.imshow(res_iapetus, origin='lower', vmax=np.max(np.abs(sci_image)))
plt.plot(np.ones(len(sci_image))*(left_limit), np.array(range(len(sci_image))), '--', color='white')
plt.plot(np.ones(len(sci_image))*(right_limit), np.array(range(len(sci_image))), '--', color='white')
plt.colorbar(fraction=0.046, pad=0.04)
if show_flux_mask is True:
plt.imshow(flux_mask, origin='lower', alpha=0.4)
if show_impact_pixels_mask is True:
plt.imshow(mask_most_impactful_pixels, origin='lower', alpha=0.35, cmap='magma')
plt.title('Model')
plt.grid(False)
plt.subplot(222)
plt.imshow(sci_image, origin='lower', vmax=np.max(np.abs(sci_image)))
plt.plot(np.ones(len(sci_image))*(left_limit), np.array(range(len(sci_image))), '--', color='white')
plt.plot(np.ones(len(sci_image))*(right_limit), np.array(range(len(sci_image))), '--', color='white')
plt.colorbar(fraction=0.046, pad=0.04)
if show_flux_mask is True:
plt.imshow(flux_mask, alpha=0.4, origin='lower',)
if show_impact_pixels_mask is True:
plt.imshow(mask_most_impactful_pixels, alpha=0.35, cmap='magma', origin='lower',)
plt.title('Data')
plt.grid(False)
plt.subplot(223)
if use_max_flux_scaling is False:
plt.imshow(sci_image-res_iapetus, origin='lower', cmap='bwr',
vmin=-np.max(np.abs(sci_image))/20, vmax=np.max(np.abs(sci_image))/20)
else:
max_flux = np.max(np.abs(sci_image-res_iapetus))
            plt.imshow((sci_image-res_iapetus), origin='lower', cmap='bwr',
                       vmin=-max_flux*0.75, vmax=max_flux*0.75)
plt.colorbar(fraction=0.046, pad=0.04)
plt.plot(np.ones(len(sci_image))*(left_limit), np.array(range(len(sci_image))), '--', color='black')
plt.plot(np.ones(len(sci_image))*(right_limit), np.array(range(len(sci_image))), '--', color='black')
if show_flux_mask is True:
plt.imshow(flux_mask, alpha=0.55, vmin=0, vmax=1, origin='lower',)
if custom_mask is None:
pass
else:
if np.sum(custom_mask) == 0:
alpha_value = 0
else:
alpha_value = 0.25
plt.imshow(custom_mask, origin='lower', alpha=alpha_value)
if show_impact_pixels_mask is True:
plt.imshow(mask_most_impactful_pixels, alpha=0.55, cmap='magma', origin='lower',)
plt.title('Residual (data - model)')
plt.grid(False)
plt.subplot(224)
if use_max_chi_scaling is False:
plt.imshow((sci_image-res_iapetus)/np.sqrt(var_image),
origin='lower', cmap='bwr', vmax=5, vmin=-5)
else:
max_chi = np.max(np.abs((sci_image-res_iapetus)/np.sqrt(var_image))) * 0.75
            plt.imshow((sci_image-res_iapetus)/np.sqrt(var_image), origin='lower',
                       cmap='bwr', vmin=-max_chi, vmax=max_chi)
plt.plot(np.ones(len(sci_image))*(left_limit), np.array(range(len(sci_image))), '--', color='black')
plt.plot(np.ones(len(sci_image))*(right_limit), np.array(range(len(sci_image))), '--', color='black')
plt.colorbar(fraction=0.046, pad=0.04)
plt.title('chi map')
plt.tight_layout(pad=0.0, w_pad=1.8, h_pad=-10.0)
if show_flux_mask is True:
plt.imshow(flux_mask, alpha=0.55, origin='lower')
if show_impact_pixels_mask is True:
plt.imshow(mask_most_impactful_pixels, alpha=0.55, cmap='magma', origin='lower')
chi2_max_reduced = np.sum((res_iapetus)**2/((var_image.shape[0]*var_image.shape[1])*var_image))
chi2_reduced = np.sum((res_iapetus-sci_image)**2/((var_image.shape[0]*var_image.shape[1])*var_image))
chi_max_reduced = np.sum(np.abs(res_iapetus)**1/((var_image.shape[0]*var_image.shape[1]) *
np.sqrt(var_image)))
chi_reduced = np.sum(np.abs(res_iapetus-sci_image)**1/((var_image.shape[0]*var_image.shape[1]) *
np.sqrt(var_image)))
print('---------------------')
print('chi**2 max reduced is: '+str(chi2_max_reduced))
print('chi**2 reduced is: '+str(chi2_reduced) + ' for log improvement: ' +
str(np.log10(chi2_reduced/chi2_max_reduced)))
print('chi max reduced is: '+str(chi_max_reduced))
print('chi reduced is: '+str(chi_reduced) + ' for log improvement: ' +
str(np.log10(chi_reduced/chi_max_reduced)))
print('---------------------')
if custom_mask is None:
pass
else:
custom_mask = ~custom_mask.astype('bool')
print('chi**2 reduced within custom mask area is: ' +
str(np.mean((res_iapetus[custom_mask]-sci_image[custom_mask])**2/(var_image[custom_mask]))))
chi2_max_flux_reduced = np.sum((res_iapetus[flux_mask])**2/(len(var_image[flux_mask]) *
var_image[flux_mask]))
chi2_flux_reduced = np.mean((res_iapetus[flux_mask]-sci_image[flux_mask])**2/(var_image[flux_mask]))
chi_max_flux_reduced = np.sum(np.abs(res_iapetus[flux_mask])**1/(len(var_image[flux_mask]) *
np.sqrt(var_image)[flux_mask]))
chi_flux_reduced = np.mean(np.abs(res_iapetus[flux_mask] -
sci_image[flux_mask])**1/np.sqrt(var_image[flux_mask]))
print('---------------------')
print('chi**2 max reduced within flux mask area is: ' +
str(chi2_max_flux_reduced))
print('chi**2 reduced within flux mask area is: ' +
str(chi2_flux_reduced) + ' for log improvement: ' +
str(np.log10(chi2_flux_reduced/chi2_max_flux_reduced)))
print('chi max reduced within flux mask area is: ' +
str(chi_max_flux_reduced))
print('chi reduced within flux mask area is: ' +
str(chi_flux_reduced) + ' for log improvement: ' +
str(np.log10(chi_flux_reduced/chi_max_flux_reduced)))
print('---------------------')
print('Abs of residual divided by total flux is: ' +
str(np.sum(np.abs((res_iapetus-sci_image)))/np.sum((res_iapetus))))
print('Abs of residual divided by largest value of a flux in the image is: ' +
str(np.max(np.abs((res_iapetus-sci_image)/np.max(res_iapetus)))))
if save is not False:
plt.savefig('/Users/nevencaplar/Documents/PFS/Images/Jan2921/Spot_figures/spot_'+save)
plt.clf()
def create_basic_comparison_plot_log(self,custom_model_image=None,custom_mask=None,custom_sci_image=None,custom_var_image=None,use_max_chi_scaling=False,\
show_flux_mask=False):
if custom_model_image is None:
optPsf_cut_fiber_convolved_downsampled=np.load(TESTING_FINAL_IMAGES_FOLDER+'optPsf_cut_fiber_convolved_downsampled.npy')
res_iapetus=optPsf_cut_fiber_convolved_downsampled
else:
res_iapetus=custom_model_image
if custom_sci_image is None:
sci_image=self.sci_image
else:
sci_image=custom_sci_image
if custom_var_image is None:
var_image=self.var_image
else:
var_image=custom_var_image
mean_value_of_background_via_var=np.mean([np.median(var_image[0]),np.median(var_image[-1]),\
np.median(var_image[:,0]),np.median(var_image[:,-1])])*3
mean_value_of_background_via_sci=np.mean([np.median(sci_image[0]),np.median(sci_image[-1]),\
np.median(sci_image[:,0]),np.median(sci_image[:,-1])])*3
mean_value_of_background=np.max([mean_value_of_background_via_var,mean_value_of_background_via_sci])
flux_mask=sci_image>(mean_value_of_background)
size=sci_image.shape[0]
if size==40:
dithering=2
else:
dithering=1
if size==20:
x_center=find_centroid_of_flux(res_iapetus)[0]
else:
x_center=(size/2)
left_limit=np.round(x_center-3.5)+0.5
right_limit=np.round(x_center+3.5)-0.5
chi2_image=(sci_image-res_iapetus)**2/((1)*var_image)
plt.figure(figsize=(20,20))
plt.subplot(221)
plt.imshow(res_iapetus,origin='lower',vmin=1,vmax=np.max(np.abs(sci_image)),norm=LogNorm())
plt.plot(np.ones(len(sci_image))*(left_limit),np.array(range(len(sci_image))),'--',color='white')
plt.plot(np.ones(len(sci_image))*(right_limit),np.array(range(len(sci_image))),'--',color='white')
cbar=plt.colorbar(fraction=0.046, pad=0.04)
cbar.set_ticks([10,10**2,10**3,10**4,10**5])
plt.title('Model')
if show_flux_mask==True:
plt.imshow(flux_mask,alpha=0.55)
plt.grid(False)
plt.subplot(222)
plt.imshow(sci_image,origin='lower',vmin=1,vmax=np.max(np.abs(sci_image)),norm=LogNorm())
plt.plot(np.ones(len(sci_image))*(left_limit),np.array(range(len(sci_image))),'--',color='white')
plt.plot(np.ones(len(sci_image))*(right_limit),np.array(range(len(sci_image))),'--',color='white')
cbar=plt.colorbar(fraction=0.046, pad=0.04)
cbar.set_ticks([10,10**2,10**3,10**4,10**5])
plt.title('Data')
if show_flux_mask==True:
plt.imshow(flux_mask,alpha=0.55)
plt.grid(False)
plt.subplot(223)
plt.imshow(np.abs(sci_image-res_iapetus),origin='lower',vmax=np.max(np.abs(sci_image))/20,norm=LogNorm())
plt.plot(np.ones(len(sci_image))*(left_limit),np.array(range(len(sci_image))),'--',color='white')
plt.plot(np.ones(len(sci_image))*(right_limit),np.array(range(len(sci_image))),'--',color='white')
cbar=plt.colorbar(fraction=0.046, pad=0.04)
cbar.set_ticks([10,10**2,10**3,10**4,10**5])
plt.title('abs(Residual ( data-model))')
if show_flux_mask==True:
plt.imshow(flux_mask,alpha=0.55)
plt.grid(False)
plt.subplot(224)
plt.imshow(chi2_image,origin='lower',vmin=1,norm=LogNorm())
plt.plot(np.ones(len(sci_image))*(left_limit),np.array(range(len(sci_image))),'--',color='white')
plt.plot(np.ones(len(sci_image))*(right_limit),np.array(range(len(sci_image))),'--',color='white')
cbar=plt.colorbar(fraction=0.046, pad=0.04)
cbar.set_ticks([10,10**2,10**3,10**4,10**5])
if show_flux_mask==True:
plt.imshow(flux_mask,alpha=0.55)
plt.title('chi**2 map')
print('chi**2 max reduced is: '+str(np.sum((res_iapetus)**2/((var_image.shape[0]*var_image.shape[1])*var_image))))
np.sum(np.abs((res_iapetus-sci_image)))/np.sum((res_iapetus))
plt.tight_layout(pad=0.0, w_pad=1.8, h_pad=-7.0)
print('chi**2 reduced is: '+str(np.sum((res_iapetus-sci_image)**2/((var_image.shape[0]*var_image.shape[1])*var_image))))
print('Abs of residual divided by total flux is: '+str(np.sum(np.abs((res_iapetus-sci_image)))/np.sum((res_iapetus))))
print('Abs of residual divided by largest value of a flux in the image is: '+str(np.max(np.abs((res_iapetus-sci_image)/np.max(res_iapetus)))))
def create_basic_comparison_plot_log_artifical(self,custom_model_image=None,custom_mask=None,custom_sci_image=None,custom_var_image=None,use_max_chi_scaling=False):
# need to update for multivar
if custom_model_image is None:
optPsf_cut_fiber_convolved_downsampled=np.load(TESTING_FINAL_IMAGES_FOLDER+'optPsf_cut_fiber_convolved_downsampled.npy')
res_iapetus=optPsf_cut_fiber_convolved_downsampled
else:
res_iapetus=custom_model_image
noise=self.create_artificial_noise(custom_model_image=custom_model_image,custom_var_image=custom_var_image)
if custom_sci_image is None:
sci_image=self.sci_image
else:
sci_image=custom_sci_image
if custom_var_image is None:
var_image=self.var_image
else:
var_image=custom_var_image
size=sci_image.shape[0]
if size==40:
dithering=2
else:
dithering=1
if size==20:
x_center=find_centroid_of_flux(res_iapetus)[0]
else:
x_center=(size/2)
left_limit=np.round(x_center-3.5)+0.5
right_limit=np.round(x_center+3.5)-0.5
plt.figure(figsize=(20,20))
plt.subplot(221)
plt.imshow(res_iapetus+noise,origin='lower',vmin=1,vmax=np.max(np.abs(sci_image)),norm=LogNorm())
plt.plot(np.ones(len(sci_image))*(left_limit),np.array(range(len(sci_image))),'--',color='white')
plt.plot(np.ones(len(sci_image))*(right_limit),np.array(range(len(sci_image))),'--',color='white')
cbar=plt.colorbar(fraction=0.046, pad=0.04)
cbar.set_ticks([10,10**2,10**3,10**4,10**5])
        plt.title('Model with artificial noise')
plt.grid(False)
plt.subplot(222)
plt.imshow(sci_image,origin='lower',vmin=1,vmax=np.max(np.abs(sci_image)),norm=LogNorm())
plt.plot(np.ones(len(sci_image))*(left_limit),np.array(range(len(sci_image))),'--',color='white')
plt.plot(np.ones(len(sci_image))*(right_limit),np.array(range(len(sci_image))),'--',color='white')
cbar=plt.colorbar(fraction=0.046, pad=0.04)
cbar.set_ticks([10,10**2,10**3,10**4,10**5])
plt.title('Data')
plt.grid(False)
plt.subplot(223)
plt.imshow(np.abs(res_iapetus-sci_image),origin='lower',vmax=np.max(np.abs(sci_image))/20,norm=LogNorm())
plt.plot(np.ones(len(sci_image))*(left_limit),np.array(range(len(sci_image))),'--',color='white')
plt.plot(np.ones(len(sci_image))*(right_limit),np.array(range(len(sci_image))),'--',color='white')
cbar=plt.colorbar(fraction=0.046, pad=0.04)
cbar.set_ticks([10,10**2,10**3,10**4,10**5])
plt.title('abs(Residual (model - data))')
plt.grid(False)
plt.subplot(224)
plt.imshow((res_iapetus-sci_image)**2/((1)*var_image),origin='lower',vmin=1,norm=LogNorm())
plt.plot(np.ones(len(sci_image))*(left_limit),np.array(range(len(sci_image))),'--',color='white')
plt.plot(np.ones(len(sci_image))*(right_limit),np.array(range(len(sci_image))),'--',color='white')
cbar=plt.colorbar(fraction=0.046, pad=0.04)
cbar.set_ticks([10,10**2,10**3,10**4,10**5])
plt.title('chi**2 map')
print(np.sum((res_iapetus-sci_image)**2/((var_image.shape[0]*var_image.shape[1])*var_image)))
np.sum(np.abs((res_iapetus-sci_image)))/np.sum((res_iapetus))
plt.tight_layout(pad=0.0, w_pad=1.8, h_pad=-7.0)
print('chi**2 reduced is: '+str(np.sum((res_iapetus-sci_image)**2/((var_image.shape[0]*var_image.shape[1])*var_image))))
print('Abs of residual divided by total flux is: '+str(np.sum(np.abs((res_iapetus-sci_image)))/np.sum((res_iapetus))))
print('Abs of residual divided by largest value of a flux in the image is: '+str(np.max(np.abs((res_iapetus-sci_image)/np.max(res_iapetus)))))
def create_artificial_noise(self, custom_model_image=None,custom_var_image=None):
if custom_var_image is None:
var_image=self.var_image
else:
var_image=custom_var_image
if custom_model_image is None:
optPsf_cut_fiber_convolved_downsampled=np.load(TESTING_FINAL_IMAGES_FOLDER+'optPsf_cut_fiber_convolved_downsampled.npy')
res_iapetus=optPsf_cut_fiber_convolved_downsampled
else:
res_iapetus=custom_model_image
artifical_noise=np.zeros_like(res_iapetus)
artifical_noise=np.array(artifical_noise)
for i in range(len(artifical_noise)):
for j in range(len(artifical_noise)):
artifical_noise[i,j]=np.random.randn()*np.sqrt(var_image[i,j]+40)
return artifical_noise
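    # Note: a vectorised equivalent of the pixel loop above (illustrative sketch only) is
    #
    #     artifical_noise = np.random.randn(*var_image.shape) * np.sqrt(var_image + 40)
    #
    # which draws one Gaussian deviate per pixel with variance var_image + 40 in a single call.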
def create_cut_plots(self):
var_image=self.var_image
artifical_noise=self.create_artificial_noise()
sci_image=self.sci_image
optPsf_cut_fiber_convolved_downsampled=np.load(TESTING_FINAL_IMAGES_FOLDER+'optPsf_cut_fiber_convolved_downsampled.npy')
res_iapetus=optPsf_cut_fiber_convolved_downsampled
mid_point_of_sci_image=int(sci_image.shape[0]/2)
plt.figure(figsize=(25,10))
plt.subplot(121)
plt.title('horizontal direction')
plt.plot(np.array(range(len(res_iapetus))),np.log10(res_iapetus[mid_point_of_sci_image]),'blue',linestyle='--',label='model')
plt.plot(np.array(range(len(res_iapetus))),np.log10(np.abs(sci_image[mid_point_of_sci_image])),'orange',linestyle='--',label='data')
        plt.plot(np.array(range(len(res_iapetus))),np.ones(len(res_iapetus))*np.log10(np.max(sci_image[:,mid_point_of_sci_image])))
# -*- coding: utf-8 -*-
"""
Functions for CHX transfocator system:
physical setup of transfocator system -> trans_setup
calculate optimal settings for desired image position and energy -> calc_transsetup
calculate current image position -> get_ip
by LW 03/13/2016
"""
import numpy as np
from chxtools import xfuncs as xf
from scipy.optimize import fminbound
from epics import *
def CRL_focalpoint(energy,lens_configuration):
"""
CRL_focalpoint(energy,lens_configuration):
    lens_configuration is a dictionary of the form
    {lens_radius1: number_lenses, lens_radius2: number_lenses, ...}
    returns the focal length
    """
    # placeholder: the focal-length calculation is not implemented yet
    focal_distance=0.0
return focal_distance
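# Illustrative thin-lens sketch (not part of the original module): for a stack of N parabolic
# refractive lenses with apex radius R and refractive-index decrement delta, f = R / (2 * N * delta),
# and stacks combine as 1/f_total = sum(1/f_i) when lens separations are neglected. delta would
# normally come from the lens material and X-ray energy (e.g. via chxtools.xfuncs), not shown here.
def _crl_focal_length_sketch(radii_m, numbers, delta):
    """thin-lens CRL focal length [m] for per-stack radii [m], lens counts and a common delta"""
    inv_f = 0.0
    for R, N in zip(radii_m, numbers):
        inv_f += 2.0 * N * delta / R
    return 1.0 / inv_f if inv_f > 0 else np.inf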
def trans_setup():
"""
defining physical configuration of CHX transfocator system
call: trans_setup()
returns a dictionary: {'lens_material':lens_mat,'lens_number':lens_N,'lens_radius':lens_R,'trans_position':trans_pos}
dependencies: none
by LW 03/13/2016
"""
# slot7 slot6 slot5 slot4 slot3 slot2 slot1 <------ beam direction (slot 8 is currently B-fiber only)
# Be Be Be Be Be Be Be lens material
# 1.5 1.5 0.5 0.5 0.5 0.5 0.5 lens radius [mm]
# 1 1 5 8 4 2 1 number of lenses
lens_R=[0.5,0.5,0.5,0.5,0.5,1.5,1.5]
lens_mat=['Be','Be','Be','Be','Be','Be','Be']
lens_N=[1,2,4,8,5,1,1]
trans_pos=[35.2,35.8]
return {'lens_material':lens_mat,'lens_number':lens_N,'lens_radius':lens_R,'trans_position':trans_pos}
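# Example usage (illustrative sketch only): count the installed lenses reported by trans_setup()
def _demo_trans_setup():
    cfg = trans_setup()
    n_lenses = int(np.sum(cfg['lens_number']))
    print('lens stacks:', len(cfg['lens_number']), 'total lenses:', n_lenses)
    return cfg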
def calc_transsetup(image_pos,E='auto',silent=False):
"""
    calc_transsetup(image_pos, E='auto'): function to calculate CHX transfocator settings
    required argument:
image_pos: image position [m from source]
optional arguments:
E='auto': get beamline energy from PV (currently: DCM) | E=7894: manual overwrite of energy parameter
silent='False': silence the output (e.g. to use function in a loop): False/True
Note: 'virtual images' are discarded in the search
Note: search is implemented to go through the lens stacks with large numbers of lenses first
-> returned solution will be e.g. using lens stack #5 with 5 lenses, instead of #1 (1) and #3 (4)
function returns dictionary of the form: {'lens_config':bin_index,'z_position':zmin}
    dependencies: imports numpy, fminbound (scipy.optimize) and xfuncs
calls trans_setup() to get physical configuration of transfocator system
by LW 03/14/2016
"""
    image_pos = np.array(image_pos)
"""Functions to deal with catalogs."""
import os
import copy
import distutils.spawn
from shutil import copyfile
import numpy as np
import healpy as hp
from astropy.wcs import WCS
from astropy.table import Table, Column, vstack
__all__ = ["remove_is_null", "moments_to_shape", "filter_through_bright_star_mask",
"ReferenceCatalog", "PS1_PATTERN"]
PS1_PATTERN = 'ps1-{:05d}.fits'
def remove_is_null(table, output=None, verbose=True, string='isnull', return_data=True):
"""
Remove the xxx_isnull columns from the catalog.
This is an annoying issue with FITS table from HSC database.
Parameters
----------
table : str or astropy.table object
Name of the FITS catalog or the data itself
string : str, optional
The name of the Null columns.
Default: 'isnull'
output : str, optional
If output is None, will write a new FITS table with '_clean'
suffix.
Default: None
return_data : bool, optional
Whether return the cleaned data.
Default : True
verbose : bool, optional
Default : True
"""
if isinstance(table, Table):
data = table
else:
if not os.path.isfile(table):
raise Exception("# Can not find catalog: %s" % table)
data = Table.read(table, format='fits')
if verbose:
print("Reading the data....")
col_names = data.colnames
col_remove = [col for col in col_names if string in col]
data.remove_columns(col_remove)
    if output is None:
        if verbose:
            print("Saving data to %s ..." % table)
        data.write(table, format='fits', overwrite=True)
else:
data.write(output.strip(), format='fits', overwrite=True)
if return_data:
return data
else:
return None
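# Example usage (illustrative sketch; the demo table and output filename are made up)
def _demo_remove_is_null():
    demo = Table({'id': [1, 2], 'flux': [3.0, 4.0], 'flux_isnull': [False, True]})
    cleaned = remove_is_null(demo, output='demo_clean.fits', verbose=False)
    # 'flux_isnull' is dropped; the cleaned table is also written to 'demo_clean.fits'
    return cleaned.colnames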
def moments_to_shape(catalog, shape_type='i_sdss_shape', axis_ratio=False,
radian=False, update=True, to_pixel=False):
"""
Convert the 2nd moments into elliptical shape: radius, ellipticity, position angle.
Adopted from `unagi.catalog`:
https://github.com/dr-guangtou/unagi/blob/master/unagi/catalog.py
Parameters
----------
catalog : Table data
"""
try:
xx = catalog["{}_11".format(shape_type)]
yy = catalog["{}_22".format(shape_type)]
xy = catalog["{}_12".format(shape_type)]
except KeyError:
print("Wrong column name!")
raise
e1 = (xx - yy) / (xx + yy)
e2 = (2.0 * xy / (xx + yy))
# Get the r50 or determinant radius
rad = np.sqrt(xx + yy)
rad = rad / 0.168 if to_pixel else rad
# Ellipticity or axis ratio
ell = np.sqrt(e1 ** 2.0 + e2 ** 2.0)
ell = 1.0 - ell if axis_ratio else ell
# Position angle in degree or radian
theta = (-0.5 * np.arctan2(e2, e1))
theta = (theta * 180. / np.pi) if not radian else theta
if update:
rad_col = "{}_r".format(shape_type)
theta_col = "{}_theta".format(shape_type)
if axis_ratio:
ell_col = "{}_ba".format(shape_type)
else:
ell_col = "{}_e".format(shape_type)
if rad_col in catalog.colnames:
catalog.remove_column(rad_col)
catalog.add_column(Column(data=rad, name=rad_col))
if ell_col in catalog.colnames:
catalog.remove_column(ell_col)
ell = np.asarray(ell)
catalog.add_column(Column(data=ell, name=ell_col))
if theta_col in catalog.colnames:
catalog.remove_column(theta_col)
theta = np.asarray(theta)
catalog.add_column(Column(data=theta, name=theta_col))
return catalog
return rad, ell, theta
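# Example usage (illustrative sketch; column names follow the shape_type convention above)
def _demo_moments_to_shape():
    demo = Table({'i_sdss_shape_11': [4.0], 'i_sdss_shape_22': [1.0], 'i_sdss_shape_12': [0.5]})
    rad, ell, theta = moments_to_shape(demo, shape_type='i_sdss_shape', update=False)
    # rad = sqrt(xx + yy) (determinant radius in the input units), ell = |e|, theta in degrees
    return rad, ell, theta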
def filter_through_bright_star_mask(catalog, mask_dir, reg_prefix='new_S18Amask',
filters='grizy', filter_type='outside',
ra='ra', dec='dec', output_suffix='bsm'):
"""Filter the catalog through the .reg files of the bright star masks."""
# Make the sure venice is installed
venice = distutils.spawn.find_executable("venice")
assert venice, "Venice is not installed!"
# Get the .reg files for the bright star mask
reg_files = [
os.path.join(mask_dir, reg_prefix + '_' + band + '.reg') for band in filters]
# Output catalog
output_catalogs = [
catalog.replace('.fits', '_bsm_' + band + '.fits') for band in filters]
output_final = catalog.replace('.fits', '_%s.fits' % output_suffix)
# Generate the commands
for ii, reg_mask in enumerate(reg_files):
if ii == 0:
venice_command = (
venice + ' -m ' + reg_mask + ' -f ' + filter_type + ' -cat ' + catalog +
' -xcol ' + ra + ' -ycol ' + dec + ' -o ' + output_catalogs[0]
)
else:
venice_command = (
venice + ' -m ' + reg_mask + ' -f ' + filter_type + ' -cat ' +
output_catalogs[ii - 1] + ' -xcol ' + ra + ' -ycol ' + dec +
' -o ' + output_catalogs[ii]
)
# Execute the command
_ = os.system(venice_command)
# Copy the last catalog to the final name
if not os.path.isfile(output_catalogs[-1]):
raise Exception("# Something is wrong with the Venice!")
else:
_ = copyfile(output_catalogs[-1], output_final)
# Delete the intermediate catalogs
for output in output_catalogs:
try:
os.remove(output)
except OSError:
pass
return Table.read(output_final)
def add_chunk_id(catalog):
"""Assign chunk ID based on HSC Tract/Patch."""
chunks = []
for obj in catalog:
tract = '{:5d}'.format(obj['tract'])
patch = '{0:03d}'.format(obj['patch'])
chunks.append('_'.join([tract, patch[0], patch[2]]))
catalog.add_column(Column(data=chunks, name='chunk_id'))
return catalog
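# Example usage (illustrative sketch): chunk IDs are '<tract>_<patch_x>_<patch_y>'
def _demo_add_chunk_id():
    demo = Table({'tract': [9813, 9813], 'patch': [305, 407]})
    return add_chunk_id(demo)['chunk_id']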
class ReferenceCatalog():
"""
Photometric or astrometric reference catalog.
"""
def __init__(self, fits_dir, fits_pattern, nside=32, indexing='ring'):
"""
        fits_pattern: filename pattern used with str.format, e.g. 'ps1-{:05d}.fits' (cf. PS1_PATTERN)
"""
self.fits_dir = fits_dir
self.fits_pattern = os.path.join(fits_dir, fits_pattern)
self.nside = nside
self.indexing = indexing
def get_catalogs(self, pixels):
"""
Get the reference star catalogs.
"""
ref_cats = []
for pix in pixels:
ref_cats.append(Table.read(self.fits_pattern.format(pix)))
return vstack(ref_cats)
def get_hp_pixles(self, ra_arr, dec_arr):
"""
Get the healpix pixels that cover the (RA, Dec) ranges.
"""
hp_pixels = set()
# TODO: This is only for ring indexing
for rr, dd in zip(ra_arr, dec_arr):
hp_pixels.add(
hp.pixelfunc.ang2pix(self.nside, rr, dd, lonlat=True, nest=False))
return hp_pixels
def get_stars_radec(self, ra_range, dec_range, step=100, margin=0.1):
"""
Get the reference stars within a (RA, Dec) range.
"""
        ra_min, ra_max = np.min(ra_range) - margin, np.max(ra_range)
import vtk
from vtk.util import numpy_support
import numpy as np
import argparse
from netCDF4 import Dataset
import os
def convert_file(input, output):
reader = vtk.vtkXMLImageDataReader()
    reader.SetFileName(input)
reader.Update()
imgdata = reader.GetOutput()
dims = imgdata.GetDimensions()
bounds = imgdata.GetBounds()
    with Dataset(output, 'w') as nc:
nc.createDimension('xdim', dims[0])
nc.createDimension('ydim', dims[1])
nc.createDimension('zdim', dims[2])
nc.spatial_min = np.array([bounds[0], bounds[2], bounds[4]], dtype=np.float32)
nc.spatial_max = np.array([bounds[1], bounds[3], bounds[5]], dtype=np.float32)
nc.time_domain = np.array([0.0, 1.0], dtype=np.float32)
        for sidx in range(0, imgdata.GetPointData().GetNumberOfArrays()):
array = imgdata.GetPointData().GetArray(sidx)
            if array is None:
continue
var = array.GetName()
            field = numpy_support.vtk_to_numpy(array).reshape((dims[2], dims[1], dims[0]))
            nc.createVariable(var, field.dtype, ('zdim', 'ydim', 'xdim'))
if field.dtype != np.uint8:
                # rescale float fields to [0, 1] using the min/max of this field
                fmin = np.min(field)
                fmax = np.max(field)
                nc[var][:] = (field - fmin) / (fmax - fmin)
else:
for z in range(0, dims[2]):
nc[var][z, :] = field[z, :]
print(z)
def convert_dir(input_dir, output):
files = sorted(os.listdir(input_dir))[0:50:5]
reader = vtk.vtkXMLImageDataReader()
reader.SetFileName(input_dir + '/' + files[0])
reader.Update()
imgdata = reader.GetOutput()
dims = imgdata.GetDimensions()
bounds = imgdata.GetBounds()
scalar_ids = []
minima = []
maxima = []
with Dataset(output, 'w') as nc:
nc.createDimension('xdim', dims[0])
nc.createDimension('ydim', dims[1])
nc.createDimension('zdim', dims[2])
nc.createDimension('tdim', len(files))
nc.spatial_min = np.array([bounds[0], bounds[2], bounds[4]], dtype=np.float32)
nc.spatial_max = np.array([bounds[1], bounds[3], bounds[5]], dtype=np.float32)
nc.time_domain = np.array([0.0, len(files)], dtype=np.float32)
        for sidx in range(0, imgdata.GetPointData().GetNumberOfArrays()):
array = imgdata.GetPointData().GetArray(sidx)
            if array is None or array.GetDataType() != vtk.VTK_FLOAT:
continue
name = array.GetName()
nc.createVariable(name, np.float32, ('tdim', 'zdim', 'ydim', 'xdim'))
field = numpy_support.vtk_to_numpy(array).reshape((dims[2], dims[1], dims[0]))
minima.append(np.min(field))
maxima.append(np.max(field))
scalar_ids.append(sidx)
print('Finding global min/max')
for t, f in enumerate(files):
reader.SetFileName(input_dir + '/' + f)
reader.Update()
imgdata = reader.GetOutput()
for i, sidx in enumerate(scalar_ids):
field = numpy_support.vtk_to_numpy(imgdata.GetPointData().GetArray(sidx)).reshape((dims[2], dims[1], dims[0]))
            minima[i] = np.minimum(minima[i], np.min(field))  # api: numpy.min
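# The converter above imports argparse but never builds a parser; a plausible
# command-line wrapper might look like this (flag names are assumptions, not
# taken from the original script).
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Convert VTK XML image data to netCDF')
    parser.add_argument('input', help='input .vti file, or a directory of .vti time steps')
    parser.add_argument('output', help='output .nc file')
    parser.add_argument('--directory', action='store_true',
                        help='treat input as a directory of time steps')
    args = parser.parse_args()
    if args.directory:
        convert_dir(args.input, args.output)
    else:
        convert_file(args.input, args.output)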
import torch
import numpy as np
from torch.autograd import Function, Variable
from torch.nn import Module
from torch.nn.parameter import Parameter
import operator
def jacobian(f, x, eps):
if x.ndimension() == 2:
assert x.size(0) == 1
x = x.squeeze()
e = Variable(torch.eye(len(x)).type_as(get_data_maybe(x)))
J = []
for i in range(len(x)):
J.append((f(x + eps*e[i]) - f(x - eps*e[i]))/(2.*eps))
J = torch.stack(J).transpose(0,1).transpose(1,2)
return J
def expandParam(X, n_batch, nDim):
if X.ndimension() in (0, nDim):
return X, False
elif X.ndimension() == nDim - 1:
return X.unsqueeze(0).expand(*([n_batch] + list(X.size()))), True
else:
raise RuntimeError("Unexpected number of dimensions.")
def bdiag(d):
assert d.ndimension() == 2
nBatch, sz = d.size()
dtype = d.type() if not isinstance(d, Variable) else d.data.type()
D = torch.zeros(nBatch, sz, sz).type(dtype)
I = torch.eye(sz).repeat(nBatch, 1, 1).type(dtype).byte()
D[I] = d.view(-1)
return D
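# Quick sanity check of bdiag (an illustrative sketch, not part of the original
# module): each row of a (nBatch, sz) tensor becomes the diagonal of one
# (sz, sz) matrix in the batched output.
def _bdiag_example():
    d = torch.tensor([[1., 2., 3.],
                      [4., 5., 6.]])
    D = bdiag(d)
    assert D.shape == (2, 3, 3)
    assert torch.allclose(D[0], torch.diag(d[0]))
    assert torch.allclose(D[1], torch.diag(d[1]))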
def bger(x, y):
return np.matmul(np.expand_dims(x, 2), np.expand_dims(y, 1))
def bmv(X, y):
    return np.matmul(X, np.expand_dims(y, 2))  # api: numpy.expand_dims
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 17 18:21:36 2020
@author: lukepinkel
"""
import tqdm # analysis:ignore
import patsy # analysis:ignore
import numpy as np # analysis:ignore
import scipy as sp # analysis:ignore
import scipy.stats # analysis:ignore
import pandas as pd # analysis:ignore
from ..utilities.linalg_operations import wdiag_outer_prod, diag_outer_prod
class OLS:
def __init__(self, formula=None, data=None, X=None, y=None):
if formula is not None and data is not None:
y, X = patsy.dmatrices(formula, data=data, return_type='dataframe')
xcols, xinds = X.columns, X.index
ycols, yinds = y.columns, y.index
X, y = X.values, y.values[:, 0]
elif X is not None and y is not None:
if type(X) not in [pd.DataFrame, pd.Series]:
xcols = [f'x{i}' for i in range(1, X.shape[1]+1)]
xinds = np.arange(X.shape[0])
else:
xcols, xinds = X.columns, X.index
X = X.values
if type(y) not in [pd.DataFrame, pd.Series]:
ycols = ['y']
yinds = np.arange(y.shape[0])
else:
ycols, yinds = y.columns, y.index
y = y.values
if X.ndim==1:
X = X[:, None]
self.X, self.y = X, y
self.xcols, self.xinds = xcols, xinds
self.ycols, self.yinds = ycols, yinds
self.G = X.T.dot(X)
self.L = np.linalg.cholesky(self.G)
self.Linv = np.linalg.inv(self.L)
self.n, self.p = X.shape[0], X.shape[1]
self.ymean = self.y.mean()
self.formula = formula
self.data = data
U, S, Vt= np.linalg.svd(X, full_matrices=False)
self.U, self.S, self.V = U, S, Vt.T
self.h = diag_outer_prod(U, U)
self.W = (self.V / self.S).dot(self.U.T)
self.vif = 1.0/np.diag((self.V * 1.0 / self.S**2).dot(self.V.T))
self.condition_indices = np.max(self.S**2) / self.S**2
self.vdp = np.sum(self.V**2, axis=0) / self.S**2
self.cond = np.vstack((self.vif, self.condition_indices, self.vdp)).T
self.cond = pd.DataFrame(self.cond, index=self.xcols,
columns=["VIF", "Cond", "VDP"])
def _fit_mats(self, X, y):
n, p = X.shape
G = X.T.dot(X)
c = X.T.dot(y)
L = np.linalg.cholesky(G)
w = sp.linalg.solve_triangular(L, c, lower=True)
s2 = (y.T.dot(y) - w.T.dot(w)) / (n - p)
beta = sp.linalg.solve_triangular(L.T, w, lower=False)
Linv = np.linalg.inv(L)
Ginv = np.diag(np.dot(Linv.T, Linv))
beta_se = s2 * Ginv
return beta, np.sqrt(beta_se)
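    # How _fit_mats works (a brief added note, not from the original source):
    # with G = X'X = LL' (Cholesky), the normal equations X'X beta = X'y are
    # solved by two triangular solves, L w = X'y and L' beta = w.  At the
    # solution ||y - X beta||^2 = y'y - w'w, so the residual variance is
    # s2 = (y'y - w'w) / (n - p), and diag((X'X)^-1) = diag(Linv' Linv) gives
    # the usual standard errors sqrt(s2 * diag((X'X)^-1)).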
def _fit_y(self, L, Linv, X, y):
n, p = X.shape
c = X.T.dot(y)
w = sp.linalg.solve_triangular(L, c, lower=True)
s2 = (y.T.dot(y) - w.T.dot(w)) / (n - p)
beta = sp.linalg.solve_triangular(L.T, w, lower=False)
Ginv = np.diag(np.dot(Linv.T, Linv))
beta_se = s2 * Ginv
return beta, np.sqrt(beta_se)
def fit(self, var_beta=None):
beta, beta_se = self._fit_mats(self.X, self.y)
self.beta = beta
yhat = self.X.dot(beta)
resids = self.y - yhat
if var_beta is not None:
if type(var_beta) is str:
var_beta = [var_beta]
rse = {}
for vb in var_beta:
_, _, rse[vb] = self.robust_rcov(vb, resids=resids, W=self.W)
else:
rse = None
res = pd.DataFrame(np.vstack((beta, beta_se)).T, index=self.xcols, columns=['beta', 'SE'])
if rse is not None:
for key, val in rse.items():
res["SE_"+key] = val
res['t'] = res['beta'] / res['SE']
if rse is not None:
for key, val in rse.items():
res["t_"+key] = res['beta'] / res["SE_"+key]
res['p'] = sp.stats.t(self.n-self.p).sf(np.abs(res['t']))*2.0
if rse is not None:
for key, val in rse.items():
res["p_"+key] = sp.stats.t(self.n-self.p).sf(np.abs(res['t_'+key]))*2.0
dfr = self.n - self.p - 1
dfm = self.p
dft = self.n - 1
ssr = np.sum(resids**2)
ssm = np.sum((yhat - self.ymean)**2)
sst = np.sum((self.y - self.ymean)**2)
msr = ssr / dfr
msm = ssm / dfm
mst = sst / dft
rsquared = 1.0 - ssr / sst
rsquared_adj = 1.0 - (msr / mst)
fvalue = msm / msr
fpval = sp.stats.f(dfm, dfr).sf(fvalue)
self.sumstats = pd.DataFrame([[rsquared, '-'],
[rsquared_adj, '-'],
[fvalue, fpval]])
self.sumstats.index =['R2', 'R2 Adj', 'F test']
self.sumstats.columns = ['Statistic', 'P']
        ssq_ind = ['Residuals', 'Model', 'Total']
ssq_col = ['Sum Square', 'Degrees of Freedom', 'Mean Square']
self.ssq = pd.DataFrame([[ssr, dfr, msr],
[ssm, dfm, msm],
[sst, dft, mst]], index=ssq_ind, columns=ssq_col)
self.res = res
self.beta, self.beta_se = beta, beta_se
self.tvalues = self.beta / self.beta_se
self.Ginv = np.dot(self.Linv.T, self.Linv)
self.Vbeta = self.Ginv * msr
self.s2 = msr
self.resids = resids
def _permutation_test_store(self,n_perms, L, Linv, X, y, verbose):
pbar = tqdm.tqdm(total=n_perms) if verbose else None
t_samples = np.zeros((n_perms, self.p))
for i in range(n_perms):
b, se = self._fit_y(L, Linv, X, y[np.random.permutation(self.n)])
t_samples[i] = b / se
if verbose:
pbar.update(1)
if verbose:
pbar.close()
return t_samples
def _permutation_test(self, n_perms, L, Linv, X, y, verbose):
pbar = tqdm.tqdm(total=n_perms, smoothing=0.001) if verbose else None
p_values = np.zeros((self.p))
p_values_fwer = np.zeros((self.p))
abst = np.abs(self.tvalues)
for i in range(n_perms):
b, se = self._fit_y(L, Linv, X, y[np.random.permutation(self.n)])
abstp = np.abs(b / se)
p_values_fwer += (abstp.max()>abst) / n_perms
p_values += (abstp>abst) / n_perms
if verbose:
pbar.update(1)
if verbose:
pbar.close()
return p_values_fwer, p_values
def _freedman_lane(self, vars_of_interest, n_perms=5000, verbose=True):
pbar = tqdm.tqdm(total=n_perms, smoothing=0.001) if verbose else None
p_values = np.zeros(len(vars_of_interest))
p_values_fwer = np.zeros(len(vars_of_interest))
abst = np.abs(self.tvalues[vars_of_interest])
ixc = np.setdiff1d(np.arange(self.p), vars_of_interest)
Xc, y = self.X[:, ixc], self.y
g, _ = self._fit_mats(Xc, y)
u = Xc.dot(g)
r = y - u
for i in range(n_perms):
b, se = self._fit_y(self.L, self.Linv, self.X, u + r[np.random.permutation(self.n)])
abstp = np.abs(b / se)[vars_of_interest]
p_values_fwer += (abstp.max()>abst) / n_perms
p_values += (abstp>abst) / n_perms
if verbose:
pbar.update(1)
if verbose:
pbar.close()
return p_values_fwer, p_values
def freedman_lane(self, vars_of_interest=None, n_perms=5000, verbose=True):
        if not hasattr(self, 'res'):
self.fit()
if vars_of_interest is None:
vars_of_interest = np.arange(self.p)
if "Intercept" in self.res.index:
ii = np.ones(self.p).astype(bool)
ii[self.res.index.get_loc("Intercept")] = False
vars_of_interest = vars_of_interest[ii]
vars_of_interest = np.arange(self.p) if vars_of_interest is None else vars_of_interest
pvals_fwer, pvals = self._freedman_lane(vars_of_interest, n_perms, verbose)
rows = self.res.index[vars_of_interest]
self.res['freedman_lane_p'] = '-'
self.res['freedman_lane_p_fwer'] = '-'
self.res.loc[rows, 'freedman_lane_p'] = pvals
self.res.loc[rows, 'freedman_lane_p_fwer'] = pvals_fwer
def permutation_test(self, n_perms=5_000, store_samples=False, verbose=True):
        if not hasattr(self, 'res'):
self.fit()
L, Linv, X, y = self.L, self.Linv, self.X, self.y
if store_samples:
t_samples = self._permutation_test_store(n_perms, L, Linv, X, y, verbose)
            abst = np.abs(t_samples)
            p_values_fwer = (abst.max(axis=1)[:, None] > np.abs(self.tvalues)).sum(axis=0) / n_perms
            p_values = (abst > np.abs(self.tvalues)).sum(axis=0) / n_perms
else:
p_values_fwer, p_values = self._permutation_test(n_perms, L, Linv, X, y, verbose)
t_samples = None
self.permutation_t_samples = t_samples
self.res['permutation_p'] = p_values
self.res['permutation_p_fwer'] = p_values_fwer
def _bootstrap(self, n_boot):
pbar = tqdm.tqdm(total=n_boot, smoothing=0.001)
beta_samples = np.zeros((n_boot, self.p))
beta_se_samples = np.zeros((n_boot, self.p))
for i in range(n_boot):
ix = np.random.choice(self.n, self.n)
Xb, Yb = self.X[ix], self.y[ix]
beta_samples[i], beta_se_samples[i] = self._fit_mats(Xb, Yb)
pbar.update(1)
pbar.close()
return beta_samples, beta_se_samples
def bootstrap(self, n_boot=5_000):
        if not hasattr(self, 'res'):
self.fit()
self.beta_samples, self.beta_se_samples = self._bootstrap(n_boot)
self.res.insert(self.res.columns.get_loc("SE")+1, "SE_boot", self.beta_samples.std(axis=0))
self.res.insert(self.res.columns.get_loc("t")+1, "t_boot", self.res['beta']/self.res['SE_boot'])
self.res.insert(self.res.columns.get_loc("p")+1, "p_boot",
sp.stats.t(self.n-self.p).sf(np.abs(self.res['t_boot']))*2.0)
def print_results(self):
opt_cont = ('display.max_rows', None, 'display.max_columns', None,
'display.float_format', '{:.4f}'.format)
with pd.option_context(*opt_cont):
print(self.res)
def robust_rcov(self, kind=None, resids=None, Ginv=None, X=None, U=None,
W=None):
kind = "HC3" if kind is None else kind
if Ginv is None:
if hasattr(self, "Ginv"):
Ginv = self.Ginv
else:
Ginv = np.dot(self.Linv.T, self.Linv)
if resids is None:
if hasattr(self, "resids"):
resids = self.resids
else:
resids = self.y - self.X.dot(self.beta)
if U is None:
if X is None:
U, S, V, h = self.U, self.S, self.V, self.h
else:
U, S, Vt = np.linalg.svd(X, full_matrices=False)
V = Vt.T
h = diag_outer_prod(U, U)
X = self.X if X is None else X
n, p = X.shape
u = resids**2
if kind == "HC0":
omega = u
elif kind == "HC1":
omega = n / (n - p) * u
elif kind == "HC2":
omega = u / (1.0 - h)
elif kind == "HC3":
omega = u / (1 - h)**2
elif kind == "HC4":
omega = u / (1.0 - h)**np.minimum(4.0, h / np.mean(h))
if W is None:
W = (V / S).dot(U.T)
se = np.sqrt(diag_outer_prod(W*omega, W))
return W, omega, se
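    # Note on robust_rcov (added commentary): the heteroscedasticity-consistent
    # ("sandwich") covariance is (X'X)^-1 X' diag(omega) X (X'X)^-1, where omega
    # rescales the squared residuals u = r^2 (HC0: u, HC1: n/(n-p) u,
    # HC2: u/(1-h), HC3: u/(1-h)^2, HC4: u/(1-h)^min(4, h/mean(h)), with h the
    # leverages).  Since X = U S V' gives (X'X)^-1 X' = (V/S) U' = W, the robust
    # standard errors are sqrt(diag(W diag(omega) W')), which is exactly what
    # diag_outer_prod(W*omega, W) computes row by row.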
def get_influence(self, r=None, h=None):
h = self.h if h is None else h
r = self.resids if r is None else r
ssr = np.sum(r**2)
dfe = self.n - self.p - 1.0
msr = ssr / dfe
n = self.n
p = self.p
s = np.sqrt(msr)
s_i = np.sqrt((ssr-r**2) / (dfe-1))
r_students = r / (s_i * np.sqrt(1.0 - h))
dfbeta = r / (1.0 - h) * self.W
h_tilde = 1.0 / n + h
cov_ratio = (s_i / s)**(2 * p) * 1.0 / (1.0 - h_tilde)
d_residuals = r / (1.0 - h_tilde)
s_star_i = 1/np.sqrt(n-p-1)*np.sqrt((n-p)*s**2/(1-h_tilde)-d_residuals**2)
s_i = s_star_i * np.sqrt(1.0 - h_tilde)
studentized_d_residuals = d_residuals / s_star_i
dfits = r * np.sqrt(h) / (s_i * (1.0 -h))
cooks_distance = d_residuals**2 * h_tilde / (s**2 * (p + 1))
res = dict(r_students=r_students, cov_ratio=cov_ratio,
s_i=s_i, studentized_d_residuals=studentized_d_residuals,
dfits=dfits, cooks_distance=cooks_distance)
res = pd.DataFrame(res)
dfbeta = pd.DataFrame(dfbeta.T, columns=self.xcols)
res = pd.concat([res, dfbeta], axis=1)
res["leverage"] = h
return res
def loglike(self, params, X=None, y=None):
X = self.X if X is None else X
y = self.y if y is None else y
if len(params)==(self.p+1):
beta = params[:-1]
sigma = np.exp(params[-1])
else:
beta = params
r = y - X.dot(beta)
        sigma = np.sqrt(np.dot(r, r))  # api: numpy.dot
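# A usage sketch for the OLS class above (not from the original file; it assumes
# the class and its relative-imported helpers are importable, and uses synthetic
# data purely for illustration).
def _ols_example(seed=0):
    rng = np.random.default_rng(seed)
    n = 200
    df = pd.DataFrame({"x1": rng.normal(size=n), "x2": rng.normal(size=n)})
    df["y"] = 1.0 + 2.0 * df["x1"] - 0.5 * df["x2"] + rng.normal(scale=0.5, size=n)
    model = OLS(formula="y ~ x1 + x2", data=df)
    model.fit(var_beta="HC3")       # adds SE_HC3 / t_HC3 / p_HC3 columns to model.res
    model.print_results()
    # Resampling-based inference (slow; uncomment to run):
    # model.permutation_test(n_perms=1000)
    # model.bootstrap(n_boot=1000)
    return model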
import numpy as np
import scipy
import pandas as pd
import matplotlib.pyplot as plt
import math
from astropy.io import fits
from operator import sub
from scipy import optimize
from itertools import zip_longest
import matplotlib.animation as animation
#global variables
nchannels = 0
basis_funcs = []
def animate(img):
    fig = plt.figure()
    imlist = []
    for i in range(img.shape[2]):
        image = img[:, :, i]
        im = plt.imshow(image, animated=True, cmap='jet', vmin=0, vmax=np.max(img))
        imlist.append([im])
ani = animation.ArtistAnimation(fig, imlist, interval = 20, blit = True)
plt.show()
def response(resp, lgt):
suvi_labels = [r'$94\,\,\AA$',r'$131\,\,\AA$',r'$171\,\,\AA$',r'$195\,\,\AA$', r'$284\,\,\AA$', r'$304\,\,\AA$']
fig,ax = plt.subplots(figsize=(10, 8))
ax.plot(lgt, resp[0], linewidth=2, linestyle = '-', label = suvi_labels[0])
ax.plot(lgt, resp[2], linewidth=2, linestyle = '-', label = suvi_labels[2])
#set plot options
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim([4, 8])
ax.set_ylim([1e-33, 1e-26])
#labels
ax.set_xlabel(r'$Log{T}\,\,\mathrm{(K)}$', fontsize = 22)
ax.set_ylabel(r'Temperature Response $(\mathrm{DN}\,\mathrm{cm}^{-5}\,\mathrm{pix}^{-1})$', fontsize=22)
#legend
ax.legend(loc='best', fontsize=14)
plt.tight_layout()
plt.show()
def linear_alg(resp, lgtaxis):
global nchannels
global basis_funcs
    np.set_printoptions(threshold=np.inf)
basis_sigmas = [0, 0.1, 0.2]
rnge = lgtaxis[1]-lgtaxis[0]
emunit = 1e27
basis_funcs = np.zeros((len(lgtaxis), len(basis_sigmas)*len(lgtaxis)))
for i in range(len(basis_sigmas)):
if basis_sigmas[i] == 0:
np.fill_diagonal(basis_funcs, 1.0)
else:
ext = list(map(lambda x: x*rnge, range(-25, 25)))
            line = list(map(lambda x: math.exp(-(x/basis_sigmas[i])**2), ext))
cut = np.argwhere(np.array(line) < 0.04)
for j in cut:
line[j[0]] = 0.0
norm = sum(line)
for j in range(len(lgtaxis)):
line = list(map(lambda x: math.exp(-((x-lgtaxis[j])/basis_sigmas[i])**2.0), lgtaxis))
cut = np.argwhere(np.array(line) < 0.04)
for k in cut:
line[k[0]] = 0.0
basis_funcs[0:len(lgtaxis), len(lgtaxis)*i + j] = line
if len(resp) > 0:
nchannels = resp[0].size
        Dict = np.matmul(basis_funcs.T, resp)
Dict *= 1e26
return(Dict)
return
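# Vectorized sketch of the Gaussian basis construction used in linear_alg
# (illustrative only; it mirrors the delta-function plus truncated-Gaussian
# columns built above but is not the original implementation).
def gaussian_basis(lgtaxis, sigmas=(0.1, 0.2), floor=0.04):
    cols = [np.eye(len(lgtaxis))]                       # sigma = 0 -> delta functions
    for s in sigmas:
        g = np.exp(-((lgtaxis[:, None] - lgtaxis[None, :]) / s) ** 2)
        g[g < floor] = 0.0                              # truncate the Gaussian wings
        cols.append(g)
    return np.hstack(cols)                              # shape (nT, nT * (1 + len(sigmas)))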
def DEM_solve(img, Dict, lgtaxis, tolfac, locations):
global basis_funcs
eps = 1e-3
relax = 1
symmbuff = 1.0
adaptive_tolfac = 1
dim = np.array(img).shape
print(dim)
NOCOUNTS = np.where(np.array(np.array(img).sum(axis=0)) < 10*eps) #flagged
ntemp = len(Dict[:,0])
    coeffs = np.zeros((dim[2], dim[1], ntemp))  # api: numpy.zeros
# Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
# the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights
# reserved. See files LICENSE and NOTICE for details.
#
# This file is part of CEED, a collection of benchmarks, miniapps, software
# libraries and APIs for efficient high-order finite element and spectral
# element discretizations for exascale applications. For more information and
# source code availability see http://github.com/ceed.
#
# The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
# a collaborative effort of two U.S. Department of Energy organizations (Office
# of Science and the National Nuclear Security Administration) responsible for
# the planning and preparation of a capable exascale ecosystem, including
# software, applications, hardware, advanced system engineering and early
# testbed platforms, in support of the nation's exascale computing imperative.
# @file
# Test Ceed Operator functionality
import os
import libceed
import numpy as np
import check
import buildmats as bm
TOL = libceed.EPSILON * 256
# -------------------------------------------------------------------------------
# Utility
# -------------------------------------------------------------------------------
def load_qfs_so():
from distutils.sysconfig import get_config_var
import ctypes
file_dir = os.path.dirname(os.path.abspath(__file__))
qfs_so = os.path.join(
file_dir,
"libceed_qfunctions" + get_config_var("EXT_SUFFIX"))
# Load library
return ctypes.cdll.LoadLibrary(qfs_so)
# -------------------------------------------------------------------------------
# Test creation, action, and destruction for mass matrix operator
# -------------------------------------------------------------------------------
def test_500(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
nelem = 15
p = 5
q = 8
nx = nelem + 1
nu = nelem * (p - 1) + 1
# Vectors
x = ceed.Vector(nx)
x_array = np.zeros(nx)
for i in range(nx):
x_array[i] = i / (nx - 1.0)
x.set_array(x_array, cmode=libceed.USE_POINTER)
qdata = ceed.Vector(nelem * q)
u = ceed.Vector(nu)
v = ceed.Vector(nu)
# Restrictions
indx = np.zeros(nx * 2, dtype="int32")
for i in range(nx):
indx[2 * i + 0] = i
indx[2 * i + 1] = i + 1
rx = ceed.ElemRestriction(nelem, 2, 1, 1, nx, indx,
cmode=libceed.USE_POINTER)
indu = np.zeros(nelem * p, dtype="int32")
for i in range(nelem):
for j in range(p):
indu[p * i + j] = i * (p - 1) + j
ru = ceed.ElemRestriction(nelem, p, 1, 1, nu, indu,
cmode=libceed.USE_POINTER)
strides = np.array([1, q, q], dtype="int32")
rui = ceed.StridedElemRestriction(nelem, q, 1, q * nelem, strides)
# Bases
bx = ceed.BasisTensorH1Lagrange(1, 1, 2, q, libceed.GAUSS)
bu = ceed.BasisTensorH1Lagrange(1, 1, p, q, libceed.GAUSS)
# QFunctions
file_dir = os.path.dirname(os.path.abspath(__file__))
qfs = load_qfs_so()
qf_setup = ceed.QFunction(1, qfs.setup_mass,
os.path.join(file_dir, "test-qfunctions.h:setup_mass"))
qf_setup.add_input("weights", 1, libceed.EVAL_WEIGHT)
qf_setup.add_input("dx", 1, libceed.EVAL_GRAD)
qf_setup.add_output("rho", 1, libceed.EVAL_NONE)
qf_mass = ceed.QFunction(1, qfs.apply_mass,
os.path.join(file_dir, "test-qfunctions.h:apply_mass"))
qf_mass.add_input("rho", 1, libceed.EVAL_NONE)
qf_mass.add_input("u", 1, libceed.EVAL_INTERP)
qf_mass.add_output("v", 1, libceed.EVAL_INTERP)
# Operators
op_setup = ceed.Operator(qf_setup)
op_setup.set_field("weights", libceed.ELEMRESTRICTION_NONE, bx,
libceed.VECTOR_NONE)
op_setup.set_field("dx", rx, bx, libceed.VECTOR_ACTIVE)
op_setup.set_field("rho", rui, libceed.BASIS_COLLOCATED,
libceed.VECTOR_ACTIVE)
op_setup.check()
op_mass = ceed.Operator(qf_mass)
op_mass.set_field("rho", rui, libceed.BASIS_COLLOCATED, qdata)
op_mass.set_field("u", ru, bu, libceed.VECTOR_ACTIVE)
op_mass.set_field("v", ru, bu, libceed.VECTOR_ACTIVE)
op_mass.check()
# Setup
op_setup.apply(x, qdata)
# Apply mass matrix
u.set_value(0)
op_mass.apply(u, v)
# Check
with v.array_read() as v_array:
for i in range(q):
assert abs(v_array[i]) < TOL
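# To exercise test_500 outside the test harness, a backend resource string can
# be passed directly (a sketch; "/cpu/self" is the usual reference backend, but
# the available resources depend on how libCEED was built):
#
#     test_500("/cpu/self")
#
# Under pytest the ceed_resource argument is normally supplied by a fixture in
# the accompanying conftest.py.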
# -------------------------------------------------------------------------------
# Test creation, action, and destruction for mass matrix operator
# -------------------------------------------------------------------------------
def test_501(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
nelem = 15
p = 5
q = 8
nx = nelem + 1
nu = nelem * (p - 1) + 1
# Vectors
x = ceed.Vector(nx)
x_array = np.zeros(nx, dtype=ceed.scalar_type())
for i in range(nx):
x_array[i] = i / (nx - 1.0)
x.set_array(x_array, cmode=libceed.USE_POINTER)
qdata = ceed.Vector(nelem * q)
u = ceed.Vector(nu)
v = ceed.Vector(nu)
# Restrictions
indx = np.zeros(nx * 2, dtype="int32")
for i in range(nx):
indx[2 * i + 0] = i
indx[2 * i + 1] = i + 1
rx = ceed.ElemRestriction(nelem, 2, 1, 1, nx, indx,
cmode=libceed.USE_POINTER)
indu = np.zeros(nelem * p, dtype="int32")
for i in range(nelem):
for j in range(p):
indu[p * i + j] = i * (p - 1) + j
ru = ceed.ElemRestriction(nelem, p, 1, 1, nu, indu,
cmode=libceed.USE_POINTER)
strides = np.array([1, q, q], dtype="int32")
rui = ceed.StridedElemRestriction(nelem, q, 1, q * nelem, strides)
# Bases
bx = ceed.BasisTensorH1Lagrange(1, 1, 2, q, libceed.GAUSS)
bu = ceed.BasisTensorH1Lagrange(1, 1, p, q, libceed.GAUSS)
# QFunctions
file_dir = os.path.dirname(os.path.abspath(__file__))
qfs = load_qfs_so()
qf_setup = ceed.QFunction(1, qfs.setup_mass,
os.path.join(file_dir, "test-qfunctions.h:setup_mass"))
qf_setup.add_input("weights", 1, libceed.EVAL_WEIGHT)
qf_setup.add_input("dx", 1, libceed.EVAL_GRAD)
qf_setup.add_output("rho", 1, libceed.EVAL_NONE)
qf_mass = ceed.QFunction(1, qfs.apply_mass,
os.path.join(file_dir, "test-qfunctions.h:apply_mass"))
qf_mass.add_input("rho", 1, libceed.EVAL_NONE)
qf_mass.add_input("u", 1, libceed.EVAL_INTERP)
qf_mass.add_output("v", 1, libceed.EVAL_INTERP)
# Operators
op_setup = ceed.Operator(qf_setup)
op_setup.set_field("weights", libceed.ELEMRESTRICTION_NONE, bx,
libceed.VECTOR_NONE)
op_setup.set_field("dx", rx, bx, libceed.VECTOR_ACTIVE)
op_setup.set_field("rho", rui, libceed.BASIS_COLLOCATED,
libceed.VECTOR_ACTIVE)
op_mass = ceed.Operator(qf_mass)
op_mass.set_field("rho", rui, libceed.BASIS_COLLOCATED, qdata)
op_mass.set_field("u", ru, bu, libceed.VECTOR_ACTIVE)
op_mass.set_field("v", ru, bu, libceed.VECTOR_ACTIVE)
# Setup
op_setup.apply(x, qdata)
# Apply mass matrix
u.set_value(1.)
op_mass.apply(u, v)
# Check
with v.array_read() as v_array:
total = 0.0
for i in range(nu):
total = total + v_array[i]
assert abs(total - 1.0) < TOL
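# Why the total is ~1 in test_501 (added note): with u set to the constant
# function 1, v holds the mass-matrix action M*u, and summing its entries is the
# discrete analogue of integrating 1 over the mesh, which spans [0, 1] -- hence
# the expected value of 1 to within TOL.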
# -------------------------------------------------------------------------------
# Test creation, action, and destruction for mass matrix operator with multiple
# components
# -------------------------------------------------------------------------------
def test_502(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
nelem = 15
p = 5
q = 8
nx = nelem + 1
nu = nelem * (p - 1) + 1
# Vectors
x = ceed.Vector(nx)
x_array = np.zeros(nx, dtype=ceed.scalar_type())
for i in range(nx):
x_array[i] = i / (nx - 1.0)
x.set_array(x_array, cmode=libceed.USE_POINTER)
qdata = ceed.Vector(nelem * q)
u = ceed.Vector(2 * nu)
v = ceed.Vector(2 * nu)
# Restrictions
indx = np.zeros(nx * 2, dtype="int32")
for i in range(nx):
indx[2 * i + 0] = i
indx[2 * i + 1] = i + 1
rx = ceed.ElemRestriction(nelem, 2, 1, 1, nx, indx,
cmode=libceed.USE_POINTER)
    indu = np.zeros(nelem * p, dtype="int32")  # api: numpy.zeros
import scipy
from glob import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
class DataLoader():
def __init__(self, dataset_name, img_res=(48, 48,1),path_csv=None,use_test_in_batch=False,normalize=True):
self.dataset_name = dataset_name
self.img_res = img_res
self.img_vect_train = None
self.img_vect_test = None
self.lab_vect_train = None
self.lab_vect_test = None
self.path_csv = path_csv
## dict
self.lab_dict = {0: "Angry", 1: "Disgust" , 2: "Fear" , 3: "Happy" , 4: "Sad" , 5: "Surprise" , 6: "Neutral"}
self.use_test_in_batch = use_test_in_batch
self.normalize = normalize
## load dataset
self._load_internally()
def _load_internally(self):
print(">> loading "+str(self.dataset_name)+" ...")
if self.dataset_name == 'fer2013':
if self.path_csv is None:
raw_data = pd.read_csv('./datasets/fer2013.csv')
else:
raw_data = pd.read_csv(self.path_csv)
else:
raise Exception("dataset not supported:"+str(self.dataset_name))
n_train = np.sum(raw_data['Usage'] == 'Training')
n_test = np.sum(raw_data['Usage'] != 'Training')
assert n_train + n_test == len(raw_data)
self.img_vect_train = np.zeros( (n_train,self.img_res[0],
self.img_res[1],self.img_res[2]) , 'float32')
self.img_vect_test = np.zeros( (n_test,self.img_res[0],
self.img_res[1],self.img_res[2]) , 'float32')
self.lab_vect_train = np.zeros( n_train , 'int32' )
self.lab_vect_test = np.zeros( n_test , 'int32' )
i_train , i_test = 0,0
for i in range(len(raw_data)):
img = raw_data["pixels"][i]
x_pixels = np.array(img.split(" "), 'float32')
if self.normalize:
x_pixels = x_pixels/127.5 - 1.
x_pixels = x_pixels.reshape(self.img_res)
us = raw_data["Usage"][i]
if us == 'Training':
self.img_vect_train[i_train] = x_pixels
self.lab_vect_train[i_train] = int(raw_data["emotion"][i])
i_train = i_train + 1
else:
self.img_vect_test[i_test] = x_pixels
self.lab_vect_test[i_test] = int(raw_data["emotion"][i])
i_test = i_test + 1
assert i_train == len(self.img_vect_train)
assert i_train == len(self.lab_vect_train)
assert i_test == len(self.lab_vect_test)
assert i_test == len(self.img_vect_test)
print("> loaded train:",len(self.img_vect_train)," - test:",len(self.lab_vect_test) )
self.img_vect_test_RGB = np.zeros((self.img_vect_test.shape[0],self.img_res[0],self.img_res[1],3))
for i in range(self.img_vect_test_RGB.shape[0]):
self.img_vect_test_RGB[i] = cv2.cvtColor(self.img_vect_test[i], cv2.COLOR_GRAY2RGB)
self.img_vect_train_RGB = np.zeros((self.img_vect_train.shape[0],self.img_res[0],self.img_res[1],3))
for i in range(self.img_vect_train_RGB.shape[0]):
self.img_vect_train_RGB[i] = cv2.cvtColor(self.img_vect_train[i], cv2.COLOR_GRAY2RGB)
##
leo = cv2.imread('./images/leo_gray__crop_48_48.jpg', cv2.IMREAD_GRAYSCALE )
self.leo = leo.reshape((1,self.img_res[0],self.img_res[1],self.img_res[2]))
self.leo_lab = 6 * np.ones( 1 , 'int32' ) # neutral
if self.use_test_in_batch:
self.lab_vect_train = np.concatenate([self.lab_vect_train,self.lab_vect_test,self.leo_lab])
self.img_vect_train = np.concatenate([self.img_vect_train,self.img_vect_test,self.leo])
def load_leo(self):
return self.leo_lab , self.leo
def load_data(self, domain=None, batch_size=1, is_testing=False,convertRGB=False):
if is_testing:
if domain is None:
idx = np.random.choice(self.img_vect_test.shape[0],size=batch_size)
else:
assert domain in [0,1,2,3,4,5,6]
idx0 = np.argwhere(self.lab_vect_test == domain)
idx1 = np.random.choice(idx0.shape[0],size=batch_size)
idx = idx0[idx1]
idx = np.squeeze(idx)
batch_images = self.img_vect_test[idx]
labels = self.lab_vect_test[idx]
else:
if domain is None:
idx = np.random.choice(self.lab_vect_train.shape[0],size=batch_size)
else:
assert domain in [0,1,2,3,4,5,6]
idx0 = np.argwhere(self.lab_vect_train == domain)
idx1 = np.random.choice(idx0.shape[0],size=batch_size)
idx = idx0[idx1]
idx = np.squeeze(idx)
batch_images = self.img_vect_train[idx]
labels = self.lab_vect_train[idx]
batch_images = np.resize(batch_images,(batch_size,self.img_res[0],self.img_res[1],self.img_res[2]))
if convertRGB:
_batch_images = np.zeros((batch_size,self.img_res[0],self.img_res[1],3))
for i in range(batch_size):
_batch_images[i] = cv2.cvtColor(batch_images[i], cv2.COLOR_GRAY2RGB)
batch_images = _batch_images
if is_testing:
return labels , batch_images
for i in range(batch_size):
if np.random.random() > 0.5:
                batch_images[i] = np.fliplr(batch_images[i])  # api: numpy.fliplr
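# Usage sketch for DataLoader (illustrative; the fer2013.csv path is
# hypothetical, and the training branch of load_data is truncated above, so
# only the test branch is exercised here).
if __name__ == '__main__':
    dl = DataLoader('fer2013', img_res=(48, 48, 1), path_csv='./datasets/fer2013.csv')
    labels, images = dl.load_data(domain=3, batch_size=16, is_testing=True)  # 3 = "Happy"
    print(labels.shape, images.shape)   # expected: (16,) (16, 48, 48, 1)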