Dataset schema: repo: string (2-99 chars) | file: string (13-225 chars) | code: string (0-18.3M chars) | file_length: int64 (0-18.3M) | avg_line_length: float64 (0-1.36M) | max_line_length: int64 (0-4.26M) | extension_type: string (1 class)
repo: 3d_sir
file: 3d_sir-master/sir3d/synth/multiprocessing.py
import numpy as np
try:
    from mpi4py import MPI
    _mpi_available = True
except:
    _mpi_available = False
from enum import IntEnum
import h5py
from tqdm import tqdm, trange
import logging
from . import slant


class tags(IntEnum):
    READY = 0
    DONE = 1
    EXIT = 2
    START = 3


class Iterator(object):
    def __init__(self, use_mpi=False, batch=1, n_batches=None, workers_slant=None, withstokes=True):

        # Initializations and preliminaries
        self.use_mpi = use_mpi

        if (self.use_mpi):
            if (not _mpi_available):
                raise Exception("You need MPI and mpi4py installed in your system to use this option.")
            self.comm = MPI.COMM_WORLD   # get MPI communicator object
            self.size = self.comm.size   # total number of processes
            self.rank = self.comm.rank   # rank of this process
            self.status = MPI.Status()   # get MPI status object

            if (workers_slant is None):
                self.workers_slant = self.size
            else:
                if (workers_slant > self.size):
                    self.workers_slant = self.size
                else:
                    self.workers_slant = workers_slant

            if (self.size == 1):
                print("You have activated MPI but there are no agents available, or you need to start the code with mpiexec.")
                # The code can still run in single-core mode after this message
                self.use_mpi = False
        else:
            self.rank = 0

        self.logger = logging.getLogger("Iterator")
        self.logger.setLevel(logging.DEBUG)
        self.logger.handlers = []
        ch = logging.StreamHandler()
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        ch.setFormatter(formatter)
        self.logger.addHandler(ch)

        self.batch = batch
        self.stop_after_n_batches = n_batches
        self.withstokes = withstokes

    def get_rank(self, n_agents=0):
        if (self.use_mpi):
            if (n_agents >= self.size):
                raise Exception("Number of requested agents {0} is >= number of available cores ({1})".format(n_agents, self.size))
        return self.rank

    def use_model(self, model=None):
        # Then broadcast
        if (self.use_mpi):
            if (self.rank == 0):
                self.model = model
                self.logger.info('Broadcasting models to all agents')
                self.comm.Barrier()
                self.comm.bcast(self.model, root=0)
                self.comm.Barrier()
            else:
                model = None
                self.comm.Barrier()
                self.model = self.comm.bcast(model, root=0)
                self.comm.Barrier()
                # self.model.init_sir_agents()
                self.model.init_sir(self.model.spectral_regions_dict)
        else:
            self.model = model

        if (self.rank == 0):
            self.logger.info('All agents ready')

    def nonmpi_work(self, rangex, rangey):
        """
        Do the synthesis/inversion for all pixels in the models

        Parameters
        ----------
        model : model
            Model to be synthesized

        Returns
        -------
        None
        """
        if (rangex is not None):
            x = np.arange(rangex[0], rangex[1])
        else:
            x = np.arange(self.model.nx)

        if (rangey is not None):
            y = np.arange(rangey[0], rangey[1])
        else:
            y = np.arange(self.model.nz)

        if (self.model.atmosphere_type == 'MURAM'):
            self.T = np.memmap(self.model.T_file, dtype='float32', mode='r', shape=self.model.model_shape)
            self.P = np.memmap(self.model.P_file, dtype='float32', mode='r', shape=self.model.model_shape)
            self.rho = np.memmap(self.model.rho_file, dtype='float32', mode='r', shape=self.model.model_shape)
            self.vz = np.memmap(self.model.vz_file, dtype='float32', mode='r', shape=self.model.model_shape)
            self.Bx = np.memmap(self.model.Bx_file, dtype='float32', mode='r', shape=self.model.model_shape)
            self.By = np.memmap(self.model.By_file, dtype='float32', mode='r', shape=self.model.model_shape)
            self.Bz = np.memmap(self.model.Bz_file, dtype='float32', mode='r', shape=self.model.model_shape)

        self.n_pixels = self.model.nx * self.model.nz

        # Tell the user that no Stokes profiles will be synthesized
        if not (self.withstokes):
            self.logger.info("Avoiding the synthesis module and only saving the model.")

        # Write the Stokes file only if withstokes
        if (self.withstokes):
            self.f_stokes_out = h5py.File(self.model.output_file, 'w')
            self.stokes_db = self.f_stokes_out.create_dataset('stokes', (len(x), len(y), 4, self.model.n_lambda_sir))
            self.lambda_db = self.f_stokes_out.create_dataset('lambda', (self.model.n_lambda_sir,))

        # If we want to extract a model sampled at selected taus
        interpolate_model = False
        if (self.model.interpolated_model_filename is not None):
            self.f_model_out = h5py.File(self.model.interpolated_model_filename, 'w')
            self.model_db = self.f_model_out.create_dataset('model', (len(x), len(y), 7, self.model.n_tau))
            interpolate_model = True

        # To save the result in a model with the same size as the range
        for cx, ix in enumerate(tqdm(x, desc='x')):
            for cz, iz in enumerate(tqdm(y, desc='y')):
                if (self.model.vz_type == 'vz'):
                    vz = self.vz[ix,:,iz]
                else:
                    vz = self.vz[ix,:,iz] / self.rho[ix,:,iz]

                # Generic implementation: always get two outputs (stokes=None when there is no synthesis)
                stokes, model = self.model.synth(self.model.deltaz,
                                                 self.T[ix,:,iz].astype('float64'),
                                                 self.P[ix,:,iz].astype('float64'),
                                                 self.rho[ix,:,iz].astype('float64'),
                                                 vz.astype('float64'),
                                                 self.Bx[ix,:,iz].astype('float64'),
                                                 self.By[ix,:,iz].astype('float64'),
                                                 self.Bz[ix,:,iz].astype('float64'),
                                                 interpolate_model=interpolate_model, withstokes=self.withstokes)

                if (self.withstokes):
                    self.stokes_db[cx,cz,:,:] = stokes[1:,:]
                if (interpolate_model):
                    self.model_db[cx,cz,:,:] = model

        # Only close the files that were actually opened
        if (self.withstokes):
            self.lambda_db[:] = stokes[0,:]
            self.f_stokes_out.close()
        if (interpolate_model):
            self.f_model_out.close()

        # To fix the print problem of tqdm
        print()

    def mpi_master_work_synth(self, rangex, rangey):
        """
        MPI master work

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        if (self.model.atmosphere_type == 'MURAM'):
            self.T = np.memmap(self.model.T_file, dtype='float32', mode='r', shape=self.model.model_shape)
            self.P = np.memmap(self.model.P_file, dtype='float32', mode='r', shape=self.model.model_shape)
            self.rho = np.memmap(self.model.rho_file, dtype='float32', mode='r', shape=self.model.model_shape)
            self.vz = np.memmap(self.model.vz_file, dtype='float32', mode='r', shape=self.model.model_shape)
            self.Bx = np.memmap(self.model.Bx_file, dtype='float32', mode='r', shape=self.model.model_shape)
            self.By = np.memmap(self.model.By_file, dtype='float32', mode='r', shape=self.model.model_shape)
            self.Bz = np.memmap(self.model.Bz_file, dtype='float32', mode='r', shape=self.model.model_shape)
            if (self.model.need_slant):
                self.vx = np.memmap(self.model.vx_file, dtype='float32', mode='r', shape=self.model.model_shape)
                self.vy = np.memmap(self.model.vy_file, dtype='float32', mode='r', shape=self.model.model_shape)

        if (rangex is not None):
            x = np.arange(rangex[0], rangex[1])
        else:
            x = np.arange(self.model.nx)

        if (rangey is not None):
            y = np.arange(rangey[0], rangey[1])
        else:
            y = np.arange(self.model.nz)

        self.n_pixels = len(x) * len(y)
        self.n_batches = self.n_pixels // self.batch

        X, Y = np.meshgrid(x, y)
        X = X.flatten()
        Y = Y.flatten()
        divX = np.array_split(X, self.n_batches)
        divY = np.array_split(Y, self.n_batches)

        # Tell the user that no Stokes profiles will be synthesized
        if not (self.withstokes):
            self.logger.info("Avoiding the synthesis module and only saving the model")

        # Write the Stokes file only if withstokes
        if (self.withstokes):
            self.f_stokes_out = h5py.File(self.model.output_file, 'w')
            self.stokes_db = self.f_stokes_out.create_dataset('stokes', (self.model.nx, self.model.nz, 4, self.model.n_lambda_sir), dtype='float32')
            self.lambda_db = self.f_stokes_out.create_dataset('lambda', (self.model.n_lambda_sir,), dtype='float32')

        # If we want to extract a model sampled at selected taus
        interpolate_model = False
        if (self.model.interpolated_model_filename is not None):
            self.f_model_out = h5py.File(self.model.interpolated_model_filename, 'w')
            self.model_db = self.f_model_out.create_dataset('model', (self.model.nx, self.model.nz, 7, self.model.n_tau), dtype='float32')
            interpolate_model = True

        ##############################################
        # Slant models if needed
        ##############################################
        if (self.model.need_slant):
            self.thetaX = np.arccos(self.model.mux)
            self.thetaY = np.arccos(self.model.muy)

            # Shift in both directions at each height in pixel units
            self.shiftx = self.model.deltaz * np.tan(self.thetaX) / self.model.deltaxy
            self.shifty = self.model.deltaz * np.tan(self.thetaY) / self.model.deltaxy

            task_index = 0
            num_workers = self.workers_slant - 1   # self.size - 1
            closed_workers = 0
            self.last_received = 0
            self.last_sent = 0
            self.logger.info("Starting slanting of models with {0} workers and {1} heights".format(num_workers, self.model.ny))

            self.T_new = np.empty(self.T.shape, dtype=self.T.dtype)
            self.P_new = np.empty(self.P.shape, dtype=self.P.dtype)
            self.rho_new = np.empty(self.rho.shape, dtype=self.rho.dtype)
            self.vx_new = np.empty(self.vx.shape, dtype=self.vx.dtype)
            self.vy_new = np.empty(self.vy.shape, dtype=self.vy.dtype)
            self.vz_new = np.empty(self.vz.shape, dtype=self.vz.dtype)
            self.Bx_new = np.empty(self.Bx.shape, dtype=self.Bx.dtype)
            self.By_new = np.empty(self.By.shape, dtype=self.By.dtype)
            self.Bz_new = np.empty(self.Bz.shape, dtype=self.Bz.dtype)

            with tqdm(total=self.model.ny, ncols=140) as pbar:
                while (closed_workers < num_workers):
                    data_received = self.comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=self.status)
                    source = self.status.Get_source()
                    tag = self.status.Get_tag()

                    if tag == tags.READY:
                        # Worker is ready, send a task (one height plane per task)
                        if (task_index < self.model.ny):
                            data_to_send = {'index': task_index,
                                            'shX': self.shiftx[task_index],
                                            'shY': self.shifty[task_index],
                                            'mux': self.model.mux,
                                            'muy': self.model.muy}
                            data_to_send['model'] = [self.T[:,task_index,:], self.P[:,task_index,:],
                                                     self.rho[:,task_index,:], self.vx[:,task_index,:],
                                                     self.vy[:,task_index,:], self.vz[:,task_index,:],
                                                     self.Bx[:,task_index,:], self.By[:,task_index,:],
                                                     self.Bz[:,task_index,:]]
                            self.comm.send(data_to_send, dest=source, tag=tags.START)
                            task_index += 1
                            pbar.update(1)
                            self.last_sent = '{0} to {1}'.format(task_index, source)
                            pbar.set_postfix(sent=self.last_sent, received=self.last_received)
                        else:
                            self.comm.send(None, dest=source, tag=tags.EXIT)

                    elif tag == tags.DONE:
                        index = data_received['index']
                        model = data_received['model']
                        self.T_new[:, index, :] = model[0]
                        self.P_new[:, index, :] = model[1]
                        self.rho_new[:, index, :] = model[2]
                        self.vx_new[:, index, :] = model[3]
                        self.vy_new[:, index, :] = model[4]
                        self.vz_new[:, index, :] = model[5]
                        self.Bx_new[:, index, :] = model[6]
                        self.By_new[:, index, :] = model[7]
                        self.Bz_new[:, index, :] = model[8]
                        self.last_received = '{0} from {1}'.format(index, source)
                        pbar.set_postfix(sent=self.last_sent, received=self.last_received)

                    elif tag == tags.EXIT:
                        closed_workers += 1

            del self.T
            del self.P
            del self.rho
            del self.vx
            del self.vy
            del self.vz
            del self.Bx
            del self.By
            del self.Bz

            self.deltaz = self.model.deltaz_new
            self.T = self.T_new
            self.P = self.P_new
            self.rho = self.rho_new
            self.vz = self.vz_new
            self.Bx = self.Bx_new
            self.By = self.By_new
            self.Bz = self.Bz_new

            self.comm.Barrier()

        #########################################
        # Loop over all pixels doing the synthesis/inversion and saving the results
        #########################################
        task_index = 0
        num_workers = self.size - 1
        closed_workers = 0
        self.last_received = 0
        self.last_sent = 0
        self.logger.info("Starting calculations with {0} workers and {1} batches".format(num_workers, self.n_batches))

        with tqdm(total=self.n_batches, ncols=140) as pbar:
            while (closed_workers < num_workers):
                data_received = self.comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=self.status)
                source = self.status.Get_source()
                tag = self.status.Get_tag()

                if tag == tags.READY:
                    # Worker is ready, send a task (one batch of pixels per task)
                    if (task_index < self.n_batches):
                        ix = divX[task_index]
                        iz = divY[task_index]
                        data_to_send = {'index': task_index, 'indX': ix, 'indY': iz, 'interpolate': interpolate_model}

                        if (self.model.vz_type == 'vz'):
                            vz = self.vz[ix,:,iz]
                        else:
                            vz = self.vz[ix,:,iz] / self.rho[ix,:,iz]

                        data_to_send['model'] = [self.model.deltaz,
                                                 self.T[ix,:,iz].astype('float64'),
                                                 self.P[ix,:,iz].astype('float64'),
                                                 self.rho[ix,:,iz].astype('float64'),
                                                 vz.astype('float64'),
                                                 self.Bx[ix,:,iz].astype('float64'),
                                                 self.By[ix,:,iz].astype('float64'),
                                                 self.Bz[ix,:,iz].astype('float64')]

                        self.comm.send(data_to_send, dest=source, tag=tags.START)
                        task_index += 1
                        pbar.update(1)
                        self.last_sent = '{0} to {1}'.format(task_index, source)
                        pbar.set_postfix(sent=self.last_sent, received=self.last_received)
                    else:
                        self.comm.send(None, dest=source, tag=tags.EXIT)

                elif tag == tags.DONE:
                    index = data_received['index']
                    indX = data_received['indX']
                    indY = data_received['indY']
                    if (interpolate_model):
                        model = data_received['model']
                        for i in range(len(indX)):
                            self.model_db[indX[i],indY[i],:,:] = model[i,:,:]
                    if (self.withstokes):
                        stokes = data_received['stokes']
                        for i in range(len(indX)):
                            self.stokes_db[indX[i],indY[i],:,:] = stokes[i,1:,:]
                    self.last_received = '{0} from {1}'.format(index, source)
                    pbar.set_postfix(sent=self.last_sent, received=self.last_received)

                elif tag == tags.EXIT:
                    closed_workers += 1

        # Only close the files that were actually opened
        if (self.withstokes):
            self.lambda_db[:] = stokes[0,0,:]
            self.f_stokes_out.close()
        if (interpolate_model):
            self.f_model_out.close()

    def mpi_agents_work_synth(self):
        """
        MPI agents work

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        ###############################
        # Slant models if needed
        ###############################
        if (self.model.need_slant):
            if (self.rank <= self.workers_slant):
                while True:
                    self.comm.send(None, dest=0, tag=tags.READY)
                    data_received = self.comm.recv(source=0, tag=MPI.ANY_TAG, status=self.status)
                    tag = self.status.Get_tag()

                    if tag == tags.START:
                        task_index = data_received['index']
                        shX = data_received['shX']
                        shY = data_received['shY']
                        xmu = data_received['mux']
                        ymu = data_received['muy']

                        ysign = np.sign(xmu)
                        xsign = np.sign(ymu)
                        ymu2 = np.sqrt(1.0 - ymu**2)
                        xmu2 = np.sqrt(1.0 - xmu**2)

                        data_to_send = {'index': task_index}

                        T, P, rho, vx, vy, vz, Bx, By, Bz = data_received['model']

                        T = slant.fftshift_image(T, dx=shX, dy=shY, useLog=True)
                        P = slant.fftshift_image(P, dx=shX, dy=shY, useLog=True)
                        rho = slant.fftshift_image(rho, dx=shX, dy=shY, useLog=True)
                        vx = slant.fftshift_image(vx, dx=shX, dy=shY, useLog=False)
                        vy = slant.fftshift_image(vy, dx=shX, dy=shY, useLog=False)
                        vz = slant.fftshift_image(vz, dx=shX, dy=shY, useLog=False)
                        Bx = slant.fftshift_image(Bx, dx=shX, dy=shY, useLog=False)
                        By = slant.fftshift_image(By, dx=shX, dy=shY, useLog=False)
                        Bz = slant.fftshift_image(Bz, dx=shX, dy=shY, useLog=False)

                        # Project the vector quantities onto the new line of sight
                        vz1 = vz * xmu * ymu - ysign * vy * xmu * ymu2 - xsign * vx * xmu2
                        vy1 = vy * ymu + vz * ymu2 * ysign
                        vx1 = vx * xmu + (vz * ymu - ysign * vy * ymu2) * xmu2 * xsign

                        Bz1 = Bz * xmu * ymu - ysign * By * xmu * ymu2 - xsign * Bx * xmu2
                        By1 = By * ymu + Bz * ymu2 * ysign
                        Bx1 = Bx * xmu + (Bz * ymu - ysign * By * ymu2) * xmu2 * xsign

                        model = [T, P, rho, vx1, vy1, vz1, Bx1, By1, Bz1]
                        data_to_send['model'] = model

                        self.comm.send(data_to_send, dest=0, tag=tags.DONE)

                    elif tag == tags.EXIT:
                        break

                self.comm.send(None, dest=0, tag=tags.EXIT)

            self.comm.Barrier()

        ###############################
        # Synthesis
        ###############################
        while True:
            self.comm.send(None, dest=0, tag=tags.READY)
            data_received = self.comm.recv(source=0, tag=MPI.ANY_TAG, status=self.status)
            tag = self.status.Get_tag()

            if tag == tags.START:
                task_index = data_received['index']
                indX = data_received['indX']
                indY = data_received['indY']
                interpolate_model = data_received['interpolate']

                data_to_send = {'index': task_index, 'indX': indX, 'indY': indY}

                z, T, P, rho, vz, Bx, By, Bz = data_received['model']

                # Generic implementation: always get two outputs (stokes=None when there is no synthesis)
                stokes, model = self.model.synth2d(z, T, P, rho, vz, Bx, By, Bz,
                                                   interpolate_model=interpolate_model, withstokes=self.withstokes)

                if (interpolate_model):
                    data_to_send['model'] = model
                if (self.withstokes):
                    data_to_send['stokes'] = stokes

                self.comm.send(data_to_send, dest=0, tag=tags.DONE)

            elif tag == tags.EXIT:
                break

        self.comm.send(None, dest=0, tag=tags.EXIT)

    def run_all_pixels(self, rangex=None, rangey=None):
        """
        Run synthesis for all pixels

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        if (self.use_mpi):
            if (self.rank == 0):
                self.mpi_master_work_synth(rangex=rangex, rangey=rangey)
            else:
                self.mpi_agents_work_synth()
        else:
            self.nonmpi_work(rangex=rangex, rangey=rangey)
file_length: 22,823 | avg_line_length: 41.033149 | max_line_length: 155 | extension_type: py
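The heart of the master's scheduling in the file above is the meshgrid/flatten/array_split decomposition of the pixel grid into batches. A minimal, self-contained sketch of just that logic (toy grid sizes; the variable names match the method's locals):

import numpy as np

x = np.arange(0, 6)
y = np.arange(0, 4)
X, Y = np.meshgrid(x, y)
X, Y = X.flatten(), Y.flatten()

n_pixels = X.size              # 24 pixels in total
batch = 5
n_batches = n_pixels // batch  # 4 batches

divX = np.array_split(X, n_batches)
divY = np.array_split(Y, n_batches)
# each (divX[k], divY[k]) pair is one task handed to the next READY worker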
repo: 3d_sir
file: 3d_sir-master/sir3d/synth/jaime_slant.py
import numpy as np
import sys

"""
Model slant + projection tools
"""

# *****************************************************************************

def fftshift_image(im_in, dy=0.0, dx=0.0, isPeriodic=True, useLog=False):
    """
    FFTSHIFT_IMAGE shifts an image by (dy, dx) pixels using Fourier transforms.

    Input:
        im: 2D numpy array with the image (ny, nx)
        dy: shift along the leftmost axis in pixels
        dx: shift along the rightmost axis in pixels
        isPeriodic: if the image is not periodic, it is placed into a
                    (2*Ny, 2*Nx) container so that a periodic version of
                    the image can be made. Much slower.

    AUTHOR: J. de la Cruz Rodriguez (ISP-SU 2020)
    """
    #
    # scale image to numbers and amplitudes around 1
    #
    if (useLog):
        im = np.log(np.ascontiguousarray(im_in, dtype='float64'))
    else:
        im = np.ascontiguousarray(im_in, dtype='float64')

    ny, nx = im.shape
    me = im.mean()
    st = np.std(im)
    im = (im - me) / st

    #
    # FFT of the input image, check for periodicity
    #
    if (isPeriodic):
        ny1, nx1 = im.shape
        ft = np.fft.rfft2(im)
    else:
        ny, nx = im.shape
        ft = np.zeros((2*ny, 2*nx), dtype='float64', order='c')
        ft[0:ny, 0:nx] = im
        ft[0:ny, nx::] = im[:, ::-1]
        ft[ny::, 0:nx] = im[::-1, :]
        ft[ny::, nx::] = im[::-1, ::-1]
        ny1, nx1 = ft.shape
        ft = np.fft.rfft2(ft)

    #
    # get spatial frequency mesh; the x-axis has only the positive frequencies
    # because the input data were non-complex numbers, so the negative part is
    # redundant
    #
    fx, fy = np.meshgrid(np.fft.rfftfreq(nx1), np.fft.fftfreq(ny1))

    #
    # Multiply by exponential phase factor and return to image space
    #
    if (useLog):
        return np.exp((np.real((np.fft.irfft2(ft * np.exp(-2j*np.pi*(fx*-dx + fy*-dy))))[0:ny, 0:nx]) * st) + me)
    else:
        return (np.real((np.fft.irfft2(ft * np.exp(-2j*np.pi*(fx*-dx + fy*-dy))))[0:ny, 0:nx]) * st) + me

# *****************************************************************************

class SlantModel:
    """
    SlantModel class: computes the shift that needs to be applied to each
    layer in order to simulate off-center observations with MHD models.
    Assumes axis ordering (nz, ny, nx) in python ordering.

    Source: inspired by M. Carlsson's "mu_atmos3d.pro", but with extra
    functionality (it can slant in both axes) and it keeps z=0 unshifted
    instead of the upper boundary. Plane shifts are performed by adding
    a global phase in Fourier space.

    Coded by J. de la Cruz Rodriguez (ISP-SU 2020)
    """
    # ----------------------------------------------------------------------------------

    def __init__(self, z, y, x, mu_x=1.0, mu_y=1.0):
        """
        This constructor precomputes the shift that needs to be applied
        to each layer and the new Z-scale.

        Input:
            z: 1D array with the z-scale of the model
            y: 1D array with the y-scale of the model
            x: 1D array with the x-scale of the model
            mu_x: cosine of the heliocentric angle in the x axis (mu_x = cos(theta_x))
            mu_y: cosine of the heliocentric angle in the y axis (mu_y = cos(theta_y))
        """
        #
        # If z=0 is in the photosphere, we can keep z=0 unshifted
        #
        self.idx = np.argmin(np.abs(z))

        #
        # Precompute the shift for each plane of the model
        #
        zx2 = (z - z[self.idx]) / np.abs(mu_x) + z[self.idx]
        self.shift_x = -np.sign(mu_x)*(zx2 - zx2[self.idx]) * np.sqrt(1.0 - mu_x**2) / x.max() * (x.size - 1.0)

        zy2 = (z - z[self.idx]) / np.abs(mu_y) + z[self.idx]
        self.shift_y = -np.sign(mu_y)*(zy2 - zy2[self.idx]) * np.sqrt(1.0 - mu_y**2) / y.max() * (y.size - 1.0)

        #
        # stretch z-axis and store it
        #
        xangle = np.arccos(mu_x)
        yangle = np.arccos(mu_y)
        tmu = np.cos(np.sqrt(xangle**2 + yangle**2))
        print("SlantModel::__init__: mu={0}".format(tmu))

        self.z_new = (z - z[self.idx]) / abs(tmu) + z[self.idx]
        self.mu_x = mu_x
        self.mu_y = mu_y

    # ----------------------------------------------------------------------------------

    def slantVariable(self, var, useLog=False):
        """
        Slants a variable of the model. This routine should be applied to
        all variables. The shifting is performed by applying a phase shift
        in the Fourier domain.

        Input:
            var: 3D cube with dimensions (nz, ny, nx)
        """
        var1 = np.empty(var.shape, dtype=var.dtype)
        nz, ny, nx = var.shape

        if (nz != self.z_new.size):
            print("slantVariable: ERROR, the object was initialized with nz={0}, but the provided cube has nz={1}".format(self.z_new.size, nz))

        per = 0; oper = -1; scl = 100.0 / (nz - 1.0)

        for kk in range(nz):
            if ((np.abs(self.shift_y[kk]) < 1.e-3) and (np.abs(self.shift_x[kk]) < 1.e-3)):
                var1[kk] = var[kk]
            else:
                var1[kk] = fftshift_image(var[kk], dy=self.shift_y[kk], dx=self.shift_x[kk], isPeriodic=True, useLog=useLog)

            per = int(kk*scl)
            if (per != oper):
                oper = per
                sys.stdout.write("\rSlantModel::slantVariable: {0}{1}".format(per, "%"))
                sys.stdout.flush()

        sys.stdout.write("\rSlantModel::slantVariable: {0}{1}\n".format(100, "%"))
        return var1

    # ----------------------------------------------------------------------------------

    def get_new_z(self):
        """
        Returns the new z-scale of the slanted model
        """
        return self.z_new * 1

    # ----------------------------------------------------------------------------------

    def project_field(self, vy, vx, vz):
        """
        Projects vector variables onto the new LOS. This routine should be
        applied to velocities and magnetic field after performing the slant.
        The projection is applied in-place: it does not return anything,
        but it overwrites the input arrays.
        """
        ysign = np.sign(self.mu_y)
        xsign = np.sign(self.mu_x)

        xmu = self.mu_x
        ymu = self.mu_y
        ymu2 = np.sqrt(1.0 - ymu**2)
        xmu2 = np.sqrt(1.0 - xmu**2)

        vz1 = vz * xmu * ymu - ysign * vy * xmu * ymu2 - xsign * vx * xmu2
        vy1 = vy * ymu + vz * ymu2 * ysign
        vx1 = vx * xmu + (vz * ymu - ysign * vy * ymu2) * xmu2 * xsign

        vz[:] = vz1
        vx[:] = vx1
        vy[:] = vy1

    # ----------------------------------------------------------------------------------

# *****************************************************************************
file_length: 7,068 | avg_line_length: 30.558036 | max_line_length: 143 | extension_type: py
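The whole slant machinery rests on the Fourier shift theorem that fftshift_image applies per height plane. A minimal standalone check of that identity, stripped of the function's mean/std scaling and log option (note that with this sign convention an integer positive (dy, dx) moves the content toward lower indices, i.e. by (-dy, -dx) samples):

import numpy as np

im = np.roll(np.eye(8), 2, axis=1)   # small periodic test image, one 1 per row
ny, nx = im.shape

fx, fy = np.meshgrid(np.fft.rfftfreq(nx), np.fft.fftfreq(ny))
dy, dx = 1.0, 3.0

# Same phase factor as in fftshift_image
shifted = np.real(np.fft.irfft2(np.fft.rfft2(im) * np.exp(-2j*np.pi*(fx*-dx + fy*-dy))))

# For integer shifts this is an exact circular roll
assert np.allclose(shifted, np.roll(im, (-1, -3), axis=(0, 1)))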
repo: 3d_sir
file: 3d_sir-master/sir3d/psf/degrade_backup.py
import numpy as np
try:
    from mpi4py import MPI
    _mpi_available = True
except:
    _mpi_available = False
from enum import IntEnum
import h5py
from tqdm import tqdm, trange
import logging
import scipy.interpolate
import skimage.transform
import pyfftw.interfaces as fft
# from ipdb import set_trace as stop


class tags(IntEnum):
    READY = 0
    DONE = 1
    EXIT = 2
    START = 3


class PSF(object):
    def __init__(self, input_stokes, input_model, output_spatial_stokes, output_spatial_model,
                 output_spatial_spectral_stokes, spatial_psf=None, spectral_psf=None,
                 final_wavelength_axis=None, zoom_factor=None, use_mpi=True, batch=256):

        # Initializations and preliminaries
        self.use_mpi = use_mpi

        if (self.use_mpi):
            if (not _mpi_available):
                raise Exception("You need MPI and mpi4py installed in your system to use this option.")
            self.comm = MPI.COMM_WORLD   # get MPI communicator object
            self.size = self.comm.size   # total number of processes
            self.rank = self.comm.rank   # rank of this process
            self.status = MPI.Status()   # get MPI status object
            if (self.size == 1):
                raise Exception("You do not have agents available or you need to start the code with mpiexec.")
        else:
            self.rank = 0

        self.logger = logging.getLogger("iterator")
        self.logger.setLevel(logging.DEBUG)
        self.logger.handlers = []
        ch = logging.StreamHandler()
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        ch.setFormatter(formatter)
        self.logger.addHandler(ch)

        self.batch = batch
        self.lambda_final = final_wavelength_axis
        self.n_lambda_new = len(self.lambda_final)
        self.zoom_factor = zoom_factor
        self.f_output_spatial_model = output_spatial_model
        self.f_output_spatial_stokes = output_spatial_stokes
        self.f_output_full_stokes = output_spatial_spectral_stokes

        if (self.rank == 0):
            print("Reading cube in memory", flush=True)
            f = h5py.File(input_stokes, 'r')
            self.input_stokes = f['stokes'][:]
            self.input_lambda = f['lambda'][:]
            f.close()

            # Remove pixels that were not OK by averaging their four neighbors
            print("Removing bad pixels")
            indx, indy = np.where(self.input_stokes[:,:,0,0] == -99.0)
            n = len(indx)
            for i in range(n):
                whichx = [indx[i]-1, indx[i], indx[i], indx[i]+1]
                whichy = [indy[i], indy[i]+1, indy[i]-1, indy[i]]
                tmp = self.input_stokes[indx[i],indy[i],:,:] * 0.0
                for j in range(4):
                    tmp += self.input_stokes[whichx[j],whichy[j],:,:]
                self.input_stokes[indx[i],indy[i],:,:] = tmp / 4.0

            f = h5py.File(input_model, 'r')
            self.input_model = f['model'][:]
            f.close()

            self.nx, self.ny, self.n_stokes, self.n_lambda = self.input_stokes.shape
            self.nx, self.ny, self.n_var, self.n_tau = self.input_model.shape

            self.nx_new = np.round(self.zoom_factor * self.nx).astype('int')
            self.ny_new = np.round(self.zoom_factor * self.ny).astype('int')

            self.spatial_psf = spatial_psf
            self.spectral_psf = spectral_psf
            self.zoom_factor = zoom_factor
            if (not self.zoom_factor):
                self.zoom_factor = 1.0

            # Spatial PSF
            if (self.spatial_psf is not None):
                print("Reading spatial PSF", flush=True)
                psf_size = self.spatial_psf.shape[0]
                psf = np.zeros((self.nx, self.ny))
                psf[int(self.nx/2.-psf_size/2.+1):int(self.nx/2.+psf_size/2.+1),
                    int(self.ny/2.-psf_size/2.+1):int(self.ny/2.+psf_size/2.+1)] = self.spatial_psf
                psf = np.fft.fftshift(psf)
                self.psf_spatial_fft = fft.numpy_fft.fft2(psf)

            # Spectral PSF
            if (self.spectral_psf is not None):
                print("Reading spectral PSF", flush=True)
                interpolator = scipy.interpolate.interp1d(spectral_psf[:,0], spectral_psf[:,1],
                                                          bounds_error=False, fill_value=0.0)
                psf_spectral = interpolator(self.input_lambda - np.mean(self.input_lambda))
                psf = np.fft.fftshift(psf_spectral)
                self.psf_spectral_fft = fft.numpy_fft.fft(psf)

            ind = np.searchsorted(self.input_lambda, self.lambda_final)
            self.delta1 = (self.input_lambda[ind+1] - self.lambda_final) / (self.input_lambda[ind+1] - self.input_lambda[ind])
            self.delta2 = (self.lambda_final - self.input_lambda[ind]) / (self.input_lambda[ind+1] - self.input_lambda[ind])
            self.ind = ind

        self.broadcast()

    def get_rank(self, n_agents=0):
        if (self.use_mpi):
            if (n_agents >= self.size):
                raise Exception("Number of requested agents {0} is >= number of available cores ({1})".format(n_agents, self.size))
        return self.rank

    def broadcast(self):
        if (self.rank == 0):
            print("Broadcasting...", flush=True)
            self.comm.Barrier()
            self.comm.bcast(self.psf_spatial_fft, root=0)
            self.comm.bcast(self.psf_spectral_fft, root=0)
            self.comm.bcast(self.delta1, root=0)
            self.comm.bcast(self.delta2, root=0)
            self.comm.bcast(self.ind, root=0)
            self.comm.bcast(self.n_lambda_new, root=0)
            self.comm.Barrier()
            print("End broadcasting...", flush=True)
        else:
            self.comm.Barrier()
            self.psf_spatial_fft = self.comm.bcast(None, root=0)
            self.psf_spectral_fft = self.comm.bcast(None, root=0)
            self.delta1 = self.comm.bcast(None, root=0)
            self.delta2 = self.comm.bcast(None, root=0)
            self.ind = self.comm.bcast(None, root=0)
            self.n_lambda_new = self.comm.bcast(None, root=0)
            self.comm.Barrier()

    def mpi_master_work_spatial(self, rangex, rangey):
        """
        MPI master work

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        self.output_spatial_stokes = h5py.File(self.f_output_spatial_stokes, 'w')
        self.output_spatial_model = h5py.File(self.f_output_spatial_model, 'w')
        self.stokes_spatial = self.output_spatial_stokes.create_dataset('stokes', (self.nx_new, self.ny_new, self.n_stokes, self.n_lambda))
        self.lambda_spatial = self.output_spatial_stokes.create_dataset('lambda', (self.n_lambda,))
        self.lambda_spatial[:] = self.input_lambda
        self.model_spatial = self.output_spatial_model.create_dataset('model', (self.nx_new, self.ny_new, self.n_var, self.n_tau))

        tmp = np.zeros((self.nx_new, self.ny_new, self.n_stokes, self.n_lambda))

        # The spatial smearing is done by the master because it is fast
        for i in trange(self.n_stokes, desc='stokes'):
            for j in trange(self.n_lambda, desc='lambda', leave=False):
                if (self.spatial_psf is not None):
                    im_fft = fft.numpy_fft.fft2(self.input_stokes[:,:,i,j])
                    im_conv = np.real(fft.numpy_fft.ifft2(self.psf_spatial_fft * im_fft))
                else:
                    im_conv = np.copy(self.input_stokes[:,:,i,j])

                if (self.zoom_factor != 1.0):
                    minim, maxim = np.min(im_conv), np.max(im_conv)
                    im_final = skimage.transform.rescale((im_conv - minim)/(maxim-minim),
                                                         scale=[self.zoom_factor, self.zoom_factor], order=1)
                    im_final = im_final * (maxim - minim) + minim
                else:
                    im_final = np.copy(im_conv)

                tmp[:,:,i,j] = im_final

        self.stokes_spatial[:] = tmp

        tmp = np.zeros((self.nx_new, self.ny_new, self.n_var, self.n_tau))

        for i in trange(self.n_var, desc='variable'):
            for j in trange(self.n_tau, desc='nz', leave=False):
                im = self.input_model[:,:,i,j]
                if (self.zoom_factor != 1.0):
                    # im_final = nd.zoom(im, [zoom_factor, zoom_factor])
                    minim, maxim = np.min(im), np.max(im)
                    im_final = skimage.transform.rescale((im - minim)/(maxim-minim),
                                                         scale=[self.zoom_factor, self.zoom_factor], order=1)
                    im_final = im_final * (maxim - minim) + minim
                else:
                    im_final = np.copy(im)

                tmp[:,:,i,j] = im_final

        self.model_spatial[:] = tmp

        print("Saving files...", flush=True)
        self.output_spatial_stokes.close()
        self.output_spatial_model.close()
        del tmp

    def mpi_master_work_spectral(self, rangex, rangey):
        """
        MPI master work

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        print("Reading degraded cube in memory", flush=True)
        f = h5py.File(self.f_output_spatial_stokes, 'r')
        self.stokes_spatial = f['stokes'][:]
        f.close()

        self.output_full_stokes = h5py.File(self.f_output_full_stokes, 'w')
        self.stokes_full = self.output_full_stokes.create_dataset('stokes', (self.nx_new, self.ny_new, self.n_stokes, self.n_lambda_new))
        self.lambda_full = self.output_full_stokes.create_dataset('lambda', (self.n_lambda_new,))

        x = np.arange(self.nx_new)
        y = np.arange(self.ny_new)

        self.n_pixels = len(x) * len(y)
        self.n_batches = self.n_pixels // self.batch

        X, Y = np.meshgrid(x, y)
        X = X.flatten()
        Y = Y.flatten()
        divX = np.array_split(X, self.n_batches)
        divY = np.array_split(Y, self.n_batches)

        task_index = 0
        num_workers = self.size - 1
        closed_workers = 0
        self.last_received = 0
        self.last_sent = 0

        tmp = np.zeros((self.nx_new, self.ny_new, self.n_stokes, self.n_lambda_new))

        with tqdm(total=self.n_batches, ncols=140) as pbar:
            while (closed_workers < num_workers):
                data_received = self.comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=self.status)
                source = self.status.Get_source()
                tag = self.status.Get_tag()

                if tag == tags.READY:
                    # Worker is ready, send a task
                    if (task_index < self.n_batches):
                        ix = divX[task_index]
                        iy = divY[task_index]
                        data_to_send = {'index': task_index, 'indX': ix, 'indY': iy}
                        data_to_send['stokes'] = self.stokes_spatial[ix,iy,:,:]
                        self.comm.send(data_to_send, dest=source, tag=tags.START)
                        task_index += 1
                        pbar.update(1)
                        self.last_sent = '{0}->{1}'.format(task_index, source)
                        pbar.set_postfix(sent=self.last_sent, received=self.last_received)
                    else:
                        self.comm.send(None, dest=source, tag=tags.EXIT)

                elif tag == tags.DONE:
                    index = data_received['index']
                    stokes = data_received['stokes']
                    indX = data_received['indX']
                    indY = data_received['indY']
                    for i in range(len(indX)):
                        tmp[indX[i],indY[i],:,:] = stokes[i,:,:]
                    self.last_received = '{0}->{1}'.format(index, source)
                    pbar.set_postfix(sent=self.last_sent, received=self.last_received)

                elif tag == tags.EXIT:
                    closed_workers += 1

        self.stokes_full[:] = tmp
        self.lambda_full[:] = self.lambda_final
        self.output_full_stokes.close()

    def mpi_agents_work_spectral(self):
        """
        MPI agents work

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        while True:
            self.comm.send(None, dest=0, tag=tags.READY)
            data_received = self.comm.recv(source=0, tag=MPI.ANY_TAG, status=self.status)
            tag = self.status.Get_tag()

            if tag == tags.START:
                task_index = data_received['index']
                indX = data_received['indX']
                indY = data_received['indY']
                stokes = data_received['stokes']

                data_to_send = {'index': task_index, 'indX': indX, 'indY': indY}

                n = len(indX)
                stokes_out = np.zeros((n,4,self.n_lambda_new))

                for i in range(4):
                    # Compute the convolution using an FFT along the wavelength axis
                    f_im = fft.numpy_fft.fft(stokes[:,i,:], axis=1)
                    tmp = np.real(fft.numpy_fft.ifft(f_im * self.psf_spectral_fft[None,:], axis=1))

                    # Finally carry out the linear interpolation along the wavelength axis
                    # to rebin to the Hinode wavelength axis
                    stokes_out[:,i,:] = tmp[:,self.ind] * self.delta1[None,:] + tmp[:,self.ind+1] * self.delta2[None,:]

                data_to_send['stokes'] = stokes_out
                self.comm.send(data_to_send, dest=0, tag=tags.DONE)

            elif tag == tags.EXIT:
                break

        self.comm.send(None, dest=0, tag=tags.EXIT)

    def run_all_pixels_spatial(self, rangex=None, rangey=None):
        """
        Run synthesis/inversion for all pixels

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        if (self.use_mpi):
            if (self.rank == 0):
                self.mpi_master_work_spatial(rangex=rangex, rangey=rangey)
            else:
                pass
        else:
            self.nonmpi_work(rangex=rangex, rangey=rangey)

    def run_all_pixels_spectral(self, rangex=None, rangey=None):
        """
        Run synthesis/inversion for all pixels

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        if (self.use_mpi):
            if (self.rank == 0):
                self.mpi_master_work_spectral(rangex=rangex, rangey=rangey)
            else:
                self.mpi_agents_work_spectral()
        else:
            self.nonmpi_work(rangex=rangex, rangey=rangey)
file_length: 15,334 | avg_line_length: 36.585784 | max_line_length: 159 | extension_type: py
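Both degrade files implement the spatial smearing as a multiplication in Fourier space by an fftshift-ed, zero-padded PSF. A self-contained sketch of that pattern with a stand-in box PSF (np.fft is used here; pyfftw's numpy_fft interface is a drop-in replacement):

import numpy as np

nx = ny = 64
image = np.random.rand(nx, ny)

psf_size = 7
kernel = np.ones((psf_size, psf_size)) / psf_size**2   # stand-in PSF, not the real instrument PSF

# Embed the small PSF at the center of a full-size frame, exactly as in the files
psf = np.zeros((nx, ny))
psf[int(nx/2.-psf_size/2.+1):int(nx/2.+psf_size/2.+1),
    int(ny/2.-psf_size/2.+1):int(ny/2.+psf_size/2.+1)] = kernel
psf = np.fft.fftshift(psf)        # move the PSF center to pixel (0, 0)
psf_fft = np.fft.fft2(psf)

# Circular convolution = product in Fourier space
blurred = np.real(np.fft.ifft2(psf_fft * np.fft.fft2(image)))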
repo: 3d_sir
file: 3d_sir-master/sir3d/psf/__init__.py
from .degrade import *
file_length: 23 | avg_line_length: 11 | max_line_length: 22 | extension_type: py
repo: 3d_sir
file: 3d_sir-master/sir3d/psf/degrade.py
import numpy as np
try:
    from mpi4py import MPI
    _mpi_available = True
except:
    _mpi_available = False
from enum import IntEnum
import h5py
from tqdm import tqdm, trange
import logging
import scipy.interpolate
import skimage.transform
import pyfftw.interfaces as fft
# from ipdb import set_trace as stop


class tags(IntEnum):
    READY = 0
    DONE = 1
    EXIT = 2
    START = 3


class PSF(object):
    def __init__(self, use_mpi=True, batch=256):

        # Initializations and preliminaries
        self.use_mpi = use_mpi

        if (self.use_mpi):
            if (not _mpi_available):
                raise Exception("You need MPI and mpi4py installed in your system to use this option.")
            self.comm = MPI.COMM_WORLD   # get MPI communicator object
            self.size = self.comm.size   # total number of processes
            self.rank = self.comm.rank   # rank of this process
            self.status = MPI.Status()   # get MPI status object
            if (self.size == 1):
                raise Exception("You do not have agents available or you need to start the code with mpiexec.")
        else:
            self.rank = 0

        self.logger = logging.getLogger("iterator")
        self.logger.setLevel(logging.DEBUG)
        self.logger.handlers = []
        ch = logging.StreamHandler()
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        ch.setFormatter(formatter)
        self.logger.addHandler(ch)

        self.batch = batch

    def get_rank(self, n_agents=0):
        if (self.use_mpi):
            if (n_agents >= self.size):
                raise Exception("Number of requested agents {0} is >= number of available cores ({1})".format(n_agents, self.size))
        return self.rank

    def broadcast_spatial(self):
        if (self.rank == 0):
            print("Broadcasting...", flush=True)
            self.comm.Barrier()
            self.comm.bcast(self.psf_spatial_fft, root=0)
            self.comm.Barrier()
            print("End broadcasting...", flush=True)
        else:
            self.comm.Barrier()
            self.psf_spatial_fft = self.comm.bcast(None, root=0)
            self.comm.Barrier()

    def broadcast_spectral(self):
        if (self.rank == 0):
            print("Broadcasting...", flush=True)
            self.comm.Barrier()
            self.comm.bcast(self.psf_spectral_fft, root=0)
            self.comm.bcast(self.delta1, root=0)
            self.comm.bcast(self.delta2, root=0)
            self.comm.bcast(self.ind, root=0)
            self.comm.bcast(self.n_lambda_new, root=0)
            self.comm.Barrier()
            print("End broadcasting...", flush=True)
        else:
            self.comm.Barrier()
            self.psf_spectral_fft = self.comm.bcast(None, root=0)
            self.delta1 = self.comm.bcast(None, root=0)
            self.delta2 = self.comm.bcast(None, root=0)
            self.ind = self.comm.bcast(None, root=0)
            self.n_lambda_new = self.comm.bcast(None, root=0)
            self.comm.Barrier()

    def mpi_master_work_spatial(self, input_stokes, input_model, output_spatial_stokes,
                                output_spatial_model, spatial_psf=None, zoom_factor=None):
        """
        MPI master work

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        self.zoom_factor = zoom_factor

        print("Reading Stokes cube in memory for spatial smearing", flush=True)
        f = h5py.File(input_stokes, 'r')
        self.input_stokes = f['stokes'][:]
        self.input_lambda = f['lambda'][:]
        f.close()

        # Remove pixels that were not OK by averaging their four neighbors
        print("Removing bad pixels")
        indx, indy = np.where(self.input_stokes[:,:,0,0] == -99.0)
        n = len(indx)
        for i in range(n):
            whichx = [indx[i]-1, indx[i], indx[i], indx[i]+1]
            whichy = [indy[i], indy[i]+1, indy[i]-1, indy[i]]
            tmp = self.input_stokes[indx[i],indy[i],:,:] * 0.0
            for j in range(4):
                tmp += self.input_stokes[whichx[j],whichy[j],:,:]
            self.input_stokes[indx[i],indy[i],:,:] = tmp / 4.0

        print("Reading model cube in memory for spatial smearing", flush=True)
        f = h5py.File(input_model, 'r')
        self.input_model = f['model'][:]
        f.close()

        self.nx, self.ny, self.n_stokes, self.n_lambda = self.input_stokes.shape
        self.nx, self.ny, self.n_var, self.n_tau = self.input_model.shape

        self.nx_new = np.round(self.zoom_factor * self.nx).astype('int')
        self.ny_new = np.round(self.zoom_factor * self.ny).astype('int')

        self.spatial_psf = spatial_psf
        self.zoom_factor = zoom_factor
        if (not self.zoom_factor):
            self.zoom_factor = 1.0

        # Spatial PSF
        if (self.spatial_psf is not None):
            print("Reading spatial PSF", flush=True)
            psf_size = self.spatial_psf.shape[0]
            psf = np.zeros((self.nx, self.ny))
            psf[int(self.nx/2.-psf_size/2.+1):int(self.nx/2.+psf_size/2.+1),
                int(self.ny/2.-psf_size/2.+1):int(self.ny/2.+psf_size/2.+1)] = self.spatial_psf
            psf = np.fft.fftshift(psf)
            self.psf_spatial_fft = fft.numpy_fft.fft2(psf)

        self.output_spatial_stokes = h5py.File(output_spatial_stokes, 'w')
        self.output_spatial_model = h5py.File(output_spatial_model, 'w')
        self.stokes_spatial = self.output_spatial_stokes.create_dataset('stokes', (self.nx_new, self.ny_new, self.n_stokes, self.n_lambda))
        self.lambda_spatial = self.output_spatial_stokes.create_dataset('lambda', (self.n_lambda,))
        self.lambda_spatial[:] = self.input_lambda
        self.model_spatial = self.output_spatial_model.create_dataset('model', (self.nx_new, self.ny_new, self.n_var, self.n_tau))

        tmp = np.zeros((self.nx_new, self.ny_new, self.n_stokes, self.n_lambda))

        # The spatial smearing is done by the master because it is fast
        for i in trange(self.n_stokes, desc='stokes'):
            for j in trange(self.n_lambda, desc='lambda', leave=False):
                if (self.spatial_psf is not None):
                    im_fft = fft.numpy_fft.fft2(self.input_stokes[:,:,i,j])
                    im_conv = np.real(fft.numpy_fft.ifft2(self.psf_spatial_fft * im_fft))
                else:
                    im_conv = np.copy(self.input_stokes[:,:,i,j])

                if (self.zoom_factor != 1.0):
                    minim, maxim = np.min(im_conv), np.max(im_conv)
                    im_final = skimage.transform.rescale((im_conv - minim)/(maxim-minim),
                                                         scale=[self.zoom_factor, self.zoom_factor], order=1)
                    im_final = im_final * (maxim - minim) + minim
                else:
                    im_final = np.copy(im_conv)

                tmp[:,:,i,j] = im_final

        self.stokes_spatial[:] = tmp

        tmp = np.zeros((self.nx_new, self.ny_new, self.n_var, self.n_tau))

        for i in trange(self.n_var, desc='variable'):
            for j in trange(self.n_tau, desc='nz', leave=False):
                im = self.input_model[:,:,i,j]
                if (self.zoom_factor != 1.0):
                    # im_final = nd.zoom(im, [zoom_factor, zoom_factor])
                    minim, maxim = np.min(im), np.max(im)
                    im_final = skimage.transform.rescale((im - minim)/(maxim-minim),
                                                         scale=[self.zoom_factor, self.zoom_factor], order=1)
                    im_final = im_final * (maxim - minim) + minim
                else:
                    im_final = np.copy(im)

                tmp[:,:,i,j] = im_final

        self.model_spatial[:] = tmp

        print("Saving files...", flush=True)
        self.output_spatial_stokes.close()
        self.output_spatial_model.close()
        del tmp

    def mpi_master_work_spectral(self, input_stokes, output_spatial_spectral_stokes,
                                 spectral_psf=None, final_wavelength_axis=None):
        """
        MPI master work

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        self.lambda_final = final_wavelength_axis
        self.n_lambda_new = len(self.lambda_final)

        print("Reading Stokes cube in memory for spectral smearing", flush=True)
        f = h5py.File(input_stokes, 'r')
        self.stokes_spatial = f['stokes'][:]
        self.input_lambda = f['lambda'][:]
        f.close()

        # Remove pixels that were not OK by averaging their four neighbors
        print("Removing bad pixels")
        indx, indy = np.where(self.stokes_spatial[:,:,0,0] == -99.0)
        n = len(indx)
        for i in range(n):
            whichx = [indx[i]-1, indx[i], indx[i], indx[i]+1]
            whichy = [indy[i], indy[i]+1, indy[i]-1, indy[i]]
            tmp = self.stokes_spatial[indx[i],indy[i],:,:] * 0.0
            for j in range(4):
                tmp += self.stokes_spatial[whichx[j],whichy[j],:,:]
            self.stokes_spatial[indx[i],indy[i],:,:] = tmp / 4.0

        self.nx, self.ny, self.n_stokes, _ = self.stokes_spatial.shape

        self.spectral_psf = spectral_psf

        # Spectral PSF
        if (self.spectral_psf is not None):
            print("Reading spectral PSF", flush=True)
            interpolator = scipy.interpolate.interp1d(spectral_psf[:,0], spectral_psf[:,1],
                                                      bounds_error=False, fill_value=0.0)
            psf_spectral = interpolator(self.input_lambda - np.mean(self.input_lambda))
            psf = np.fft.fftshift(psf_spectral)
            self.psf_spectral_fft = fft.numpy_fft.fft(psf)

        ind = np.searchsorted(self.input_lambda, self.lambda_final)
        self.delta1 = (self.input_lambda[ind+1] - self.lambda_final) / (self.input_lambda[ind+1] - self.input_lambda[ind])
        self.delta2 = (self.lambda_final - self.input_lambda[ind]) / (self.input_lambda[ind+1] - self.input_lambda[ind])
        self.ind = ind

        self.broadcast_spectral()

        self.output_full_stokes = h5py.File(output_spatial_spectral_stokes, 'w')
        self.stokes_full = self.output_full_stokes.create_dataset('stokes', (self.nx, self.ny, self.n_stokes, self.n_lambda_new))
        self.lambda_full = self.output_full_stokes.create_dataset('lambda', (self.n_lambda_new,))

        x = np.arange(self.nx)
        y = np.arange(self.ny)

        self.n_pixels = len(x) * len(y)
        self.n_batches = self.n_pixels // self.batch

        X, Y = np.meshgrid(x, y)
        X = X.flatten()
        Y = Y.flatten()
        divX = np.array_split(X, self.n_batches)
        divY = np.array_split(Y, self.n_batches)

        task_index = 0
        num_workers = self.size - 1
        closed_workers = 0
        self.last_received = 0
        self.last_sent = 0

        tmp = np.zeros((self.nx, self.ny, self.n_stokes, self.n_lambda_new))

        with tqdm(total=self.n_batches, ncols=140) as pbar:
            while (closed_workers < num_workers):
                data_received = self.comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=self.status)
                source = self.status.Get_source()
                tag = self.status.Get_tag()

                if tag == tags.READY:
                    # Worker is ready, send a task
                    if (task_index < self.n_batches):
                        ix = divX[task_index]
                        iy = divY[task_index]
                        data_to_send = {'index': task_index, 'indX': ix, 'indY': iy}
                        data_to_send['stokes'] = self.stokes_spatial[ix,iy,:,:]
                        self.comm.send(data_to_send, dest=source, tag=tags.START)
                        task_index += 1
                        pbar.update(1)
                        self.last_sent = '{0}->{1}'.format(task_index, source)
                        pbar.set_postfix(sent=self.last_sent, received=self.last_received)
                    else:
                        self.comm.send(None, dest=source, tag=tags.EXIT)

                elif tag == tags.DONE:
                    index = data_received['index']
                    stokes = data_received['stokes']
                    indX = data_received['indX']
                    indY = data_received['indY']
                    for i in range(len(indX)):
                        tmp[indX[i],indY[i],:,:] = stokes[i,:,:]
                    self.last_received = '{0}->{1}'.format(index, source)
                    pbar.set_postfix(sent=self.last_sent, received=self.last_received)

                elif tag == tags.EXIT:
                    closed_workers += 1

        self.stokes_full[:] = tmp
        self.lambda_full[:] = self.lambda_final
        self.output_full_stokes.close()

    def mpi_agents_work_spectral(self):
        """
        MPI agents work

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        self.broadcast_spectral()

        while True:
            self.comm.send(None, dest=0, tag=tags.READY)
            data_received = self.comm.recv(source=0, tag=MPI.ANY_TAG, status=self.status)
            tag = self.status.Get_tag()

            if tag == tags.START:
                task_index = data_received['index']
                indX = data_received['indX']
                indY = data_received['indY']
                stokes = data_received['stokes']

                data_to_send = {'index': task_index, 'indX': indX, 'indY': indY}

                n = len(indX)
                stokes_out = np.zeros((n,4,self.n_lambda_new))

                for i in range(4):
                    # Compute the convolution using an FFT along the wavelength axis
                    f_im = fft.numpy_fft.fft(stokes[:,i,:], axis=1)
                    tmp = np.real(fft.numpy_fft.ifft(f_im * self.psf_spectral_fft[None,:], axis=1))

                    # Finally carry out the linear interpolation along the wavelength axis
                    # to rebin to the Hinode wavelength axis
                    stokes_out[:,i,:] = tmp[:,self.ind] * self.delta1[None,:] + tmp[:,self.ind+1] * self.delta2[None,:]

                data_to_send['stokes'] = stokes_out
                self.comm.send(data_to_send, dest=0, tag=tags.DONE)

            elif tag == tags.EXIT:
                break

        self.comm.send(None, dest=0, tag=tags.EXIT)

    def run_all_pixels_spatial(self, input_stokes, input_model, output_spatial_stokes,
                               output_spatial_model, spatial_psf=None, zoom_factor=None):
        """
        Run synthesis/inversion for all pixels

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        if (self.use_mpi):
            if (self.rank == 0):
                self.mpi_master_work_spatial(input_stokes, input_model, output_spatial_stokes,
                                             output_spatial_model, spatial_psf, zoom_factor)
            else:
                pass
        else:
            self.nonmpi_work()

    def run_all_pixels_spectral(self, input_stokes, output_spatial_spectral_stokes,
                                spectral_psf=None, final_wavelength_axis=None):
        """
        Run synthesis/inversion for all pixels

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        if (self.use_mpi):
            if (self.rank == 0):
                self.mpi_master_work_spectral(input_stokes, output_spatial_spectral_stokes,
                                              spectral_psf, final_wavelength_axis)
            else:
                self.mpi_agents_work_spectral()
        else:
            self.nonmpi_work()
file_length: 16,222 | avg_line_length: 36.640371 | max_line_length: 158 | extension_type: py
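The spectral rebinning step above precomputes searchsorted indices and a pair of linear weights once, then applies them to every pixel. A small sketch with an illustrative wavelength axis (the numeric values are not from the repo), mirroring the file's (ind, ind+1) convention:

import numpy as np

input_lambda = np.linspace(6300.0, 6303.0, 31)        # fine synthetic axis
lambda_final = np.array([6300.55, 6301.25, 6301.95])  # coarser target axis
spectrum = np.sin(input_lambda)                       # stand-in spectrum

ind = np.searchsorted(input_lambda, lambda_final)
delta1 = (input_lambda[ind+1] - lambda_final) / (input_lambda[ind+1] - input_lambda[ind])
delta2 = (lambda_final - input_lambda[ind]) / (input_lambda[ind+1] - input_lambda[ind])
assert np.allclose(delta1 + delta2, 1.0)              # the two weights always sum to one

rebinned = spectrum[ind] * delta1 + spectrum[ind+1] * delta2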
repo: HamiltonianOpInf
file: HamiltonianOpInf-main/KdV_BBM_utils.py
# Functions for running the KdV and BBM experiments
# Anthony Gruber 3-31-2023

# Import necessary packages
from functools import partial

import numpy as np
from numpy.linalg import norm, solve
from numpy.fft import rfft, irfft
from scipy.optimize import root
from scipy.integrate import solve_ivp
from scipy.linalg import circulant
from scipy.sparse import csc_matrix, identity, diags
from scipy.sparse.linalg import spsolve

# My own file
from OpInf_utils import FDapprox


# Finite differences: 1st order periodic
def FDforward(y, step):
    diffs = np.zeros_like(y)
    diffs[:-1] = (y[1:] - y[:-1]) / step
    diffs[-1] = (y[-1] - y[0]) / step
    return diffs


# Build tridiagonal matrix
def tridiag(a, b, c, k1=-1, k2=0, k3=1):
    return np.diag(a, k1) + np.diag(b, k2) + np.diag(c, k3)


# Newton solve given res, jac functions.
# Gets super slow if you crank up the iterations or reduce the tolerance.
def do_newton(res, jac, xOld, tol=1e-6, maxIt=10, verbose=False, sparse=True):
    if not sparse:
        solver = solve
    else:
        solver = spsolve
    xNew = xOld
    err = norm(res(xNew))
    iter = 0
    while err > tol:
        xNew = xNew - solver(jac(xNew), res(xNew))
        iter += 1
        if iter > maxIt:
            err = norm(res(xNew))
            if verbose:
                print('err =', err)
            break
    return xNew


# Define KdV v1 Hamiltonian (depends on parameters)
def KdV_Hamil_v1(X, dx, a=-6, p=0, v=-1):
    arr = a/6 * X**3 + p/2 * X**2 - v/2 * FDforward(X, dx)**2
    return np.sum(arr, axis=0) * dx


# Define KdV v2 Hamiltonian
def KdV_Hamil_v2(X, dx):
    arr = 0.5 * X**2
    return np.sum(arr, axis=0) * dx


# State-dependent part of L for KdV v2
def C(x, A, a=-6):
    res = a/3 * np.multiply(A.todense(), x.reshape(-1,1))
    return csc_matrix(res - res.T)


# Generates the initial condition for the KdV soliton
# Not general -- should probably fix this...
def KdV_soliton_IC(x):
    return 1 / np.cosh(x / np.sqrt(2))**2


# Assemble operators needed for the KdV FOM problem in both formulations
def build_KdV_mats(N, xEnds):
    x = np.linspace(xEnds[0], xEnds[1], N)
    dx = x[1] - x[0]

    # Build derivative matrix A (this is v1 L)
    A = tridiag(-np.ones(N-1), np.zeros(N), np.ones(N-1))
    A[-1,0] = 1
    A[0,-1] = -1
    A *= 1 / (2*dx)
    A = csc_matrix(A)

    # Build Laplace matrix B
    B = tridiag(np.ones(N-1), -2*np.ones(N), np.ones(N-1))
    B[-1,0] = 1
    B[0,-1] = 1
    B *= (1/dx)**2
    B = csc_matrix(B)

    # Build pentadiagonal circulant matrix
    Ec = np.zeros(N)
    Ec[1] = 2
    Ec[2] = -1
    Ec[-2] = 1
    Ec[-1] = -2
    E = circulant(Ec / 2)
    E *= (1/dx)**3
    E = csc_matrix(E)

    return A, B, E


# Function to collect snapshots of the KdV v1 FOM.
# Time discretization is AVF.
def integrate_KdV_v1_FOM(tRange, ic, A, B, a=-6, p=0, v=-1):
    N = ic.shape[0]
    Nt = tRange.shape[0]
    dt = tRange[1] - tRange[0]

    # Build gradH for KdV v1, depends on central diff mtx B
    def gradH_v1(x, B, a=-6, p=0, v=-1):
        return 0.5*a*x**2 + p*x + v*B@x

    # For the root-finding algorithm
    term2mat = p*identity(N) + v*B

    def Nfunc(xOld, xNew):
        xMid = 0.5 * (xOld + xNew)
        term1 = a/6 * (xOld**2 + xOld*xNew + xNew**2)
        rhs = A @ (term1 + term2mat @ xMid)
        return xNew - (xOld + dt * rhs)

    def Nderiv(xOld, xNew):
        N = xNew.shape[0]
        term1 = a/6 * A @ (diags(xOld) + 2*diags(xNew))
        term2 = 1/2 * A @ term2mat
        return identity(N) - dt * (term1 + term2)

    # Generating snapshots
    Xdata = np.zeros((N, Nt))
    Xdata[:,0] = ic.flatten()
    for i,t in enumerate(tRange[:-1]):
        res = partial(Nfunc, Xdata[:,i])
        jac = partial(Nderiv, Xdata[:,i])
        Xdata[:,i+1] = do_newton(res, jac, Xdata[:,i])

    # Snapshots of gradH and the time derivative Xdot
    gradHdata = gradH_v1(Xdata, B, a, p, v)
    XdataDot = FDapprox(Xdata.T, dt).T

    return Xdata, XdataDot, gradHdata


# Function to collect snapshots of the KdV v2 FOM.
# Time discretization is AVF.
# This version is much slower than Hamil v1.
def integrate_KdV_v2_FOM(tRange, ic, A, E, a=-6, p=0, v=-1):
    N = ic.shape[0]
    Nt = tRange.shape[0]
    dt = tRange[1] - tRange[0]

    # For the root-finding algorithm
    term2mat = p*A + v*E

    def Nfunc(xOld, xNew):
        xMid = 0.5 * (xOld + xNew)
        term1 = C(xMid, A, a)
        rhs = (term1 + term2mat) @ xMid
        return xNew - (xOld + dt * rhs)

    def Nderiv(xOld, xNew):
        xMid = 0.5 * (xOld + xNew)
        term1 = C(xMid, A, a)
        term2 = 0.5 * term2mat
        return identity(N) - dt * (term1 + term2)

    # Generating snapshots
    Xdata = np.zeros((N, Nt))
    Xdata[:,0] = ic.flatten()
    for i,t in enumerate(tRange[:-1]):
        res = partial(Nfunc, Xdata[:,i])
        jac = partial(Nderiv, Xdata[:,i])
        Xdata[:,i+1] = do_newton(res, jac, Xdata[:,i])

    # Snapshots of gradH and the time derivative Xdot
    gradHdata = Xdata
    XdataDot = FDapprox(Xdata.T, dt).T

    return Xdata, XdataDot, gradHdata


# Precomputing the KdV intrusive ROM operators once and for all
def build_KdV_ROM_Ops(UUlist, A, B, E, ic, n=150, a=-6, p=0, v=-1, MC=False):
    ic = ic.flatten()
    U1, U2 = UUlist[0][:,:n], UUlist[1][:,:n]
    N = U1.shape[0]

    # For Hamiltonian
    LHat = U1.T @ A @ U1
    cVecV1 = np.zeros(n)
    cVecV2 = np.zeros(n)
    CmatV1 = U1.T @ (p*identity(N)+v*B) @ U1
    CmatV2 = U2.T @ (p*A + v*E) @ U2
    temp1 = np.einsum('ia,ib->iab', U1, U1)
    TtensV1 = a/2 * np.einsum('ia,ibc', U1, temp1)
    temp2 = np.einsum('ia,ib->iab', U2, U2)
    temp2 = temp2.transpose(1,2,0) @ (A @ U2)
    TtensV2 = a/3 * (temp2.transpose(0,2,1)-temp2.transpose(2,1,0))
    # TtensV2 = a/3 * (temp2 - temp2.transpose(1,2,0))

    # For Galerkin
    cVecV1G = np.zeros(n)
    cVecV2G = np.zeros(n)
    CmatV1G = U1.T @ A @ (p*identity(N)+v*B) @ U1
    CmatV2G = U2.T @ (p*A + v*E) @ U2
    TtensV1G = a/2 * np.einsum('aj,jbc', U1.T @ A, temp1)

    # Extra terms in the case of mean-centering
    if MC:
        # For Hamiltonian
        ich = U2.T @ ic
        cVecV1 += U1.T @ (a/2 * (ic**2) + (p*identity(N)+v*B) @ ic)
        cVecV2 += (U2.T @ C(ic, A, a) @ U2 + CmatV2) @ ich
        CmatV1 += a * U1.T @ (ic.reshape(-1,1) * U1)
        CmatV2 += U2.T @ C(ic, A, a) @ U2 + TtensV2.transpose(0,2,1) @ ich
        # CmatV2 += U2.T @ C(ic, A, a) @ U2 + TtensV2 @ ich

        # For Galerkin
        cVecV1G += U1.T @ A @ (a/2 * ic**2 + (p*identity(N)+v*B) @ ic)
        CmatV1G += U1.T @ A @ (a * ic.reshape(-1,1) * U1)
        cVecV2G += U2.T @ (C(ic, A, a) + p*A + v*E) @ ic
        temp2 = np.einsum('ia,ib->abi', U2, U2)
        # TV2p1 = temp2 @ A.todense()
        TV2p1 = np.einsum('abi,ij', temp2, A.todense())
        TV2p2 = np.einsum('aj,jb->abj', U2.T@A, U2)
        TpartV2 = a/3 * (TV2p1 + TV2p2)
        CmatV2G += TpartV2 @ ic + U2.T @ C(ic, A, a) @ U2

    return ( [cVecV1, CmatV1, TtensV1, LHat],
             [cVecV1G, CmatV1G, TtensV1G],
             [cVecV2, CmatV2, TtensV2],
             [cVecV2G, CmatV2G, TtensV2] )


# Function to integrate the ROMs for KdV v1.
# OpList assumes the order [cVec, Cmat, Ttens, L].
# This function is reused for the BBM case also.
def integrate_KdV_v1_ROM(tTest, OpList, ic, UU, n, MC=False, Hamiltonian=True, Newton=True):
    nt = tTest.shape[0]
    dt = tTest[1] - tTest[0]
    ic = ic.reshape(-1,1)

    # Building operators for the ROM problem
    U = UU[:,:n]
    cVec = OpList[0][:n]
    Cmat = OpList[1][:n,:n]
    Ttens = OpList[2][:n,:n,:n]
    if Hamiltonian:
        LHat = OpList[-1][:n,:n]
    else:
        LHat = np.eye(n)

    # Functions for root finding
    def Nfunc(xHatOld, xHatNew):
        xHatMid = 0.5 * (xHatOld + xHatNew)
        tensTerm = ( 2*(Ttens @ xHatOld) @ xHatMid
                     + (Ttens @ xHatNew) @ xHatNew ) / 3
        rhs = cVec + Cmat @ xHatMid + tensTerm
        return xHatNew - (xHatOld + dt * LHat @ rhs)

    Id = np.eye(n)
    def Nderiv(xHatOld, xHatNew):
        tensTerm = Ttens @ xHatOld + 2*Ttens @ xHatNew
        return Id - dt * LHat @ (Cmat/2 + tensTerm/3)

    # Initialize the array and set initial conditions
    xHat = np.zeros((n, nt))
    if MC:
        xHat[:,0] = np.zeros(n)
    else:
        xHat[:,0] = U.T @ ic.flatten()

    # Integrate FOM/ROMs over the test interval
    for i,time in enumerate(tTest[:-1]):
        res = partial(Nfunc, xHat[:,i])
        if not Newton:
            xHat[:,i+1] = root(res, xHat[:,i], method='krylov').x
        else:
            jac = partial(Nderiv, xHat[:,i])
            xHat[:,i+1] = do_newton(res, jac, xHat[:,i], maxIt=3, sparse=False)

    # Reconstruct FO solutions
    if MC:
        xRec = ic + U @ xHat
    else:
        xRec = U @ xHat
    return xRec


# Function to integrate the HROMs for KdV v2.
# OpList assumes the order [cVec, Cmat, Ttens].
# Option for standard AVF or "full AVF" integrated.
def integrate_KdV_v2_ROM(tTest, OpList, ic, UU, n, MC=False, Newton=False, AVF=True):
    nt = tTest.shape[0]
    dt = tTest[1] - tTest[0]
    ic = ic.reshape(-1,1)
    Id = np.eye(n)

    # Building operators for the ROM problem
    U = UU[:,:n]
    cVec = OpList[0][:n]
    Cmat = OpList[1][:n,:n]
    Ttens = OpList[2][:n,:n,:n]

    # Functions for root finding
    def NfuncAVF(xHatOld, xHatNew):
        xHatMid = 0.5 * (xHatOld + xHatNew)
        Tterm = (Ttens @ xHatMid) @ xHatMid
        rhs = cVec + Cmat @ xHatMid + Tterm
        return xHatNew - (xHatOld + dt * rhs)

    def NderivAVF(xHatOld, xHatNew):
        xHatMid = 0.5 * (xHatOld + xHatNew)
        Tterm = (Ttens + Ttens.transpose(0,2,1)) @ xHatMid
        return Id - dt * (Cmat/2 + Tterm/4)

    def NfuncInteg(xHatOld, xHatNew):
        xHatMid = 0.5 * (xHatOld + xHatNew)
        Tterm1 = (Ttens @ xHatNew) @ xHatNew + (Ttens @ xHatOld) @ xHatOld
        Tterm2 = (Ttens @ xHatNew) @ xHatOld + (Ttens @ xHatOld) @ xHatNew
        Tterm = Tterm1/3 + Tterm2/6
        rhs = cVec + Cmat @ xHatMid + Tterm
        return xHatNew - (xHatOld + dt * rhs)

    def NderivInteg(xHatOld, xHatNew):
        Tsym = Ttens + Ttens.transpose(0,2,1)
        Tterm = 1/3 * Tsym @ xHatNew + 1/6 * Tsym @ xHatOld
        return Id - dt * (Cmat/2 + Tterm)

    # Toggle for time integration
    if AVF:
        Nfunc, Nderiv = NfuncAVF, NderivAVF
    else:
        Nfunc, Nderiv = NfuncInteg, NderivInteg

    # Initialize the array and set initial conditions
    xHat = np.zeros((n, nt))
    if MC:
        xHat[:,0] = np.zeros(n)
    else:
        xHat[:,0] = U.T @ ic.flatten()

    # Integrate FOM/ROMs over the test interval
    for i,time in enumerate(tTest[:-1]):
        res = partial(Nfunc, xHat[:,i])
        if not Newton:
            xHat[:,i+1] = root(res, xHat[:,i], method='krylov').x
        else:
            jac = partial(Nderiv, xHat[:,i])
            xHat[:,i+1] = do_newton(res, jac, xHat[:,i],
                                    maxIt=10, sparse=False, tol=1e-12)

    # Reconstruct FO solutions
    if MC:
        xRec = ic + U @ xHat
    else:
        xRec = U @ xHat
    return xRec


# This function is adapted from code found at
# https://github.com/W-J-Trenberth/Spectral-method-for-the-BBM-equation
def BBM_solver(t, u_0, a=1, b=1, y=1):
    '''A function to solve the BBM equation
    $$\partial_t u + a\partial_x u + bu\partial_x u - y\partial_{xxt}u = 0.$$

    Using Fourier analysis this equation can be written in the form
    $$\partial_t u = F(u).$$  This function uses the fast Fourier transform
    to compute $F(u)$ quickly and solve_ivp to solve the resulting system
    of ODEs.

    Parameters
    -------------------------------------
    t: a time to evaluate the solution at.
    u_0: an array with the initial data.
    a, b, y: numbers setting the advection, nonlinearity, and dispersion
        strengths.

    Returns
    ------------------------------------
    out: the solution to BBM evaluated at time t starting from initial
        data u_0.
    '''
    # The RHS of the ODE
    def RHS(t, u):
        N = len(u)//2 + 1
        n = np.arange(0, N)
        termFT = -2*np.pi*1j*n / (1 + y*4*np.pi**2*n**2)
        rhsFT = termFT * rfft(a*u + 0.5*b*u**2)
        return irfft(rhsFT)

    # Use solve_ivp to solve the ODE
    # xDot = RHS(t, u_0)
    sol = solve_ivp(RHS, [0,t], u_0, t_eval=[t], method='DOP853').y
    return np.reshape(sol, -1)


# Computes the BBM Hamiltonian
def BBM_Hamil(X, dx, a=1, b=1):
    out = 0.5*(a*X**2 + (b/3)*X**3)
    return np.sum(out, axis=0) * dx


def BBM_momentum(X, B, dx, y=1):
    out = X - y * B @ X
    return np.sum(out, axis=0) * dx


def BBM_KinE(X, dx, y=1):
    out = X**2 + y * FDforward(X, dx)**2   # (A @ X)**2
    return 0.5 * np.sum(out, axis=0) * dx


# Precomputing the BBM ROM operators once and for all
def build_BBM_ROM_ops(UU, ic, n=150, MC=False, a=1, b=1):
    U = UU[:,:n]
    cVec = np.zeros(n)
    Cmat = a * np.identity(n)
    temp = np.einsum('ik,il->ikl', U, U)
    Ttens = b/2 * np.einsum('ij,ikl', U, temp)
    if MC:
        cVec += U.T @ (a * ic + b/2 * ic**2)
        Cmat += b * U.T @ (ic.reshape(-1,1) * U)
    return [cVec, Cmat, Ttens, np.identity(n)]


# # Function to integrate the OpInf HROM for the BBM equation
# def integrate_BBM_OpInf_HROM(tTest, LHat, TT, ic, UU, n, Newton=True):
#     nt = tTest.shape[0]
#     dt = tTest[1] - tTest[0]
#
#     # Building operators for ROM problem
#     U = UU[:,:n]
#     Ttens = TT[:n,:n,:n]
#
#     def Nfunc(xHatOld, xHatNew):
#         xHatMid = 0.5 * (xHatOld + xHatNew)
#         nlin2part = ( 2*(Ttens @ xHatOld) @ xHatMid
#                       + (Ttens @ xHatNew) @ xHatNew ) / 3
#         rhs = xHatMid + nlin2part
#         return xHatNew - (xHatOld + dt * LHat @ rhs)
#
#     Id = np.eye(n)
#     def Nderiv(xHatOld, xHatNew):
#         term = Ttens @ xHatOld + 2*Ttens @ xHatNew
#         return Id - dt * (LHat / 2 + LHat @ term / 3)
#
#     # Initialize array and set initial conditions
#     xHat = np.zeros((n, nt))
#     xHat[:,0] = U.T @ ic.flatten()
#
#     # Integrate FOM/ROMs over test interval
#     for i,time in enumerate(tTest[:-1]):
#         res = partial(Nfunc, xHat[:,i])
#         if not Newton:
#             xHat[:,i+1] = root(res, xHat[:,i], method='krylov').x
#         else:
#             jac = partial(Nderiv, xHat[:,i])
#             xHat[:,i+1] = do_newton(res, jac, xHat[:,i],
#                                     maxIt=3, sparse=False)
#
#     # Reconstruct FO solutions
#     xRec = U @ xHat
#     return xRec
14,804
29.588843
79
py
HamiltonianOpInf
HamiltonianOpInf-main/OpInf_utils.py
# Convenience functions for running the various OpInf procedures
# Anthony Gruber 5-28-2023

import numpy as np
from numpy.linalg import solve
from scipy.sparse import csc_matrix, identity, issparse
from scipy.sparse.linalg import spsolve


# Finite differences: 4th order in middle, 1st order at ends
def FDapprox(y, step):
    diffs = np.zeros_like(y)
    diffs[0] = (y[1] - y[0]) / step
    diffs[1] = (y[2] - y[1]) / step
    diffs[2:-2] = (-y[4:] + 8*y[3:-1] - 8*y[1:-3] + y[:-4]) / (12 * step)
    diffs[-2] = (y[-2] - y[-3]) / step
    diffs[-1] = (y[-1] - y[-2]) / step
    return diffs


# Vectorize column-wise
def vec(A):
    m, n = A.shape[0], A.shape[1]
    return A.reshape(m*n, order='F')


# Build sparse m x n matrix K such that K @ vec(A) = vec(A.T)
def commutation_matrix_sp(m, n):
    row = np.arange(m*n)
    col = row.reshape((m, n), order='F').ravel()
    data = np.ones(m*n, dtype=np.int8)
    return csc_matrix((data, (row, col)), shape=(m*n, m*n))


# Build precomputable parts of OpInf procedures
# Note that the RHS of canonical OpInf is not truncatable
# This is only useful for non-block basis, when reduced quantities of
# lower dimension can be precomputed.
def build_OpInf_stuff(UU, xData, xDotData, gradHData, J, n=150, MC=False):
    # Modification for block basis
    # Truncation doesn't work here, so just return stuff
    if isinstance(UU, list):
        if not MC:
            return [[UU, xData, xDotData, gradHData, J] for i in range(3)]
        else:
            return [UU, xData, xDotData, gradHData, J]
    else:
        U = UU[:,:n]
        xDotHat = U.T @ xDotData
        gradHatH = U.T @ gradHData
        sgHatH = gradHatH @ gradHatH.T
        NCrhs = xDotHat @ gradHatH.T - gradHatH @ xDotHat.T
        NCops = [sgHatH, NCrhs]
        if not MC:
            xHat = U.T @ xData
            xHxHt = xHat @ xHat.T
            Grhs = xHat @ xDotHat.T
            Jhat = U.T @ J @ U
            Cops = [xHat, xDotHat, xHxHt, Jhat]
            Gops = [xHxHt, Grhs]
            return [NCops, Cops, Gops]
        else:
            # Recall that mean-centering only makes sense for N-C-OpInf
            return NCops


# Infer L in xDot = L gradH(x)
def NC_H_OpInf(OpList, n, eps=0.):
    # Modification for block basis
    # No fancy tricks, have to compute everything
    if isinstance(OpList[0], list):
        Uq = OpList[0][0][:,:n//2]
        Up = OpList[0][1][:,:n//2]
        Zb = np.zeros_like(Uq)
        U = csc_matrix(np.block([[Uq, Zb], [Zb, Up]]))
        xDotHat = U.T @ OpList[2]
        gHatH = U.T @ OpList[3]
        sgHatH = gHatH @ gHatH.T
        temp = xDotHat @ gHatH.T
        rhs = temp - temp.T
    else:
        # Load precomputed data
        sgHatH = OpList[0][:n,:n]
        rhs = OpList[1][:n,:n]

    # Solving NC-H-OpInf Problem
    iMat = np.eye(n)
    P = csc_matrix( np.kron(iMat, sgHatH) + np.kron(sgHatH, iMat) )
    reg = 2 * eps * identity(n*n)
    Lhat = spsolve(P + reg, vec(rhs)).reshape((n,n), order='F')
    # reg = eps * identity(n*n)
    # Lhat = spsolve(P @ reg, vec(rhs)).reshape((n,n), order='F')
    return 0.5 * (Lhat - Lhat.T)


# Infer A in xDot = JAx
# Can add snapshots of forcing if desired (TODO)
# BorisZhu implements the "H-OpInf" method from Sharma, Kramer, and Wang (2022)
def C_H_OpInf(OpList, n, Sigma=None, eps=0., approx=True, BorisZhu=False):
    # Modification for block basis
    # No fancy tricks, have to compute everything
    if isinstance(OpList[0], list):
        Uq = OpList[0][0][:,:n//2]
        Up = OpList[0][1][:,:n//2]
        Zb = np.zeros_like(Uq)
        U = csc_matrix(np.block([[Uq, Zb], [Zb, Up]]))
        xHat = U.T @ OpList[1]
        xDotHat = U.T @ OpList[2]
        xHxHt = xHat @ xHat.T
        Jhat = U.T @ OpList[4] @ U
    else:
        # Load precomputed data
        xHat = OpList[0][:n]
        xDotHat = OpList[1][:n]
        xHxHt = OpList[2][:n,:n]
        Jhat = OpList[3][:n,:n]

    # Only applicable when POD basis comes from SVD of X, as usual
    if Sigma is not None:
        xHxHt = np.diag(Sigma[:n]**2)

    # This is always true if basis comes from symplectic lift
    # Otherwise, this approximation seems to work better....
    if approx:
        JhtJh = np.eye(n)
    else:
        if issparse(Jhat):
            JhtJh = (Jhat.T @ Jhat).todense()
        else:
            JhtJh = Jhat.T @ Jhat

    if BorisZhu:    # implement original H-OpInf method
        # This method relies on a symplectic lift basis
        N = n//2
        xH1xH1t = xHat[:N] @ xHat[:N].T
        xH2xH2t = xHat[N:] @ xHat[N:].T
        temp1 = xDotHat[:N] @ xHat[N:].T
        temp2 = -xDotHat[N:] @ xHat[:N].T
        rhs1 = temp1 + temp1.T
        rhs2 = temp2 + temp2.T

        # Solving the two requisite sub-problems
        P21 = csc_matrix( np.kron(np.eye(N), xH2xH2t)
                         + np.kron(xH2xH2t, np.eye(N)) )
        P22 = csc_matrix( np.kron(np.eye(N), xH1xH1t)
                         + np.kron(xH1xH1t, np.eye(N)) )
        A11 = spsolve(P21, vec(rhs1)).reshape((N,N), order='F')
        A22 = spsolve(P22, vec(rhs2)).reshape((N,N), order='F')

        # Returning the block diagonal OpInf'd matrix
        Zb = np.zeros((N,N))
        A = csc_matrix(np.block([[A22, Zb], [Zb, A11]]))
        return A
    else:    # implement my C-H-OpInf method
        # Can use whatever basis you want
        temp = Jhat.T @ xDotHat @ xHat.T
        rhs = temp + temp.T
        P = csc_matrix( np.kron(JhtJh, xHxHt) + np.kron(xHxHt, JhtJh) )
        reg = 2 * eps * identity(n*n)
        Ahat = spsolve(P + reg, vec(rhs)).reshape((n,n), order='F')
        # reg = eps * identity(n*n)
        # Ahat = spsolve(P @ reg, vec(rhs)).reshape((n,n), order='F')
        return 0.5 * (Ahat + Ahat.T)


# Solving generic OpInf Problem with Willcox method.
# Tikhonov parameter scaled by XX^T
def G_OpInf(OpList, n, Sigma=None, eps=1.0e-15):
    # Modification for block basis
    # Have to recompute everything...
    if isinstance(OpList[0], list):
        Uq = OpList[0][0][:,:n//2]
        Up = OpList[0][1][:,:n//2]
        Zb = np.zeros_like(Uq)
        U = csc_matrix(np.block([[Uq, Zb], [Zb, Up]]))
        xHat = U.T @ OpList[1]
        xDotHat = U.T @ OpList[2]
        xHxHt = xHat @ xHat.T
        rhs = xHat @ xDotHat.T
    else:
        xHxHt = OpList[0][:n,:n]
        rhs = OpList[1][:n,:n]

    if Sigma is not None:
        xHxHt = np.diag(Sigma[:n]**2)

    LHS = (1+eps) * xHxHt
    return solve(LHS, rhs).T
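

# A minimal usage sketch appended for illustration only: the snapshot
# data below are synthetic stand-ins (random, not from a PDE), chosen
# just to show the shapes that FDapprox and G_OpInf expect.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    N, K, n = 40, 200, 10
    U = np.linalg.qr(rng.standard_normal((N, N)))[0]   # stand-in basis
    xHat = U[:, :n].T @ rng.standard_normal((N, K))    # reduced snapshots
    xDotHat = FDapprox(xHat.T, 0.01).T                 # reduced derivatives
    Gops = [xHat @ xHat.T, xHat @ xDotHat.T]
    Dhat = G_OpInf(Gops, n)                            # inferred operator
    print(Dhat.shape)                                  # (n, n)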
6,737
32.859296
77
py
HamiltonianOpInf
HamiltonianOpInf-main/ROM_utils.py
# Convenience functions for running the ROMs
# Anthony Gruber 3-31-2023

# Standard numpy/scipy imports
import numpy as np
from numpy.linalg import norm, eigvals
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse import csc_matrix, identity
from scipy.sparse.linalg import factorized

# For additional plotting and gifing
from functools import partial
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation


# Relative L2 error
def relError(x, xHat):
    num = norm(x - xHat)
    den = norm(x)
    return num / den


def integrate_LFOM(tRange, ics, M, C, K, p, gamma=1./2, beta=1./4):
    N, n_t = M.shape[0], tRange.shape[0]
    h = tRange[1] - tRange[0]
    M = csc_matrix(M)
    C = csc_matrix(C)
    K = csc_matrix(K)

    q = np.zeros((N, n_t))
    qDot = np.zeros((N, n_t))
    qDotDot = np.zeros((N, n_t))
    q[:,0], qDot[:,0], qDotDot[:,0] = ics

    LHS = M + gamma * h * C + beta * h**2 * K
    solve = factorized(LHS)

    for i,t in enumerate(tRange[:-1]):
        rhst1 = C @ (qDot[:,i] + (1-gamma) * h * qDotDot[:,i])
        rhst2 = K @ (q[:,i] + h * qDot[:,i]
                     + (0.5-beta) * h**2 * qDotDot[:,i])
        rhs = p(tRange[i+1]) - rhst1 - rhst2
        qDotDot[:,i+1] = solve(rhs)
        qDot[:,i+1] = qDot[:,i] + h * ((1-gamma) * qDotDot[:,i]
                                       + gamma * qDotDot[:,i+1])
        q[:,i+1] = q[:,i] + h * qDot[:,i] \
                   + h**2 * ((0.5-beta) * qDotDot[:,i]
                             + beta * qDotDot[:,i+1])

    return (q, qDot, qDotDot)


# Function to collect snapshots of Linear FOM using implicit midpoint method
# L is SS mtx, A is mtx rep. of grad H
def integrate_Linear_HFOM(tRange, ic, L, A, safe=False):
    N = L.shape[0]
    Nt = tRange.shape[0]
    dt = tRange[1] - tRange[0]

    # Store L @ A as sparse mtx
    LA = csc_matrix(L @ A)

    if safe:
        reEigs = eigvals(LA.todense()).real
        if any(reEigs > 0):
            print('FOM is unstable!')

    # Initialize solution array
    xData = np.zeros((N, Nt))
    xData[:,0] = ic

    LHS = identity(N, format='csc') - dt/2 * LA
    solve = factorized(LHS)

    # Implicit midpoint method
    for i,t in enumerate(tRange[:-1]):
        rhs = dt * LA @ xData[:,i]
        delXi = solve(rhs)
        xData[:,i+1] = xData[:,i] + delXi

    # Snapshots of gradH and time derivative Xdot
    gradHdata = A @ xData
    # xDataDot = FDapprox(xData.T, dt).T
    xDataDot = LA @ xData

    return xData, xDataDot, gradHdata


# Function which builds operators needed for intrusive ROM
# Only useful when the basis is not blocked.
def build_Linear_ROM_Ops(UU, L, A, ic, n=100, MC=False):
    ic = ic.flatten()
    # Modification for block basis
    # Truncation doesn't work here, so just return stuff...
    if isinstance(UU, list):
        return [[L, A] for i in range(2)]
    else:
        U = UU[:,:n]
        # LHat = -np.linalg.inv(U.T @ L @ U)
        LHat = U.T @ L @ U
        AHat = U.T @ A @ U
        LAHat = U.T @ L @ A @ U
        x0partHamb4L = np.zeros(n)
        x0PartGal = np.zeros(n)
        if MC:
            x0partHamb4L += U.T @ A @ ic
            x0PartGal += U.T @ L @ A @ ic
        HamiltonianOps = [LHat, AHat, x0partHamb4L]
        GalerkinOps = [LAHat, x0PartGal]
        return HamiltonianOps, GalerkinOps


# Function which integrates a linear ROM using AVF
# Option for Hamiltonian or standard Galerkin
# Option for mean-centering
def integrate_Linear_ROM(tTest, OpList, ic, UU, n,
                         MC=False, Hamiltonian=False):
    nt = tTest.shape[0]
    dt = tTest[1] - tTest[0]
    ic = ic.reshape(-1,1)

    # Modification for block basis
    # This looks insane, but we can't precompute anything here...
    if isinstance(UU, list):
        Uq = UU[0][:,:n//2]
        Up = UU[1][:,:n//2]
        Zb = np.zeros_like(Uq)
        U = csc_matrix(np.block([[Uq, Zb], [Zb, Up]]))
        L = OpList[0]
        A = OpList[1]
        if Hamiltonian:
            if L.shape[0] == len(ic.flatten()):
                LHat = U.T @ L @ U
                # LHat = -np.linalg.inv(U.T @ L @ U)
            else:
                # If OpList[0] has been replaced with an inferred Lhat
                LHat = L
            AHat = U.T @ A @ U
            x0part = np.zeros(n)
            if MC:
                x0part += LHat @ U.T @ A @ ic.flatten()
            LAHat = LHat @ AHat
        else:
            LAHat = U.T @ L @ A @ U
            x0part = np.zeros(n)
            if MC:
                x0part += U.T @ L @ A @ ic.flatten()
    else:
        U = UU[:,:n]
        if Hamiltonian:
            LHat = OpList[0][:n,:n]
            AHat = OpList[1][:n,:n]
            x0b4L = OpList[2][:n]
            LAHat = LHat @ AHat
            x0part = LHat @ x0b4L
        else:
            LAHat = OpList[0][:n,:n]
            x0part = OpList[1][:n]

    # Initialize arrays
    xHat = np.zeros((n, nt))

    # Set initial conditions
    if MC:
        xHat[:,0] = np.zeros(n)
    else:
        xHat[:,0] = U.T @ ic.flatten()

    # Define LHS operators
    LHS = np.eye(n) - dt/2 * LAHat
    lu, piv = lu_factor(LHS)

    # Integrate ROMs over test interval
    for i,t in enumerate(tTest[:-1]):
        rhs = dt * (x0part + LAHat @ xHat[:,i])
        delxHati = lu_solve((lu, piv), rhs)
        xHat[:,i+1] = xHat[:,i] + delxHati

    # Reconstruct FO solutions
    if MC:
        xRec = ic + U @ xHat
    else:
        xRec = U @ xHat

    return xRec


# Function which integrates Generic OpInf or (C or NC) H-OpInf ROM
def integrate_OpInf_ROM(tTest, DhatOp, ic, UU, L=None, approx=False):
    n = DhatOp.shape[0]
    nt = tTest.shape[0]
    dt = tTest[1] - tTest[0]
    ic = ic.flatten()

    # Modification for block basis
    if isinstance(UU, list):
        Uq = UU[0][:,:n//2]
        Up = UU[1][:,:n//2]
        Zb = np.zeros_like(Uq)
        U = csc_matrix(np.block([[Uq, Zb], [Zb, Up]]))
    else:
        U = UU[:,:n]

    # Building operators for ROM problem
    xHatOp = np.zeros((n, nt))
    xHatOp[:,0] = U.T @ ic

    # Define LHS operators
    if L is not None:
        if not approx:
            Lred = U.T @ L @ U
        else:
            Lred = np.block([[np.zeros((n//2,n//2)), np.eye(n//2)],
                             [-np.eye(n//2), np.zeros((n//2,n//2))]])
        LHSop = np.eye(n) - dt/2 * Lred @ DhatOp
        rhsOp = dt * Lred @ DhatOp
    else:
        LHSop = np.eye(n) - dt/2 * DhatOp
        rhsOp = dt * DhatOp

    # Integrate ROMs over test interval
    lu, piv = lu_factor(LHSop)
    for i,t in enumerate(tTest[:-1]):
        rhs = rhsOp @ xHatOp[:,i]
        delXhatOpi = lu_solve((lu, piv), rhs)
        xHatOp[:,i+1] = xHatOp[:,i] + delXhatOpi

    # Reconstruct FO solutions
    XrecOp = U @ xHatOp

    return XrecOp


# Make mp4 movie of an array
def animate_array(arrs, styles, labels, xCoords, eps_x=0.01, eps_y=0.1,
                  yLims=None, legend_loc=0, save=True):
    n_t = arrs[0].shape[-1]

    # Define the update function that will be called at each iteration
    def update(lines, data, frame):
        for i,line in enumerate(lines):
            line.set_ydata(data[i][:,frame])
        return lines,

    fig, ax = plt.subplots()

    ## Should fix this to take care of zero....
    xmin = (xCoords.min()*(1-eps_x) if xCoords.min() > 0
            else xCoords.min()*(1+eps_x))
    xmax = (xCoords.max()*(1+eps_x) if xCoords.max() > 0
            else xCoords.min()*(1-eps_x))
    ymin = (arrs.min()*(1-eps_y) if arrs.min() > 0
            else arrs.min()*(1+eps_y))
    ymax = (arrs.max()*(1+eps_y) if arrs.max() > 0
            else arrs.max()*(1-eps_y))
    ax.set_xlim(xmin,xmax)
    ax.set_ylim(ymin,ymax)
    if yLims:
        ax.set_ylim(yLims[0],yLims[1])

    lines = [0 for i in range(len(arrs))]
    for i,line in enumerate(lines):
        lines[i], = ax.plot(xCoords, arrs[i][:,0],
                            linestyle=styles[i], label=labels[i])
    plt.legend(loc=legend_loc)

    plotFunc = partial(update, lines, arrs)
    anim = FuncAnimation(fig, plotFunc, frames=n_t, interval=20)
    anim.save('myarray.mp4') if save else None
    plt.show()
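

# A minimal usage sketch appended for illustration only: the toy
# canonical system below (J = [[0, I], [-I, 0]], gradH(x) = x) is an
# assumption, not one of the original benchmark problems.
if __name__ == '__main__':
    N = 4
    L = np.block([[np.zeros((N, N)), np.eye(N)],
                  [-np.eye(N), np.zeros((N, N))]])
    A = np.eye(2 * N)
    tR = np.linspace(0, 1, 101)
    X, Xdot, gradH = integrate_Linear_HFOM(tR, np.ones(2 * N), L, A)
    print(relError(X[:, 0], X[:, -1]))   # solution drift over [0, 1]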
8,335
29.093863
78
py
degree2
degree2-master/vector_valued_smfs.py
# -*- coding: utf-8 -*-
'''
The space of vector valued Siegel modular forms of degree two.
cf.
Satoh, On vector valued Siegel modular forms of degree two,
Ibukiyama, Vector valued Siegel modular forms of symmetric
tensor weight of small degrees.
'''

import operator

from sage.misc.cachefunc import cached_method

from degree2.hecke_module import HeckeModule
from degree2.basic_operation import PrecisionDeg2
from degree2.utils import (linearly_indep_rows_index_list, is_number)
from degree2.scalar_valued_smfs import tuples_even_wt_modular_forms
from degree2.tsushima_dimension_formula import hilbert_series_maybe
from degree2.const import ConstMul, CalculatorVectValued
from degree2.const import ScalarModFormConst as SMFC
from degree2.vector_valued_impl.utils import data_dir

import degree2.vector_valued_impl.sym2.even_structure as impl_sym2_even
import degree2.vector_valued_impl.sym2.odd_structure as impl_sym2_odd
import degree2.vector_valued_impl.sym4.even_structure as impl_sym4_even
import degree2.vector_valued_impl.sym4.odd_structure as impl_sym4_odd
import degree2.vector_valued_impl.sym10.even_structure as impl_sym10_even
import degree2.vector_valued_impl.sym10.odd_structure as impl_sym10_odd


def _consts_i_dct():
    consts_i_dct = {(2, 0): (impl_sym2_even.gen_consts(),
                             impl_sym2_even.ignored_dct()),
                    (2, 1): (impl_sym2_odd.gen_consts(),
                             impl_sym2_odd.ignored_dct()),
                    (4, 0): (impl_sym4_even.gen_consts(),
                             impl_sym4_even.ignored_dct()),
                    (4, 1): (impl_sym4_odd.gen_consts(),
                             impl_sym4_odd.ignored_dct()),
                    (10, 0): (impl_sym10_even.gen_consts(),
                              impl_sym10_even.ignored_dct()),
                    (10, 1): (impl_sym10_odd.gen_consts(),
                              impl_sym10_odd.ignored_dct())}
    return consts_i_dct


def vector_valued_siegel_modular_forms(sym_wt, wt, prec,
                                       data_directory=data_dir):
    r'''
    Returns the space of vector valued Siegel modular forms of degree 2
    and weight \det^{wt} \otimes sym(sym_wt).
    '''
    if sym_wt not in [2, 4, 10]:
        raise NotImplementedError

    consts_i_dct = _consts_i_dct()
    parity = wt % 2
    gen_consts, ignored_dct = consts_i_dct[(sym_wt, parity)]
    return _Symj(wt, prec, data_directory=data_directory,
                 j=sym_wt, gen_consts=gen_consts, ignored_dct=ignored_dct)


class VectorValuedSiegelModularForms(HeckeModule):

    def __init__(self, wt, sym_wt, prec):
        self._wt = wt
        self._sym_wt = sym_wt
        self._prec = PrecisionDeg2(prec)

    @property
    def wt(self):
        return self._wt

    @property
    def sym_wt(self):
        return self._sym_wt

    @property
    def prec(self):
        return self._prec

    def dimension(self):
        raise NotImplementedError

    def basis(self):
        raise NotImplementedError

    @cached_method
    def _linearly_indep_tuples_of_given_bd(self, bd):
        basis = self.basis()
        dim = self.dimension()
        if is_number(bd):
            bd = list(PrecisionDeg2(bd))
        tpls = sorted(list(bd), key=lambda x: (x[0] + x[2],
                                               max(x[0], x[2])))
        tpls_w_idx = reduce(operator.add,
                            [[(t, i) for i in range(self.sym_wt + 1)]
                             for t in tpls], [])
        ml = [[f.forms[i][t] for f in basis] for t, i in tpls_w_idx]
        index_list = linearly_indep_rows_index_list(ml, dim)
        res = [tpls_w_idx[i] for i in index_list]
        return res

    def strum_bd_list(self):
        return None

    def linearly_indep_tuples(self):
        bd = self.strum_bd_list()
        if bd is None:
            bd = frozenset(self.prec)
        return self._linearly_indep_tuples_of_given_bd(bd)


def _from_ts_wts(ts):
    lsts = [[4], [6], [10], [12]]
    return (reduce(operator.add, (a * b for a, b in zip(lsts, t)))
            for t in ts)


class GivenWtBase(VectorValuedSiegelModularForms):

    '''A base class for the space of vector valued Siegel modular
    forms of weight det^wt Sym(j).
    '''

    def __init__(self, sym_wt, wt, prec, calculator=None, gen_consts=None):
        super(GivenWtBase, self).__init__(wt, sym_wt, prec)
        self._calculator = calculator
        self._gen_consts = gen_consts

    def dimension(self):
        if self.sym_wt == 8 and self.wt == 4:
            return 1
        elif self.sym_wt <= 10 and self.wt <= 4:
            return 0
        elif self.wt > 4:
            pl = hilbert_series_maybe(self.sym_wt, prec=self.wt + 1)
            return pl[self.wt]
        else:
            raise NotImplementedError(
                "The dimensions of small determinant weights"
                + " are not known in general.")

    def _basis_const(self):
        '''This method should yield a generator that consists of
        instances of ConstMul.'''
        pass

    def _basis_const_base(self, ignored_dct):
        '''This method is used for the implementation of _basis_const.
        ignored_dct is a dictionary whose keys are elements of
        self._gen_consts and whose values are sublists of [4, 6, 10, 12].
        For example, if ignored_dct = {c: [4]} and F is a vector valued
        modular form that corresponds to c, then we do not use
        F * (a monomial including es4) when constructing a basis.
        '''
        wt_to_idx = {4: 0, 6: 1, 10: 2, 12: 3}
        for c in self._gen_consts:
            k = c.weight()
            if c in ignored_dct:
                idcs = [wt_to_idx[w] for w in ignored_dct[c]]
                ts = [t for t in tuples_even_wt_modular_forms(self.wt - k)
                      if all(t[i] == 0 for i in idcs)]
            else:
                ts = tuples_even_wt_modular_forms(self.wt - k)
            for t in _from_ts_wts(ts):
                yield ConstMul(c, SMFC(t))

    @cached_method
    def basis(self):
        gens_dct = self._calculator.forms_dict(self.prec)
        res = []
        for bc in self._basis_const():
            res.append(bc.calc_form_from_f(gens_dct[bc._const_vec],
                                           self.prec))
        return res


class _Symj(GivenWtBase):

    def __init__(self, wt, prec, data_directory=data_dir, j=None,
                 gen_consts=None, ignored_dct=None):
        calculator = CalculatorVectValued(gen_consts, data_directory)
        super(_Symj, self).__init__(j, wt, prec, calculator=calculator,
                                    gen_consts=gen_consts)
        self._ignored_dct = ignored_dct

    def _basis_const(self):
        return self._basis_const_base(self._ignored_dct)


def calculator_symj(j, parity, data_directory=data_dir):
    consts, _ = _consts_i_dct()[j, parity]
    return CalculatorVectValued(consts, data_directory)
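
# A minimal usage sketch (illustrative; assumes a Sage session and that
# the precomputed generator data exist under data_dir):
# sage: M = vector_valued_siegel_modular_forms(2, 10, 6)
# sage: M.dimension()
# sage: M.basis()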
6,907
34.06599
82
py
degree2
degree2-master/const.py
# -*- coding: utf-8 -*-
'''
A module for construction of vector valued Siegel modular forms.
'''

from __future__ import print_function
from abc import ABCMeta, abstractmethod, abstractproperty
import os
import hashlib
import time

from sage.all import (cached_method, mul, fork, matrix, QQ, gcd, latex,
                      PolynomialRing, ZZ)

from degree2.all import degree2_modular_forms_ring_level1_gens
from degree2.utils import find_linearly_indep_indices
from degree2.scalar_valued_smfs import x5__with_prec
from degree2.rankin_cohen_diff import (vector_valued_rankin_cohen,
                                       rankin_cohen_pair_sym,
                                       rankin_cohen_pair_det2_sym,
                                       rankin_cohen_triple_det_sym,
                                       rankin_cohen_triple_det3_sym)
from degree2.elements import SymWtModFmElt as SWMFE
from degree2.basic_operation import PrecisionDeg2

scalar_wts = [4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16]

gens_latex_name = {4: "\\phi_{4}",
                   6: "\\phi_{6}",
                   5: "\\chi_{5}",
                   10: "\\chi_{10}",
                   12: "\\chi_{12}",
                   35: "\\chi_{35}"}


def _prec_value(prec):
    if prec in ZZ:
        return prec
    elif isinstance(prec, PrecisionDeg2):
        return prec._max_value()
    else:
        raise NotImplementedError


class ScalarModFormConst(object):

    def __init__(self, wts):
        """
        Used for construction of scalar valued Siegel modular forms
        of even weights.
        wts is a list or a dict.
        If wts is a list, elements should be in [4, 5, 6, 10, 12, 35].
        Each integer corresponds to the weight of a generator.
        Then self.calc_form returns a monomial of generators
        corresponding to wts.
        If wts is a dict, its keys should be tuples whose elements are
        in [4, 5, 6, 12, 35].
        self.calc_form returns a polynomial of generators
        corresponding to wts.
        """
        if not isinstance(wts, (list, dict)):
            raise TypeError
        self._wts = wts

    @property
    def wts(self):
        return self._wts

    def name(self):
        return "f_{wts}".format(wts="_".join(str(a) for a in self.wts))

    def __eq__(self, other):
        if isinstance(other, ScalarModFormConst):
            return self._frozen_wts() == other._frozen_wts()
        else:
            raise NotImplementedError

    def weight(self):
        return sum(self.wts)

    def __repr__(self):
        return "ScalarModFormConst({a})".format(a=str(self.wts))

    def _frozen_wts(self):
        if isinstance(self.wts, list):
            return tuple(self.wts)
        else:
            return frozenset((k, v) for k, v in self.wts.iteritems())

    @property
    def _key(self):
        return self._frozen_wts()

    def _to_wts_dict(self):
        if isinstance(self.wts, dict):
            return self.wts
        else:
            return {tuple(self.wts): QQ(1)}

    def _chi5_degree(self):
        coeffs_dct = self._to_wts_dict()
        return max([ks.count(5) for ks in coeffs_dct])

    def calc_form(self, prec):
        es4, es6, x10, x12, x35 = \
            degree2_modular_forms_ring_level1_gens(prec)
        x5 = x5__with_prec(prec)
        d = {4: es4, 6: es6, 10: x10, 12: x12, 5: x5, 35: x35}
        return self._calc_from_gens_dict(d)

    def _calc_from_gens_dict(self, dct):
        coeffs_dct = self._to_wts_dict()

        def _monm(ws):
            return mul(dct[k] for k in ws)

        return sum(_monm(k) * v for k, v in coeffs_dct.iteritems())

    def _polynomial_expr(self):
        R = PolynomialRing(QQ,
                           names="phi4, phi6, chi10, chi12, chi35, chi5")
        es4, es6, chi10, chi12, chi35, chi5 = R.gens()
        d = {4: es4, 6: es6, 10: chi10, 12: chi12, 35: chi35, 5: chi5}
        return self._calc_from_gens_dict(d)

    def _latex_(self):
        return latex(self._polynomial_expr())


def latex_expt(n):
    if n == 1:
        return ""
    else:
        return "^{%s}" % str(n)


def latex_rankin_cohen(i, j, lcs):
    if i == 0:
        sub = "\\mathrm{Sym}(%s)" % (j,)
    else:
        sub = "\\det%s \\mathrm{Sym}(%s)" % (latex_expt(i), j)
    l = ", ".join([c for c in lcs])
    return "\\left\\{%s\\right\\}_{%s}" % (l, sub)


scalar_mod_form_wts = {4: [[4]],
                       5: [[5]],
                       6: [[6]],
                       8: [[4, 4]],
                       9: [[4, 5]],
                       10: [[10], [4, 6]],
                       12: [[12], [6, 6], [4, 4, 4]],
                       13: [[4, 4, 5]],
                       14: [[10, 4], [6, 4, 4]],
                       15: [[5, 10], [4, 5, 6]],
                       16: [[4, 12], [6, 10], [4, 6, 6], [4, 4, 4, 4]]}


def _scalar_mod_form_consts():
    return {k: [ScalarModFormConst(a) for a in v]
            for k, v in scalar_mod_form_wts.items()}

scalar_mod_form_consts = _scalar_mod_form_consts()


def rankin_cohen_quadruple_det_sym(j, f1, f2, f3, f4):
    """
    Returns a modular form of wt sym(j) det^(sum + 1).
    """
    return f3 * rankin_cohen_triple_det_sym(j, f1, f2, f4)


def rankin_cohen_quadruple_det_sym_1(j, f1, f2, f3, f4):
    """
    Returns a modular form of wt sym(j) det^(sum + 1).
    """
    F = rankin_cohen_pair_sym(j, f1, f2) * f3
    return vector_valued_rankin_cohen(f4, F)


def rankin_cohen_quadruple_det3_sym(j, f1, f2, f3, f4):
    """
    Returns a modular form of wt sym(j) det^(sum + 3).
    """
    return f3 * rankin_cohen_triple_det3_sym(j, f1, f2, f4)


def rankin_cohen_quadruple_det3_sym_1(j, f1, f2, f3, f4):
    """
    Returns a modular form of wt sym(j) det^(sum + 3).
    """
    F = rankin_cohen_pair_det2_sym(j, f1, f2) * f3
    return vector_valued_rankin_cohen(f4, F)


class ConstVectBase(object):

    __metaclass__ = ABCMeta

    @abstractmethod
    def calc_form(self, prec):
        '''Return the corresponding modular form with precision prec.
        '''
        pass

    @abstractmethod
    def _latex_using_dpd_depth1(self, dpd_dct):
        '''dpd_dct is a dictionary whose set of keys is equal to
        self.dependencies_depth1() and whose values are variable names.
        This method returns a LaTeX expression of self.
        '''
        pass

    @abstractmethod
    def weight(self):
        pass

    def _fname(self, data_dir):
        return os.path.join(data_dir, self._unique_name + ".sobj")

    def save_form(self, form, data_dir):
        form.save_as_binary(self._fname(data_dir))

    def load_form(self, data_dir):
        try:
            return SWMFE.load_from(self._fname(data_dir))
        except IOError:
            raise IOError("cache file for %s is not found" % (repr(self),))

    def calc_form_and_save(self, prec, data_dir, force=False):
        def calc():
            return self.calc_form(prec)
        self._do_and_save(calc, data_dir, force=force)

    def _saved_form_has_suff_prec(self, prec, data_dir):
        '''Return True if the cache file exists and the precision of
        the cached form in data_dir is greater than or equal to the
        given prec.
        '''
        if not os.path.exists(self._fname(data_dir)):
            return False
        f = self.load_form(data_dir)
        return bool(f.prec >= PrecisionDeg2(prec))

    def _do_and_save(self, call_back, data_dir, force=False):
        '''Compute a modular form by call_back and save the result to
        data_dir. If force is True, it overwrites the existing file.
        '''
        if force or not os.path.exists(self._fname(data_dir)):
            f = call_back()
            self.save_form(f, data_dir)

    @abstractproperty
    def _key(self):
        pass

    @abstractproperty
    def sym_wt(self):
        pass

    @property
    def _unique_name(self):
        '''
        Returns a unique name by using hashlib.sha1.
        '''
        m = hashlib.sha1()
        m.update(str(self._key))
        return m.hexdigest()

    def __hash__(self):
        return hash(self._key)

    def __eq__(self, other):
        return isinstance(other, ConstVectBase) and self._key == other._key

    @abstractmethod
    def needed_prec_depth1(self, prec):
        '''prec: an integer or an instance of PrecisionDeg2.
        This method should return a non-negative integer.
        To compute self with precision prec, dependencies_depth1 and
        instances of ScalarModFormConst that self depends on have to be
        computed with precision equal to this value.
        '''
        pass

    @abstractmethod
    def dependencies_depth1(self):
        '''This method should return a list of instances of
        (child classes of) ConstVectBase needed for the computation
        of self. It is not necessary to add dependencies of the
        dependencies to the list.
        '''
        pass

    def walk(self):
        '''Returns a generator that yields all dependencies of self,
        followed by self itself. Elements with fewer dependencies are
        yielded first.
        '''
        dep_dpth1 = self.dependencies_depth1()
        if not dep_dpth1:
            yield self
        else:
            for c in dep_dpth1:
                for a in c.walk():
                    yield a
            yield self

    @abstractmethod
    def calc_form_from_dependencies_depth_1(self, prec, depds_dct):
        '''depds_dct is a dictionary whose set of keys contains
        dependencies_depth1, and whose value at an element of
        dependencies_depth1 is a modular form with precision prec.
        This method computes a modular form corresponding to self
        from depds_dct.
        '''
        pass


class ConstVectValued(ConstVectBase):

    def __init__(self, sym_wt, consts, inc, tp):
        self._sym_wt = sym_wt
        self._consts = consts
        self._inc = inc
        self._type = tp

    def dependencies_depth1(self):
        return []

    @property
    def sym_wt(self):
        return self._sym_wt

    def weight(self):
        return sum([c.weight() for c in self.consts]) + self.inc

    @property
    def consts(self):
        return self._consts

    @property
    def inc(self):
        return self._inc

    @property
    def type(self):
        return self._type

    def __iter__(self):
        for a in [self.consts, self.inc, self.type]:
            yield a

    def __repr__(self):
        return "ConstVectValued({sym_wt}, {a}, {b}, {c})".format(
            sym_wt=str(self.sym_wt),
            a=str(self.consts),
            b=str(self.inc),
            c="None" if self.type is None else "'%s'" % self.type)

    @property
    def _key(self):
        res = ("ConstVectValued",
               self.sym_wt,
               tuple([a._frozen_wts() for a in self.consts]),
               self.inc,
               self.type)
        return res

    def needed_prec_depth1(self, prec):
        prec = _prec_value(prec)
        nm_of_x5 = sum(c._chi5_degree() for c in self.consts)
        return prec + nm_of_x5 // 2

    def calc_form_from_dependencies_depth_1(self, prec, depds_dct):
        return self.calc_form(prec)

    def calc_form(self, prec):
        prec = self.needed_prec_depth1(prec)
        funcs = {2: self._calc_form2,
                 3: self._calc_form3,
                 4: self._calc_form4}
        l = len(self.consts)
        if l in [2, 3, 4]:
            return funcs[l](prec)
        else:
            raise NotImplementedError

    def forms(self, prec):
        return [c.calc_form(prec) for c in self.consts]

    def _calc_form2(self, prec):
        funcs = {0: rankin_cohen_pair_sym,
                 2: rankin_cohen_pair_det2_sym}
        func = funcs[self.inc]
        forms = self.forms(prec)
        return func(self.sym_wt, *forms)

    def _calc_form3(self, prec):
        funcs = {1: rankin_cohen_triple_det_sym,
                 3: rankin_cohen_triple_det3_sym}
        func = funcs[self.inc]
        forms = self.forms(prec)
        return func(self.sym_wt, *forms)

    def _calc_form4(self, prec):
        if self.inc == 1:
            funcs = {'a': rankin_cohen_quadruple_det_sym,
                     'b': rankin_cohen_quadruple_det_sym_1}
            func = funcs[self.type]
            forms = self.forms(prec)
            return func(self.sym_wt, *forms)
        elif self.inc == 3:
            funcs = {'a': rankin_cohen_quadruple_det3_sym,
                     'b': rankin_cohen_quadruple_det3_sym_1}
            func = funcs[self.type]
            forms = self.forms(prec)
            return func(self.sym_wt, *forms)
        else:
            raise NotImplementedError

    def _latex_(self):
        if len(self.consts) in [2, 3]:
            return self._latex()
        elif len(self.consts) == 4:
            return self._latex4()
        else:
            raise NotImplementedError

    def _latex_using_dpd_depth1(self, dpd_dct):
        return self._latex()

    def _latex(self):
        lcs = [c._latex_() for c in self.consts]
        return latex_rankin_cohen(self.inc, self.sym_wt, lcs)

    def _latex4(self):
        f1, f2, f3, f4 = self.consts
        if self.type == "a":
            lcs = [c._latex_() for c in [f1, f2, f4]]
            lrc = latex_rankin_cohen(self.inc, self.sym_wt, lcs)
            return "%s %s" % (f3._latex_(), lrc)
        elif self.type == "b":
            lrcp = latex_rankin_cohen(self.inc - 1, self.sym_wt,
                                      [c._latex_() for c in [f1, f2]])
            lvec = "%s %s" % (f3._latex_(), lrcp)
            lcs = [f4._latex_(), lvec]
            return latex_rankin_cohen(1, self.sym_wt, lcs)


class ConstVectValuedHeckeOp(ConstVectBase):

    def __init__(self, const_vec, m=2):
        self._const_vec = const_vec
        self._m = m
        self._sym_wt = const_vec.sym_wt

    def weight(self):
        return self._const_vec.weight()

    @property
    def sym_wt(self):
        return self._sym_wt

    def __repr__(self):
        return "ConstVectValuedHeckeOp({a}, m={m})".format(
            a=repr(self._const_vec), m=str(self._m))

    @property
    def _key(self):
        return ("ConstVectValuedHeckeOp", self._const_vec._key, self._m)

    def dependencies_depth1(self):
        return [self._const_vec]

    def needed_prec_depth1(self, prec):
        prec = _prec_value(prec)
        return self._m * prec

    def calc_form(self, prec):
        f = self._const_vec.calc_form(self._m * prec)
        return self.calc_form_from_f(f, prec)

    def calc_form_from_f(self, f, prec):
        return f.hecke_operator_acted(self._m, prec)

    def calc_form_from_dependencies_depth_1(self, prec, depds_dct):
        f = depds_dct[self._const_vec]
        return self.calc_form_from_f(f, prec)

    def _latex_(self):
        return "\\mathrm{T}(%s) %s" % (self._m, self._const_vec._latex_())

    def _latex_using_dpd_depth1(self, dpd_dct):
        return r"%s \mid \mathrm{T}(%s)" % (dpd_dct[self._const_vec],
                                            self._m)


class ConstDivision(ConstVectBase):

    '''Returns a construction for a vector valued modular form
    obtained by dividing by a scalar valued modular form.
    This construction corresponds to
    sum(F*a for F, a in zip(consts, coeffs)) / scalar_const.
    The needed precision is increased by inc.
    '''

    def __init__(self, consts, coeffs, scalar_const, inc):
        self._consts = consts
        self._coeffs = coeffs
        self._inc = inc
        self._scalar_const = scalar_const

    @property
    def sym_wt(self):
        return self._consts[0].sym_wt

    @cached_method
    def weight(self):
        return self._consts[0].weight() - self._scalar_const.weight()

    def __repr__(self):
        return "ConstDivision({consts}, {coeffs}, {scc}, {inc})".format(
            consts=str(self._consts), coeffs=str(self._coeffs),
            scc=self._scalar_const, inc=str(self._inc))

    def dependencies_depth1(self):
        return self._consts

    def needed_prec_depth1(self, prec):
        prec = _prec_value(prec)
        return prec + self._inc

    def calc_form(self, prec):
        forms = [c.calc_form(prec + self._inc) for c in self._consts]
        return self.calc_from_forms(forms, prec)

    @property
    def _key(self):
        return ("ConstDivision",
                tuple([c._key for c in self._consts]),
                tuple([a for a in self._coeffs]),
                self._scalar_const._key,
                self._inc)

    def calc_from_forms(self, forms, prec):
        f = self._scalar_const.calc_form(prec + self._inc)
        g = sum((a * f for a, f in zip(self._coeffs, forms)))
        return g.divide(f, prec, parallel=True)

    def calc_form_from_dependencies_depth_1(self, prec, depds_dct):
        forms = [depds_dct[c] for c in self._consts]
        return self.calc_from_forms(forms, prec)

    def _latex_using_dpd_depth1(self, dpd_dct):
        names = [dpd_dct[c] for c in self._consts]
        _gcd = QQ(gcd(self._coeffs))
        coeffs = [c / _gcd for c in self._coeffs]
        coeffs_names = [(c, n) for c, n in zip(coeffs, names)
                        if c != 0]
        tail_terms = ["%s %s %s" % ("+" if c > 0 else "", c, n)
                      for c, n in coeffs_names[1:]]
        c0, n0 = coeffs_names[0]
        head_term = str(c0) + " " + str(n0)
        return r"\frac{{{pol_num}}}{{{pol_dnm}}} \left({terms}\right)".format(
            pol_dnm=latex(_gcd.denominator()
                          * self._scalar_const._polynomial_expr()),
            pol_num=latex(_gcd.numerator()),
            terms=" ".join([head_term] + tail_terms))


class ConstDivision0(ConstDivision):

    '''
    This class is obsolete. Use ConstDivision instead.
    '''

    def __init__(self, consts, coeffs, scalar_const):
        ConstDivision.__init__(self, consts, coeffs, scalar_const, 0)

    def __repr__(self):
        return "ConstDivision0({consts}, {coeffs}, {scc})".format(
            consts=str(self._consts), coeffs=str(self._coeffs),
            scc=str(self._scalar_const))

    @property
    def _key(self):
        return ("ConstDivision0",
                tuple([c._key for c in self._consts]),
                tuple([a for a in self._coeffs]),
                self._scalar_const._key)


class ConstMul(ConstVectBase):

    def __init__(self, const, scalar_const):
        self._const_vec = const
        self._scalar_const = scalar_const

    @property
    def sym_wt(self):
        return self._const_vec[0].sym_wt

    def weight(self):
        return self._const_vec.weight() + self._scalar_const.weight()

    def __repr__(self):
        return "ConstMul({const}, {scc})".format(
            const=str(self._const_vec), scc=self._scalar_const)

    @property
    def _key(self):
        return ("ConstMul", self._const_vec._key,
                self._scalar_const._key)

    def calc_form(self, prec):
        f = self._const_vec.calc_form(prec)
        return self.calc_form_from_f(f, prec)

    def dependencies_depth1(self):
        return [self._const_vec]

    def needed_prec_depth1(self, prec):
        if self._scalar_const._chi5_degree() > 0:
            raise NotImplementedError
        return _prec_value(prec)

    def calc_form_from_f(self, f, prec):
        g = self._scalar_const.calc_form(self.needed_prec_depth1(prec))
        return f * g

    def calc_form_from_dependencies_depth_1(self, prec, depds_dct):
        f = depds_dct[self._const_vec]
        return self.calc_form_from_f(f, prec)

    def _latex_using_dpd_depth1(self, dpd_dct):
        return "%s %s" % (self._scalar_const._latex_(),
                          dpd_dct[self._const_vec])


def dependencies(vec_const):
    '''Returns a set of instances of ConstVectBase needed for the
    computation of vec_const.
    '''
    dep_dpth1 = vec_const.dependencies_depth1()
    if not dep_dpth1:
        # No dependencies.
        return set([])
    else:
        return reduce(lambda x, y: x.union(y),
                      (dependencies(c) for c in dep_dpth1),
                      set(dep_dpth1))


def needed_precs(vec_const, prec):
    '''Returns a dict whose set of keys is equal to the union of
    dependencies(vec_const) and set([vec_const]) and whose values
    are equal to needed_prec_depth1.
    '''
    prec = _prec_value(prec)
    dep_dpth1 = vec_const.dependencies_depth1()
    res = {}
    nprec = vec_const.needed_prec_depth1(prec)
    res[vec_const] = nprec
    dcts = [needed_precs(c, nprec) for c in dep_dpth1]
    for c in dependencies(vec_const):
        res[c] = max(d.get(c, prec) for d in dcts)
    return res


class CalculatorVectValued(object):

    def __init__(self, const_vecs, data_dir):
        self._const_vecs = const_vecs
        self._data_dir = data_dir

    def file_name(self, c):
        return c._fname(self._data_dir)

    def _mat_ls(self, consts, prec):
        prec = PrecisionDeg2(prec)
        sym_wt = consts[0].sym_wt
        d = self.forms_dict(prec)
        ts = [(t, i) for t in prec for i in range(sym_wt + 1)]
        return [[d[c][t] for t in ts] for c in consts]

    def rank(self, consts, prec=5):
        return matrix(self._mat_ls(consts, prec)).rank()

    def linearly_indep_consts(self, consts, prec=5):
        ms = self._mat_ls(consts, prec)
        idcs = find_linearly_indep_indices(ms, matrix(ms).rank())
        return [consts[i] for i in idcs]

    def all_dependencies(self):
        '''Returns a set of all dependencies needed for the computation.
        '''
        return reduce(lambda x, y: x.union(y),
                      (dependencies(c) for c in self._const_vecs))

    @cached_method
    def all_needed_precs(self, prec):
        '''Returns a dict whose set of keys is equal to the union of
        all_dependencies and set(self._const_vecs) and whose values
        are equal to needed_prec.
        '''
        prec = _prec_value(prec)
        res = {}
        dcts = [needed_precs(c, prec) for c in self._const_vecs]
        kys = self.all_dependencies().union(set(self._const_vecs))
        for c in kys:
            res[c] = max(d.get(c, prec) for d in dcts)
        return res

    def rdeps(self, const):
        '''Returns the subset of the union of all_dependencies and
        set(self._const_vecs) consisting of elements that depend on
        const with depth 1.
        '''
        return {c for c in
                self.all_dependencies().union(set(self._const_vecs))
                if const in c.dependencies_depth1()}

    def rdep_prec(self, const, prec):
        '''We have to compute const with this precision to compute
        self._consts with precision prec.
        '''
        d = self.all_needed_precs(prec)
        _rdeps = self.rdeps(const)
        if _rdeps:
            return max(d[a] for a in _rdeps)
        else:
            return _prec_value(prec)

    def calc_forms_and_save(self, prec, verbose=False,
                            do_fork=False, force=False):
        '''Compute self._const_vecs and save the result to
        self._data_dir.
        If verbose is True, then it shows a message when each
        computation is done.
        If force is True, then it overwrites existing files.
        If do_fork is True, fork the process in each computation.
        '''
        if not os.path.exists(self._data_dir):
            raise IOError("%s does not exist." % (self._data_dir,))

        def msg(c, prc):
            return "{t}: Computing {c} with prec {prc}".format(
                c=repr(c), t=str(time.ctime()), prc=str(prc))

        if verbose:
            print("Start: " + time.ctime())

        computed_consts = []

        def calc_and_save(c, prc):
            def call_back():
                depds_dct = {dp: dp.load_form(self._data_dir)
                             for dp in c.dependencies_depth1()}
                f = c.calc_form_from_dependencies_depth_1(prc, depds_dct)
                return f
            c._do_and_save(call_back, self._data_dir, force=force)

        if do_fork:
            calc_and_save = fork(calc_and_save)

        for c in self._const_vecs:
            for b in c.walk():
                if b not in computed_consts:
                    prc = self.rdep_prec(b, prec)
                    if verbose:
                        print(msg(b, prc))
                    if not b._saved_form_has_suff_prec(prc,
                                                       self._data_dir):
                        calc_and_save(b, self.rdep_prec(b, prec))
                    computed_consts.append(b)

        if verbose:
            print("Finished: " + time.ctime())

    def forms_dict(self, prec):
        return {c: (c.load_form(self._data_dir))._down_prec(prec)
                for c in self._const_vecs}

    def unique_names_dict(self):
        return {c: c._unique_name for c in self._const_vecs}


def check_collision(consts):
    keys = [a._key for a in consts]
    names = [a._unique_name for a in consts]
    assert len(keys) == len(names)
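
# A minimal usage sketch (illustrative; run inside Sage). The weights
# below are arbitrary choices for demonstration:
# sage: c = ScalarModFormConst([4, 6])
# sage: c.weight()
# 10
# sage: cv = ConstVectValued(2, [ScalarModFormConst([4]),
# ....:                          ScalarModFormConst([6])], 0, None)
# sage: cv.weight()
# 10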
24,913
30.10362
79
py
degree2
degree2-master/hecke_module.py
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod, abstractproperty
import operator

import sage
from sage.all import (factor, ZZ, QQ, PolynomialRing, matrix,
                      zero_vector, vector, gcd, valuation)
from sage.misc.cachefunc import cached_method

from degree2.utils import (_is_triple_of_integers, is_number, uniq,
                           polynomial_func, pmap)
from degree2.basic_operation import (reduced_form_with_sign,
                                     number_of_procs)
from degree2.modular_form_module import ModularFormModule


class HalfIntegralMatrices2(object):

    '''
    An instance of this class corresponds to a tuple (n, r, m).
    '''

    def __eq__(self, other):
        if isinstance(other, tuple):
            return self._t == other
        elif isinstance(other, HalfIntegralMatrices2):
            return self._t == other._t
        else:
            raise NotImplementedError

    def __repr__(self):
        return str(self._t)

    def __init__(self, tpl):
        (self._n, self._r, self._m) = tpl
        if not _is_triple_of_integers(tpl):
            raise TypeError("tpl must be a triple of integers.")
        self._t = tpl

    def __hash__(self):
        return self._t.__hash__()

    def __add__(self, other):
        return HalfIntegralMatrices2((self._n + other._n,
                                      self._r + other._r,
                                      self._m + other._m))

    def __neg__(self):
        return tuple(-x for x in self._t)

    def __sub__(self, other):
        return self + other.__neg__()

    def __getitem__(self, matlist):
        '''
        matlist is a list such as [[a, b], [c, d]] that corresponds to
        a 2-by-2 matrix.
        Returns matlist.transpose() * self * matlist.
        '''
        ((a, b), (c, d)) = matlist
        (n, r, m) = self._t
        return HalfIntegralMatrices2((a ** 2 * n + a * c * r + c ** 2 * m,
                                      2 * a * b * n + (a * d + b * c) * r
                                      + 2 * c * d * m,
                                      b ** 2 * n + b * d * r + d ** 2 * m))

    def is_divisible_by(self, a):
        return all([x % a == 0 for x in self._t])

    def __rmul__(self, a):
        return HalfIntegralMatrices2((self._n * a, self._r * a,
                                      self._m * a))

    def __div__(self, a):
        return HalfIntegralMatrices2((self._n // a, self._r // a,
                                      self._m // a))


class HeckeModuleElement(object):

    __metaclass__ = ABCMeta

    @abstractproperty
    def wt(self):
        pass

    @abstractproperty
    def sym_wt(self):
        pass

    @abstractproperty
    def base_ring(self):
        pass

    @abstractmethod
    def _none_zero_tpl(self):
        pass

    @abstractmethod
    def __getitem__(self, t):
        pass

    @abstractmethod
    def hecke_operator_acted(self, m, prec=None):
        pass

    def _hecke_tp(self, p, tpl):
        '''
        Returns the tpl-th Fourier coefficient of T(p)(self),
        where p is a prime.
        cf. Andrianov, Zhuravlev, Modular Forms and Hecke Operators,
        pp. 242.
        '''
        n, r, m = tpl
        return self[(p * n, p * r, p * m)] + self._hecke_tp_psum(p, tpl)

    def _hecke_tp_psum(self, p, tpl):
        return sum([self[t] * v
                    for t, v in self._hecke_tp_psum_alst(p, tpl)])

    @cached_method
    def _hecke_tp_psum_alst(self, p, tpl):
        n, r, m = tpl
        k = self.wt
        res = []
        if n % p == 0 and m % p == 0 and r % p == 0:
            res.append(((n / p, r / p, m / p), p ** (2 * k - 3)))
        if m % p == 0:
            res.append(((m / p, -r, p * n), p ** (k - 2)))
        l = [u for u in range(p)
             if (n + r * u + m * (u ** 2)) % p == 0]
        for u in l:
            res.append((((n + r * u + m * (u ** 2)) / p,
                         r + 2 * u * m, p * m),
                        p ** (k - 2)))
        return res

    def _hecke_tp_needed_tuples(self, p, tpl):
        n, r, m = tpl
        return ([t for t, _ in self._hecke_tp_psum_alst(p, tpl)]
                + [(p * n, p * r, p * m)])

    def _hecke_tp2_for_eigenform(self, p, tpl, lambda_p=None):
        '''
        Assuming self is an eigenform, returns the tpl-th Fourier
        coefficient of T(p^2)(self), where p is a prime and lambda_p
        is the eigenvalue for T(p).
        cf. Andrianov, Zhuravlev, Modular Forms and Hecke Operators,
        pp. 242.
        '''
        # Assume we know the Hecke eigenvalue for T(p), and return the
        # p**i * t th Fourier coeff.
        def fc(i, t):
            if i == 0:
                return self[t]
            else:
                tp = tuple([p ** (i - 1) * x for x in t])

                def idc(n, r, m):
                    e = min(valuation(reduce(gcd, (n, r, m)), p), i - 1)
                    return (e, tuple([x // p ** e for x in (n, r, m)]))

                alst = []
                for u, v in self._hecke_tp_psum_alst(p, tp):
                    b = idc(*u)
                    alst.append((b[0], b[1], v))
                psum = sum([v * fc(e, u) for e, u, v in alst])
                return lambda_p * fc(i - 1, t) - psum

        return sum([v * fc(i, t)
                    for i, t, v in self._hecke_tp2_sum_alst(p, tpl)])

    def _hecke_tp2(self, p, tpl):
        '''
        Returns the tpl-th Fourier coefficient of T(p^2)(self),
        where p is a prime.
        cf. Andrianov, Zhuravlev, Modular Forms and Hecke Operators,
        pp. 242.
        '''
        return sum(v * self[(p**i * n, p**i * r, p**i * m)]
                   for i, (n, r, m), v
                   in self._hecke_tp2_sum_alst(p, tpl))

    @cached_method
    def _hecke_tp2_sum_alst(self, p, tpl):
        '''
        Returns an alist of elements (i, (n, r, m), v) such that
        the sum of v * self[(p**i * n, p**i * r, p**i * m)] is
        _hecke_tp2(p, tpl).
        '''
        R = HalfIntegralMatrices2(tpl)
        k = self.wt

        def psum_alst(i1, i2, i3):
            if not R.is_divisible_by(p ** i3):
                return []
            a = p ** (i2 * (k - 2) + i3 * (2 * k - 3))
            tpls = []
            for tD in reprs(i2):
                if R[tD].is_divisible_by(p ** (i2 + i3)):
                    A = R[tD] / p ** (i2 + i3)
                    tpls.append(A._t)
            return [(i1, t, a) for t in tpls]

        def reprs(i2):
            if i2 == 0:
                return [[[1, 0], [0, 1]]]
            else:
                l1 = [[[1, 0], [u, p ** i2]] for u in range(p ** i2)]
                l2 = [[[p * u, p ** i2], [-1, 0]]
                      for u in range(p ** (i2 - 1))]
                return l1 + l2

        idcs = [(i1, i2, i3) for i1 in range(3)
                for i2 in range(3) for i3 in range(3)
                if i1 + i2 + i3 == 2]
        return reduce(operator.add, [psum_alst(*i) for i in idcs], [])

    def _hecke_tp2_needed_tuples(self, p, tpl):
        def nd_tpls(i, t):
            if i == 0:
                return [t]
            else:
                n, r, m = tpl
                return reduce(
                    operator.add,
                    [[x[0] for x in
                      self._hecke_tp_psum_alst(p, (p ** a * n,
                                                   p ** a * r,
                                                   p ** a * m))]
                     for a in range(i)], [])

        res = []
        for i, t, _ in self._hecke_tp2_sum_alst(p, tpl):
            res += nd_tpls(i, t)
        return [(n, r, m) for n, r, m in res
                if (n % p, r % p, m % p) != (0, 0, 0)]

    def _hecke_eigen_needed_tuples(self, m):
        tpl = self._none_zero_tpl()
        p, i = factor(m)[0]
        if not (ZZ(m).is_prime_power() and 0 < i < 3):
            raise RuntimeError("m must be a prime or the square of a prime.")
        if i == 1:
            return uniq(reduced_form_with_sign(t)[0]
                        for t in self._hecke_tp_needed_tuples(p, tpl))
        if i == 2:
            l1 = self._hecke_eigen_needed_tuples(p)
            l = [reduced_form_with_sign(t)[0]
                 for t in self._hecke_tp2_needed_tuples(p, tpl)]
            return uniq(l1 + l)

    def _hecke_op_vector_vld(self, p, i, tpl):
        '''
        Assuming self is a vector valued Siegel modular form, returns
        the tpl-th Fourier coefficient of T(p^i)(self).
        Here tpl is a triple of integers or a tuple (t, a) with
        t a triple of integers and a an integer.
        cf. Arakawa, Vector valued Siegel's modular forms of degree two
        and the associated Andrianov L-functions, pp. 166.
        '''
        if isinstance(tpl[0], tuple):
            return self._hecke_op_vector_vld(p, i, tpl[0]).vec[tpl[1]]

        p = ZZ(p)
        zero = SymTensorRepElt.zero(self.sym_wt, self.wt)

        if isinstance(tpl, tuple):
            tpl = HalfIntegralMatrices2(tpl)

        def term(al, bt, gm, u):
            if not (al + bt + gm == i
                    and tpl.is_divisible_by(p ** gm)
                    and tpl[u].is_divisible_by(p ** (gm + bt))):
                return zero
            else:
                t = p ** al * (tpl[u] / p ** (bt + gm))
                return (u.transpose() ** (-1)) * self[t]

        res = zero
        mu = 2 * self.wt + self.sym_wt - 3
        for al in range(i + 1):
            for bt in range(i + 1 - al):
                for gm in range(i + 1 - al - bt):
                    for u in reprs_of_double_cosets(p, bt):
                        u = matrix(u)
                        res += (p ** (i * mu + bt - mu * al)
                                * term(al, bt, gm, u))
        return res

    def hecke_operator(self, m, tpl):
        '''
        Assumes m is a prime or the square of a prime, and returns
        the tpl-th Fourier coefficient of T(m)(self).
        cf. Andrianov, Zhuravlev, Modular Forms and Hecke Operators,
        pp. 242.
        '''
        p, i = factor(m)[0]
        if not (ZZ(m).is_prime_power() and 0 < i < 3):
            raise RuntimeError("m must be a prime or the square of a prime.")
        if self.sym_wt == 0:
            if i == 1:
                return self._hecke_tp(p, tpl)
            elif i == 2:
                return self._hecke_tp2(p, tpl)
        else:
            return self._hecke_op_vector_vld(p, i, tpl)

    def hecke_eigenvalue(self, m):
        '''
        Assuming self is an eigenform, returns the m-th Hecke eigenvalue.
        '''
        t = self._none_zero_tpl()
        K = self.base_ring
        if hasattr(K, "fraction_field"):
            K = K.fraction_field()
        if self.sym_wt == 0 and ZZ(m).is_prime_power() \
                and factor(m)[0][1] == 2:
            p = factor(m)[0][0]
            lp = self.hecke_eigenvalue(p)
            return K(self._hecke_tp2_for_eigenform(p, t, lp)) / self[t]
        else:
            return K(self.hecke_operator(m, t) / self[t])

    def euler_factor_of_spinor_l(self, p, var="x"):
        '''
        Assuming self is an eigenform, this method returns the p-Euler
        factor of the spinor L-function as a polynomial.
        '''
        K = self.base_ring
        if hasattr(K, "fraction_field"):
            K = K.fraction_field()
        R = PolynomialRing(K, 1, names=var, order='neglex')
        x = R.gens()[0]
        a1 = self.hecke_eigenvalue(p)
        a2 = self.hecke_eigenvalue(p ** 2)
        mu = 2 * self.wt + self.sym_wt - 3
        return (1 - a1 * x + (a1 ** 2 - a2 - p ** (mu - 1)) * x ** 2
                - a1 * p ** mu * x ** 3 + p ** (2 * mu) * x ** 4)

    def euler_factor_of_standard_l(self, p, var="x"):
        '''
        Assuming self is an eigenform, this method returns the p-Euler
        factor of the standard L-function as a polynomial.
        '''
        K = self.base_ring
        if hasattr(K, "fraction_field"):
            K = K.fraction_field()
        mu = 2 * self.wt + self.sym_wt - 3
        b = p ** mu
        laml = self.hecke_eigenvalue(p)
        laml2 = self.hecke_eigenvalue(p ** 2)
        a1 = laml ** 2 / QQ(b)
        a2 = laml2 / QQ(b) + QQ(1) / QQ(p)
        R = PolynomialRing(K, 1, names=var, order='neglex')
        x = R.gens()[0]
        return 1 + (a2 - a1 + 1) * x + a2 * x ** 2 - a2 * x ** 3 \
            + (-a2 + a1 - 1) * x ** 4 - x ** 5


class HeckeModule(ModularFormModule):

    __metaclass__ = ABCMeta

    @cached_method
    def hecke_matrix(self, a):
        return self.matrix_representaion(
            lambda f, t: f.hecke_operator(a, t))

    def hecke_charpoly(self, m, var='x', algorithm='linbox'):
        return self.hecke_matrix(m).charpoly(var, algorithm)

    def eigenform_with_eigenvalue_t2(self, lm):
        '''
        Assuming the characteristic polynomial of T(2) has no double
        eigenvalues, this method returns an eigenform whose T(2)
        eigenvalue is lm.
        '''
        res = self.eigenvector_with_eigenvalue(
            lambda f, t: f.hecke_operator(2, t), lm)
        if self.is_eigen_form(res):
            return res
        else:
            raise RuntimeError("This should not happen.")

    def is_eigen_form(self, f, tupls=False):
        if tupls is False:
            tupls = self.linearly_indep_tuples()
        lm = f.hecke_eigenvalue(2)
        return all([f.hecke_operator(2, t) == lm * f[t] for t in tupls])

    def basis_of_subsp_annihilated_by(self, pol, a=2, parallel=False):
        '''
        Returns the basis of the subspace annihilated by pol(T(a)).
        '''
        S = PolynomialRing(QQ, names="x")
        pol = S(pol)
        A = self.hecke_matrix(a)
        B = polynomial_func(pol)(A.transpose())
        if parallel:
            with number_of_procs(1):
                res = pmap(self._to_form, B.kernel().basis())
        else:
            res = [self._to_form(v) for v in B.kernel().basis()]
        return res

    def basis(self):
        pass

    def linearly_indep_tuples(self):
        pass


def reprs_of_double_cosets(p, i):
    '''
    p: prime.
    Returns representatives of GL2(Z) diag(1, p^i) GL2(Z) / GL2(Z).
    '''
    if i == 0:
        return [[[1, 0], [0, 1]]]
    else:
        l1 = [[[1, 0], [u, p ** i]] for u in range(p ** i)]
        l2 = [[[p * u, p ** i], [-1, 0]] for u in range(p ** (i - 1))]
        return l1 + l2


symmetric_tensor_pol_ring = PolynomialRing(QQ, names="u1, u2")


class SymTensorRepElt(object):

    r'''
    An element of Sym(j) \otimes det^{wt}.
    '''

    def __init__(self, vec, wt):
        '''
        vec is a returned value of degree2.SymWtModFmElt.__getitem__.
        '''
        self._vec = vec
        self._sym_wt = len(vec) - 1
        self._wt = wt

    @property
    def vec(self):
        return self._vec

    @property
    def sym_wt(self):
        return self._sym_wt

    @property
    def wt(self):
        return self._wt

    @classmethod
    def zero(cls, j, wt):
        return cls(zero_vector(j + 1), wt)

    def __eq__(self, other):
        if is_number(other) and other == 0:
            return self.vec == 0
        if isinstance(other, SymTensorRepElt):
            return self.wt == other.wt and self.vec == other.vec
        else:
            raise NotImplementedError

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return self.vec.__repr__()

    def _to_pol(self):
        u1, u2 = symmetric_tensor_pol_ring.gens()
        m = self.sym_wt
        return sum([a * u1 ** (m - i) * u2 ** i
                    for a, i in zip(self.vec, range(m + 1))])

    def group_action(self, mt):
        '''
        mt is an element of GL2.
        Returns a vector corresponding to mt . self,
        where . means the group action.
        '''
        (a, b), (c, d) = mt
        vec_pl = self._to_pol()
        u1, u2 = vec_pl.parent().gens()
        vec_pl = vec_pl.subs({u1: u1 * a + u2 * c,
                              u2: u1 * b + u2 * d})
        dt = (a * d - b * c) ** self.wt
        vec = vector([dt * vec_pl[(self.sym_wt - i, i)]
                      for i in range(self.sym_wt + 1)])
        return SymTensorRepElt(vec, self.wt)

    def __add__(self, other):
        if other == 0:
            return self
        elif (isinstance(other, SymTensorRepElt)
              and self.wt == other.wt):
            return SymTensorRepElt(self.vec + other.vec, self.wt)
        else:
            raise NotImplementedError

    def __radd__(self, other):
        return self.__add__(other)

    def __mul__(self, other):
        if is_number(other):
            return SymTensorRepElt(self.vec * other, self.wt)
        else:
            raise NotImplementedError

    def __rmul__(self, other):
        if is_number(other):
            return self.__mul__(other)
        elif isinstance(other, list) \
                or (hasattr(other, "parent")
                    and isinstance(other.parent(),
                                   sage.matrix.matrix_space.MatrixSpace)):
            return self.group_action(other)
        else:
            raise NotImplementedError

    def __neg__(self):
        return self.__mul__(-1)

    def __sub__(self, other):
        return self.__add__(other.__neg__())

    def __div__(self, other):
        if isinstance(other, SymTensorRepElt):
            return self.vec / other.vec
        else:
            raise NotImplementedError

    def __getitem__(self, i):
        return self.vec[i]
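
# A minimal usage sketch (illustrative; run inside Sage). The element
# below is u1^2 in Sym(2) with det-weight 10; the matrix swaps u1, u2:
# sage: v = SymTensorRepElt(vector(QQ, [1, 0, 0]), 10)
# sage: v.group_action([[0, 1], [1, 0]]).vec
# (0, 0, 1)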
17,368
31.648496
87
py
degree2
degree2-master/modular_form_module.py
# -*- coding: utf-8; mode: sage -*-
from abc import ABCMeta, abstractmethod
from sage.all import matrix, QQ, PolynomialRing, identity_matrix, vector


class ModularFormModule(object):

    __metaclass__ = ABCMeta

    @abstractmethod
    def basis(self):
        '''
        Should return a list [b_i for i = 0, ..., n-1].
        b_i should be an instance of HeckeModuleElement.
        '''
        pass

    @abstractmethod
    def linearly_indep_tuples(self):
        '''
        Should return a list [t_i for i = 0, ..., n-1] so that
        matrix(b_i[t_j]) is regular, where
        self.basis() = [b_i for i = 0, ..., n-1].
        t_i should be a triple of integers in the scalar valued case.
        In the vector valued case, t_i should be a tuple (t, i)
        where t is a triple of integers and i is an integer
        (see the definition of __getitem__ of vector valued Siegel
        modular forms).
        '''
        pass

    def matrix_representaion(self, lin_op):
        '''Let lin_op(f, t) be an endomorphism of self, where f is a
        modular form and t is an object corresponding to a matrix.
        This method returns the matrix representation of lin_op.
        '''
        basis = self.basis()
        lin_indep_tuples = self.linearly_indep_tuples()
        m1 = matrix([[f[t] for t in lin_indep_tuples] for f in basis])
        m2 = matrix([[lin_op(f, t) for t in lin_indep_tuples]
                     for f in basis])
        return (m2 * m1 ** (-1)).transpose()

    def eigenvector_with_eigenvalue(self, lin_op, lm):
        '''Let lin_op(f, t) be an endomorphism of self and assume it
        has a unique eigenvector (up to constant) with eigenvalue lm.
        This method returns the eigenvector.
        '''
        basis = self.basis()
        dim = len(basis)
        if hasattr(lm, "parent"):
            K = lm.parent()
            if hasattr(K, "fraction_field"):
                K = K.fraction_field()
        else:
            K = QQ
        A = self.matrix_representaion(lin_op)
        S = PolynomialRing(K, names="x")
        x = S.gens()[0]
        f = S(A.charpoly())
        g = S(f // (x - lm))
        cffs_g = [g[y] for y in range(dim)]
        A_pws = []
        C = identity_matrix(dim)
        for i in range(dim):
            A_pws.append(C)
            C = A * C

        for i in range(dim):
            clm_i = [a.columns()[i] for a in A_pws]
            w = sum((a * v for a, v in zip(cffs_g, clm_i)))
            if w != 0:
                egvec = w
                break

        res = sum([a * b for a, b in zip(egvec, basis)])
        # TODO: Use a construction class to construct basis.
        if all(hasattr(b, "_construction")
               and b._construction is not None for b in basis):
            res._construction = sum([a * b._construction
                                     for a, b in zip(egvec, basis)])
        if hasattr(res, 'set_parent_space'):
            res.set_parent_space(self)
        return res

    def _to_vector(self, fm, tpls=None):
        '''
        Returns a vector corresponding to fm.
        By this method, self.basis() becomes the standard basis.
        '''
        if tpls is None:
            tpls = self.linearly_indep_tuples()
        m1 = matrix([[f[t] for t in tpls] for f in self.basis()])
        v = vector([fm[t] for t in tpls])
        return v * m1 ** (-1)

    def _to_form(self, v):
        '''
        The inverse to _to_vector.
        '''
        basis = self.basis()
        return sum((f * a for a, f in zip(v, basis)))

    def contains(self, f):
        '''If self._to_form(self._to_vector(f)) is equal to f with
        this precision, then return True, otherwise False.
        f may not be contained in self even if this method returns
        True.
        '''
        return self._to_form(self._to_vector(f)) == f
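
# A minimal usage sketch (illustrative; run inside Sage). Toy is a fake
# two-dimensional "space" whose basis elements are plain dictionaries,
# included only to show the matrix_representaion contract:
# sage: class Toy(ModularFormModule):
# ....:     def basis(self):
# ....:         return [{0: 1, 1: 0}, {0: 0, 1: 1}]
# ....:     def linearly_indep_tuples(self):
# ....:         return [0, 1]
# sage: Toy().matrix_representaion(lambda f, t: 2 * f[t])
# [2 0]
# [0 2]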
3,849
33.684685
72
py
degree2
degree2-master/all.py
# -*- coding: utf-8 -*-
from degree2.scalar_valued_smfs import (
    x10_with_prec,
    x12_with_prec,
    x35_with_prec,
    eisenstein_series_degree2,
    KlingenEisensteinAndCuspForms,
    y12_with_prec,
    CuspFormsDegree2,
    degree2_modular_forms_ring_level1_gens)

from degree2.scalar_valued_smfs import SpaceOfModForms as ModularFormsDegree2

from degree2.elements import (ModFormQexpLevel1, QexpLevel1,
                              SymWtGenElt, SymWtModFmElt)

from degree2.rankin_cohen_diff import (
    diff_opetator_4,
    rankin_cohen_pair_sym,
    rankin_cohen_pair_det2_sym,
    rankin_cohen_triple_det_sym2,
    rankin_cohen_triple_det_sym4)

from degree2.basic_operation import number_of_procs as degree2_number_of_procs
759
32.043478
78
py
degree2
degree2-master/tsushima_dimension_formula.py
# -*- coding: utf-8 -*- from sage.all import (NumberField, var, QQ, PolynomialRing, cached_function, dimension_cusp_forms, O, PowerSeriesRing) global_ring = PolynomialRing(QQ, names="t,s") def derivative_exp(f, n, t): if n == 0: return f else: return derivative_exp(t * f.derivative(t), n - 1, t) def derivative_pol(f, pl): pl = pl * one t, s = pl.parent().gens() def mul(a, g): nm = g.numerator() dm = g.denominator() return a * nm / dm return sum([mul(v, derivative_exp(derivative_exp(f, a, t), b, s)) for (a, b), v in pl.dict().iteritems()]) def trace(f, key): al = root_of_unities()[key] K = al.parent() res = 0 nm = f.numerator() dm = f.denominator() for be in al.galois_conjugates(K): phi = K.hom(be, K) res += nm.map_coefficients(phi) / dm.map_coefficients(phi) return global_ring.fraction_field()(res) @cached_function def root_of_unities(): x = var("x") dct = {"i": x ** 2 + 1, "rho": x ** 2 + x + 1, "omega": x ** 4 + x ** 3 + x ** 2 + x + 1, "sigma": x ** 4 - x ** 2 + 1} dctnm = {k: NumberField(v, names=k) for k, v in dct.iteritems()} return {k: v.gens()[0] for k, v in dctnm.iteritems()} two = QQ(2) thr = QQ(3) fiv = QQ(5) one = global_ring(1) base_rat_func = 1 / ((1 - global_ring.gens()[0]) * (1 - global_ring.gens()[1])) def deriv_trace(pl, dct, key): return trace(derivative_pol(base_rat_func.subs(dct), pl), key) def first_three_lines(): t, s = global_ring.gens() k, j = t, s pl1 = (two ** (-7) * thr ** (-3) * fiv ** (-1) * (2 * j + 1) * (k - 2) * (2 * j + k - 1) * (2 * j + 2 * k - 3)) pl2 = - two ** (-5) * thr ** (-2) * (2 * j + 1) * (2 * j + 2 * k - 3) pl3 = two ** (-4) * thr ** (-1) * (2 * j + 1) pl = pl1 + pl2 + pl3 f1 = (two ** (-7) * thr ** (-2) * 7 * (k - 2) * (2 * j + k - 1) - two ** (-4) * thr ** (-1) * (2 * j + 2 * k - 3) + two ** (-5) * 3) f2 = two ** (-7) * thr ** (-1) * fiv * (2 * j + 2 * k - 3) - two ** (-3) f3 = two ** (-7) * (2 * j + 1) res = 0 res += derivative_pol(base_rat_func, pl) res += derivative_pol(base_rat_func.subs({t: -t}), f1) res += derivative_pol(base_rat_func.subs({s: -s}), f2) res += derivative_pol(base_rat_func.subs({t: -t, s: -s}), f3) return res def rem_line_1_2(): t, s = global_ring.gens() k, j = t, s i = root_of_unities()["i"] pl1 = two ** (-6) * thr ** (-1) * i * (2 * j + k - 1) - two ** (-4) * i pl2 = two ** (-5) * (i + 1) pl3 = two ** (-6) * thr ** (-1) * (k - 2) - two ** (-4) pl4 = two ** (-5) * (i + 1) res = 0 res += deriv_trace(pl1, {t: i * t}, "i") res += deriv_trace(pl2, {t: -t, s: i * s}, "i") res += deriv_trace(pl3, {t: i * t, s: -s}, "i") res += deriv_trace(pl4, {t: -i * t, s: i * s}, "i") return res def rem_line_3_4(): t, s = global_ring.gens() j = s r = root_of_unities()["rho"] pl1 = thr ** (-3) * (r + 1) pl2 = two ** (-2) * thr ** (-4) * (2 * r + 1) * (2 * j + 1) pl3 = - two ** (-2) * thr ** (-2) * (2 * r + 1) pl4 = thr ** (-3) res = 0 key = "rho" res += deriv_trace(pl1, {t: -t, s: r * s}, key) res += deriv_trace(pl2, {t: r * t, s: r * s}, key) res += deriv_trace(pl3, {t: r * t, s: -r * s}, key) res += deriv_trace(pl4, {t: -r * t, s: r * s}, key) return res def rem_line_5_9(): t, s = global_ring.gens() k, j = t, s r = root_of_unities()["rho"] pl1 = (two ** (-1) * thr ** (-4) * (1 - r) * (2 * j + 2 * k - 3) - two ** (-1) * thr ** (-2) * (1 - r)) pl2 = (two ** (-3) * thr ** (-4) * (r + 2) * (2 * j + k - 1) - two ** (-2) * thr ** (-3) * (5 * r + 6)) pl3 = - (two ** (-3) * thr ** (-3) * (r + 2) * (2 * j + k - 1) - two ** (-2) * thr ** (-2) * (r + 2)) pl4 = (two ** (-3) * thr ** (-4) * (1 - r) * (k - 2) + two ** (-2) * thr ** (-3) * (r - 5)) pl5 = 
(two ** (-3) * thr ** (-3) * (1 - r) * (k - 2) - two ** (-2) * thr ** (-2) * (1 - r)) res = 0 key = "rho" res += deriv_trace(pl1, {t: t, s: r * s}, key) res += deriv_trace(pl2, {t: r * t, s: s}, key) res += deriv_trace(pl3, {t: -r * t, s: s}, key) res += deriv_trace(pl4, {t: r * t, s: r ** 2 * s}, key) res += deriv_trace(pl5, {t: -r * t, s: r ** 2 * s}, key) return res def rem_line_10_11(): t, s = global_ring.gens() om = root_of_unities()["omega"] sgm = root_of_unities()["sigma"] pl1 = fiv ** (-2) pl2 = - fiv ** (-2) * om ** 2 pl3 = two ** (-3) * thr ** (-2) * (sgm ** 2 + 1) pl4 = - two ** (-3) * thr ** (-2) * (sgm + sgm ** 3) res = 0 res += deriv_trace(pl1, {t: om * t, s: om ** 4 * s}, "omega") res += deriv_trace(pl2, {t: om * t, s: om ** 3 * s}, "omega") res += deriv_trace(pl3, {t: sgm ** 7 * t, s: - s}, "sigma") res += deriv_trace(pl4, {t: sgm ** 7 * t, s: sgm ** 8 * s}, "sigma") return res @cached_function def gen_func_maybe_cusp(): return (first_three_lines() + rem_line_1_2() + rem_line_3_4() + rem_line_5_9() + rem_line_10_11()) def gen_func_maybe_cusp_num_t(parity=None): t, s = global_ring.gens() dnm1 = (1 - t ** 4) * (1 - t ** 6) * (1 - t ** 10) * (1 - t ** 12) dnm2 = (1 - s ** 3) * (1 - s ** 4) * (1 - s ** 5) * (1 - s ** 6) nm = global_ring(gen_func_maybe_cusp() * dnm1 * dnm2) if parity is None: return nm / dnm2 else: e = parity % 2 nm = sum([t ** a * s ** b * v for (a, b), v in nm.dict().iteritems() if a % 2 == e]) return nm / dnm2 def gen_func_maybe_cusp_num_t_power_srs(parity=None, prec=10): R = PolynomialRing(QQ, names="t") S = PowerSeriesRing(R, names="s", default_prec=prec) s = S.gen() num = gen_func_maybe_cusp_num_t(parity=parity) return S(num) + O(s ** prec) def gen_func_maybe_except_cusp(j): ''' j: even nonnegative integer. If j = 0, it returns the Hilbert series (as a rational function) of the space of Siegel-Eisenstein series and Klingen-Eisenstein series. If j > 0, it returns a Hilbert series which is equal to sum_{k > 0} dim N_{k, j} t^k up to a polynomial with degree < 5. Here N_{k, j} is the space of Klingen-Eisenstein series. ''' R = PolynomialRing(QQ, names="t") t = R.gen() h1 = t ** 12 / ((1 - t ** 4) * (1 - t ** 6)) if j > 0: f = sum([t ** k * dimension_cusp_forms(1, k) for k in range(0, 5 + j)]) return (h1 - f) * t ** (-j) elif j == 0: return h1 + R(1) / (1 - t ** 2) - t ** 2 def gen_func_maybe_except_cusp_num(j): R = PolynomialRing(QQ, names="t") t = R.gen() dnm = (1 - t ** 4) * (1 - t ** 6) * (1 - t ** 10) * (1 - t ** 12) return R(gen_func_maybe_except_cusp(j) * dnm) def gen_func_maybe_cusp_num(j, parity=None): f = gen_func_maybe_cusp_num_t_power_srs(parity=parity, prec=j // 2 + 1) nm = f[j // 2] return t_delete_terms_of_small_degrees(nm) def t_dnm(): R = PolynomialRing(QQ, names="t") t = R.gen() dnm = (1 - t ** 4) * (1 - t ** 6) * (1 - t ** 10) * (1 - t ** 12) return dnm def t_delete_terms_of_small_degrees(f): ''' f is a polynomial in t. Returns a polynomial g which is congruent to f modulo t_dnm so that g/t_dnm does not have terms with degree < 4. ''' R = PowerSeriesRing(QQ, names="t") S = PolynomialRing(QQ, names="t") t = R.gen() dnm = R(t_dnm()) g = R(f / dnm) + O(t ** 4) a = S(sum([t ** i * g[i] for i in range(4)])) return f - t_dnm() * a def hilbert_series_num_maybe(j, parity=None): ''' Returns the numerator of a Hilbert series which is equal to sum_{k > 0} dim M_{k, j}(Gamma_{2}) t^k modulo a polynomial of degree < 5.
''' if parity == 1: a = 0 else: a = gen_func_maybe_except_cusp_num(j) nm = a + gen_func_maybe_cusp_num(j, parity=parity) return nm # The result when j = 10 is correct even if parity is 1. def hilbert_series_maybe(j, parity=None, prec=30): ''' Returns a Hilbert series which is equal to sum_{k > 0} dim M_{k, j}(Gamma_{2}) t^k modulo a polynomial of degree < 5. ''' R = PowerSeriesRing(QQ, names="t", default_prec=prec) t = R.gen() dnm = R(t_dnm()) nm = hilbert_series_num_maybe(j, parity=parity) return (nm + O(t ** prec)) / dnm # t, s = global_ring.gens() # gen_func_maybe_cusp_num_t(0).subs({t:1}) # gen_func_maybe_cusp_num_t(1).subs({t:1}) # from sage.all import PowerSeriesRing # S = PowerSeriesRing(QQ, names="t", default_prec=100) # t = S.gens()[0] # R = PowerSeriesRing(S, names="s")
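# --- Editor's note: hedged usage sketch, not part of the original file. ---
# Assuming a Sage session in which this module is importable, the series
# defined above can be consumed as follows (names as defined above):
#
#     h = hilbert_series_maybe(0, prec=20)
#     h[10]   # expected 2 = dim M_{10, 0}(Gamma_2), spanned by es4 * es6 and x10
#
# Only coefficients of t^k with k >= 5 are reliable, since the series is only
# guaranteed modulo a polynomial of degree < 5.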
8,747
30.467626
79
py
degree2
degree2-master/utils.py
# -*- coding: utf-8 -*- from __future__ import print_function import traceback from multiprocessing import Process, Pipe, cpu_count import operator from itertools import groupby from abc import ABCMeta, abstractmethod import sage from sage.misc.cachefunc import cached_function from sage.all import CC, RR, factorial, Integer, vector, ceil def _partition(num_ls, n): ''' num_ls is a list of non-negative real numbers. Returns a list of lists of indices. ''' m = len(num_ls) wts = [sum(num_ls[:i + 1]) for i in range(m)] av_wt = RR(wts[-1]) / RR(n) def fn(i): return max(ceil(RR(wts[i]) / RR(av_wt)), 1) return [list(v) for _, v in groupby(range(m), fn)] def partition_weighted(l, n, weight_fn=None): ''' weight_fn is a function defined on an element of l. Divides l into n lists so that the sum of weight_fn of each list is almost the same. ''' if n == 1: return [l] idx_list = _partition([weight_fn(x) for x in l] if weight_fn is not None else [1 for _ in l], n) return [[l[i] for i in idl] for idl in idx_list] def pmap(fn, l, weight_fn=None, num_of_procs=None): ''' Parallel map. The meaning of weight_fn is the same as the meaning of the argument of partition_weighted. ''' if num_of_procs == 1: return [fn(a) for a in l] if num_of_procs is not None: num = min(len(l), num_of_procs) else: num = cpu_count() ls = partition_weighted(l, num, weight_fn=weight_fn) pipes = [Pipe() for _ in ls] procs = [Process(target=_spawn(lambda x: [fn(a) for a in x]), args=(c, x)) for x, (_, c) in zip(ls, pipes)] for p in procs: p.start() try: vals = [parent.recv() for parent, _ in pipes] except KeyboardInterrupt: # Kill processes. for p in procs: p.terminate() p.join() raise finally: for p in procs: p.join() try: return reduce(operator.add, vals, []) except TypeError: for e in vals: if isinstance(e, BaseException): print(e._traceback) raise e def _spawn(f): def fun(pipe, x): try: pipe.send(f(x)) except BaseException as e: e._traceback = traceback.format_exc() pipe.send(e) finally: pipe.close() return fun def group(ls, n): ''' Split ls into chunks of length n; the last chunk may be shorter. ''' m = len(ls) // n if len(ls) % n == 0: return [ls[i * n:i * n + n] for i in range(m)] return [ls[i * n:i * n + n] for i in range(m)] + [ls[n * m:]] def mul(ls): return reduce(operator.mul, ls, 1) def list_group_by(ls, key_func): data = sorted(ls, key=key_func) return [(k, list(v)) for k, v in groupby(data, key_func)] def uniq(ls): return list(set(ls)) def combination(n, m): return factorial(n) / (factorial(m) * factorial(n - m)) @cached_function def naive_det_func(n): ''' Returns a function that computes the determinant of an n by n matrix.
''' def removed_list_at(i, l): return [l[j] for j in range(len(l)) if i != j] def _det(ls): if n == 1: return ls[0][0] else: _det_func = naive_det_func(n - 1) ls1 = [l[:-1] for l in ls] return (-1) ** (n + 1) * \ sum([(-1) ** i * _det_func(removed_list_at(i, ls1)) * ls[i][-1] for i in range(n)]) return _det def naive_det(m): n = len(list(m)) if n == 1: return m[0][0] else: res = 0 for i, a in zip(range(n), m[0]): res += (-1) ** i * a * naive_det( [[b for j, b in zip(range(n), l) if i != j] for l in m[1:]]) return res def det(m): m = [list(a) for a in m] n = len(m) if n <= 2: return naive_det(m) i = j = None for a in range(n): for b in range(n): if m[a][b].is_unit(): i, j = a, b break if i is None: return naive_det(m) def exchange(l, a, b): if a == b: return l d = {a: l[b], b: l[a]} return [x if i != a and i != b else d[i] for x, i in zip(l, range(len(l)))] sgn = 1 m = exchange(m, 0, i) m = [exchange(l, 0, j) for l in m] if i != 0: sgn *= -1 if j != 0: sgn *= -1 inv = (m[0][0]) ** (-1) l0 = [a * inv for a in m[0][1:]] m1 = [] for v in m[1:]: m1.append([a - b * v[0] for a, b in zip(v[1:], l0)]) return sgn * det(m1) * m[0][0] def linearly_indep_rows_index_list(A, r): ''' Assume rank A = r and the number of columns is r. This function returns the list of indices lst such that [A.rows()[i] for i in lst] has length r and linearly independent. ''' return find_linearly_indep_indices(A, r) def find_linearly_indep_indices(vectors, r): ''' Let vectors be a list of vectors or a list of list. Assume r be the rank of vectors. This function returns a list of indices I of length r such that the rank of [vectors[i] for i in I] is equal to r. ''' acc = [] if isinstance(vectors[0], list): vectors = [vector(a) for a in vectors] while True: if r == 0: return acc nrws = len(vectors) first, first_r_idx = next((a, i) for i, a in enumerate(vectors) if a != 0) nonzero_col_index, a = next((j, a) for j, a in enumerate(first) if a != 0) v = a ** (-1) * first vectors1 = [] for j in range(first_r_idx + 1, nrws): w = vectors[j] vectors1.append(w - w[nonzero_col_index] * v) vectors = vectors1 r -= 1 if acc == []: acc.append(first_r_idx) else: acc.append(first_r_idx + acc[-1] + 1) def polynomial_func(pl): l = pl.coefficients() m = len(l) return lambda y: sum([y ** i * l[i] for i in range(m)]) def is_number(a): if isinstance(a, (int, float, long, complex, sage.rings.all.CommutativeRingElement)): return True elif hasattr(a, 'parent'): numgen = sage.rings.number_field.number_field.NumberField_generic parent = a.parent() return CC.has_coerce_map_from(parent) or \ isinstance(parent, numgen) or \ (hasattr(parent, "is_field") and hasattr(parent, "is_finite") and parent.is_field() and parent.is_finite()) else: return False def is_integer(a): return isinstance(a, (int, Integer)) def _is_triple_of_integers(tpl): return isinstance(tpl, tuple) and len(tpl) == 3 and \ all([is_integer(a) for a in list(tpl)]) class CommRingLikeElment(object): __metaclass__ = ABCMeta @abstractmethod def __mul__(self, other): raise NotImplementedError @abstractmethod def __add__(self, other): raise NotImplementedError @abstractmethod def __eq__(self, other): raise NotImplementedError def __rmul__(self, other): return self.__mul__(other) def __radd__(self, other): return self.__add__(other) def __sub__(self, other): return self.__add__(other.__neg__()) def __rsub__(self, other): return self.__neg__().__add__(other) def __neg__(self): return self.__mul__(-1) def __ne__(self, other): return not self.__eq__(other)
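# --- Editor's note: hedged usage sketch, not part of the original utils.py. ---
# Hand-traced examples for two helpers above; treat the stated outputs as
# assumptions to verify in a Sage session:
#
#     partition_weighted(range(6), 3, weight_fn=lambda x: x)
#     # => [[0, 1, 2], [3, 4], [5]]   (chunk weights 3, 7, 5, total 15)
#
#     find_linearly_indep_indices([[1, 0], [2, 0], [0, 1]], 2)
#     # => [0, 2]   (rows 0 and 2 already span QQ^2)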
7,573
24.761905
82
py
degree2
degree2-master/__init__.py
# -*- coding: utf-8 -*-
24
11.5
23
py
degree2
degree2-master/elements.py
# -*- coding: utf-8 -*- from abc import ABCMeta, abstractmethod import operator from itertools import imap import sage from sage.all import (QQ, save, load, gcd, PolynomialRing, divisors, mod, vector) from degree2.utils import (is_number, list_group_by, CommRingLikeElment, pmap) from degree2.basic_operation import (_mul_fourier, _add_fourier, _mul_fourier_by_num, PrecisionDeg2, reduced_form_with_sign, _spos_def_mats_lt, common_prec, _common_base_ring, common_base_ring) from degree2.hecke_module import (HeckeModuleElement, SymTensorRepElt) def to_sorted_fc_list(fc_dct): dct = {k: v for k, v in fc_dct.iteritems() if v != 0} keys = dct.keys() keys_sorted = sorted(keys, key=lambda x: (max(x[0], x[2]), x[0], x[2], abs(x[1]), x[1])) return [(k, dct[k]) for k in keys_sorted] class FormalQexp(CommRingLikeElment): ''' A parent class of QexpLevel1 and QseriesTimesQminushalf. ''' __metaclass__ = ABCMeta def __init__(self, fc_dct, prec, base_ring=QQ, is_cuspidal=False): ''' fc_dct is a dictionary whose set of keys is PrecisionDeg2(prec). ''' self._is_cuspidal = is_cuspidal mp1 = fc_dct.copy() prec = PrecisionDeg2(prec) diff = set(prec) - set(mp1.keys()) mp1.update({t: base_ring(0) for t in diff}) self.__mp = mp1 self.__prec = prec self.__base_ring = base_ring # Unless self._is_gen, it is a generator's name. e.g "es4", "x12". self._is_gen = False self._sym_wt = 0 self._parent_space = None def __add__(self, other): raise NotImplementedError def __mul__(self, other): raise NotImplementedError def __eq__(self, other): if other == 0: return all([x == 0 for x in self.fc_dct.itervalues()]) else: return self - other == 0 @property def base_ring(self): return self.__base_ring @property def fc_dct(self): return self.__mp @property def prec(self): return self.__prec @property def sym_wt(self): return 0 @property def parent_space(self): '''Similar to self.parent() in Sage. ''' return self._parent_space def set_parent_space(self, space): self._parent_space = space def __str__(self): return self.fc_dct.__str__() def _name(self): return 'q-expansion' def __repr__(self): return self._name() + self._repr_base() def _repr_base(self): l = [str(k) + ': ' + str(v) for k, v in self.sorted_list()] return ' with prec = ' + str(self.prec) \ + ': \n' + '{' + ",\n ".join(l) + '}' def fourier_coefficient(self, n, r, m): return self.fc_dct[(n, r, m)] def __getitem__(self, idx): return self.fc_dct[idx] def iteritems(self): return self.fc_dct.iteritems() def sorted_list(self): return to_sorted_fc_list(self.fc_dct) @abstractmethod def _differential_operator_monomial(self, a, b, c): pass def differentiate_wrt_tau(self): ''' Let [[tau, z],[z, w]] be the parameter of the Siegel upper half space of degree 2. Returns the derivative with respect to tau. ''' return self._differential_operator_monomial(1, 0, 0) def differentiate_wrt_w(self): ''' Let [[tau, z],[z, w]] be the parameter of the Siegel upper half space of degree 2. Returns the derivative with respect to w. ''' return self._differential_operator_monomial(0, 0, 1) def differentiate_wrt_z(self): ''' Let [[tau, z],[z, w]] be the parameter of the Siegel upper half space of degree 2. Returns the derivative with respect to z. ''' return self._differential_operator_monomial(0, 1, 0) cache_gens_power = False class QexpLevel1(FormalQexp): ''' A class of formal Fourier series of degree 2. ''' def __init__(self, fc_dct, prec, base_ring=QQ, is_cuspidal=False): ''' fc_dct is a dictionary whose set of keys is PrecisionDeg2(prec). 
''' FormalQexp.__init__(self, fc_dct, prec, base_ring=base_ring, is_cuspidal=is_cuspidal) def __eq__(self, other): if other == 0: return all([x == 0 for x in self.fc_dct.itervalues()]) else: return self - other == 0 def _to_format_dct(self): data_dict = {"prec": self.prec._to_format_dct(), "base_ring": self.base_ring, "fc_dct": self.fc_dct, "is_cuspidal": self._is_cuspidal} return data_dict def save_as_binary(self, filename): data_dict = self._to_format_dct() save(data_dict, filename) @classmethod def _from_dict_to_object(cls, data_dict): if "mp" in data_dict.keys(): kys = ["mp", "prec", "base_ring", "is_cuspidal"] else: kys = ["fc_dct", "prec", "base_ring", "is_cuspidal"] fc_dct, prec, base_ring, is_cuspidal = [data_dict[ky] for ky in kys] prec = PrecisionDeg2._from_dict_to_object(prec) return cls(fc_dct, prec, base_ring=base_ring, is_cuspidal=is_cuspidal) @classmethod def load_from(cls, filename): data_dict = load(filename) return cls._from_dict_to_object(data_dict) def __add__(self, other): if is_number(other): fcmap = self.fc_dct.copy() fcmap[(0, 0, 0)] = self.fc_dct[(0, 0, 0)] + other cuspidal = other == 0 and self._is_cuspidal return QexpLevel1(fcmap, self.prec, self.base_ring, is_cuspidal=cuspidal) prec = common_prec([self, other]) bsring = _common_base_ring(self.base_ring, other.base_ring) cuspidal = self._is_cuspidal and other._is_cuspidal ms = self.fc_dct mo = other.fc_dct fcmap = _add_fourier(ms, mo, prec, cuspidal) return QexpLevel1(fcmap, prec, base_ring=bsring, is_cuspidal=cuspidal) def __mul__(self, other): if is_number(other): if other == 1: return self fcmap = _mul_fourier_by_num(self.fc_dct, other, self.prec, self._is_cuspidal) if hasattr(other, "parent"): bs = _common_base_ring(self.base_ring, other.parent()) else: bs = self.base_ring return QexpLevel1(fcmap, self.prec, base_ring=bs, is_cuspidal=self._is_cuspidal) elif isinstance(other, QexpLevel1): prec = common_prec([self, other]) bsring = _common_base_ring(self.base_ring, other.base_ring) ms = self.fc_dct mo = other.fc_dct cuspidal = self._is_cuspidal or other._is_cuspidal fcmap = _mul_fourier(ms, mo, prec, cuspidal) res = QexpLevel1(fcmap, prec, base_ring=bsring, is_cuspidal=cuspidal) return res elif isinstance(other, (SymWtGenElt, QseriesTimesQminushalf)): return other.__mul__(self) else: raise NotImplementedError # dictionary s.t. ("gen_name", prec) => {0: f, 1: f^2, 2: f^4, 3: f^8, ...} gens_powers_cached_dict = {} def _calc_pows_lt_nth_pow_of_2(self, n, cached_dict=None): ''' If cached_dict is not None, cached_dict is a dictionary s.t. 0 => self, 1 => self^2, ... m => self^(2^m), where m <= n - 1. This method returns a dictionary 0 => self, 1 => self^2, ... n-1 => self^(2^(n-1)). 
''' if cached_dict is not None and cached_dict != {}: m = len(cached_dict) - 1 f = cached_dict[m] else: m = 0 f = self cached_dict = {0: f} for i in range(m + 1, n): f = f * f cached_dict[i] = f return cached_dict def __pow__(self, other): if other == 0: return 1 elif other == 1: return self elif other == -1: return self._inverse() s = format(other, 'b') revs = s[::-1] n = len(s) if cache_gens_power and self._is_gen: gens_pws_dcts = QexpLevel1.gens_powers_cached_dict prec = self.prec key = (self._is_gen, prec) if key in gens_pws_dcts: cached_dict = gens_pws_dcts[key] else: cached_dict = {0: self} if not n - 1 in cached_dict.keys(): cached_dict = self._calc_pows_lt_nth_pow_of_2(n, cached_dict) QexpLevel1.gens_powers_cached_dict[key] = cached_dict else: cached_dict = self._calc_pows_lt_nth_pow_of_2(n) res = 1 for i in range(n): if int(revs[i]) != 0: res *= cached_dict[i] return res def theta_operator4(self): dic = dict() for k, v in self.fc_dct.iteritems(): (n, r, m) = k dic[k] = (4 * n * m - r ** 2) * v return QexpLevel1(dic, self.prec, self.base_ring) def phi_operator(self): d = {n: self[(n, 0, 0)] for n in self.prec._phi_operator_prec()} return {n: v for n, v in d.iteritems() if v != 0} def gcd_of_coefficients(self): K = self.base_ring l = [K(v) for v in self.fc_dct.values()] numgen = sage.rings.number_field.number_field.NumberField_generic if isinstance(K, numgen): l = [K(v) for v in self.fc_dct.values()] R = K.ring_of_integers() return R.fractional_ideal(l) else: return reduce(gcd, l) def gcd_of_norms(self, bd=False): ''' Returns the g.c.d of absolute norms of Fourier coefficients. ''' def norm(x): if x in QQ: return x else: return x.norm() if bd is False: bd = self.prec return gcd([QQ(norm(self.fc_dct[t])) for t in PrecisionDeg2(bd)]) def gcd_of_norms_ratio_theta4(self, bd=False): return self.theta_operator4().gcd_of_norms(bd) / self.gcd_of_norms(bd) def ratio_theta4(self): I = self.gcd_of_coefficients() J = self.theta_operator4().gcd_of_coefficients() return J * I ** (-1) def _differential_operator_monomial(self, a, b, c): ''' del_tau^a del_z^b del_w^c ''' fcmap = {(n, r, m): n ** a * r ** b * m ** c * v for (n, r, m), v in self.fc_dct.iteritems()} res = QexpLevel1(fcmap, self.prec, base_ring=self.base_ring, is_cuspidal=self._is_cuspidal) return res def theta_sym(self, j=2): ''' Returns an image as a vector valued (Sym_{j} j:even) Fourier expansion of the generalized Theta operator associated with the Rankin-cohen operator {F, G}_{Sym_{j}}. [Reference] Ibukiyama, Vector valued Siegel modular forms of symmetric tensor weight of small degrees, COMMENTARI MATHEMATICI UNIVERSITATIS SANCTI PAULI VOL 61, NO 1, 2012. Boecherer, Nagaoka, On p-adic properties of Siegel modular forms, arXiv, 2013. ''' R = PolynomialRing(QQ, "r1, r2, r3") (r1, r2, r3) = R.gens() S = PolynomialRing(R, "u1, u2") (u1, u2) = S.gens() pl = (r1 * u1 ** 2 + r2 * u1 * u2 + r3 * u2 ** 2) ** (j // 2) pldct = pl.dict() formsdict = {} for (_, i), ply in pldct.iteritems(): formsdict[i] = sum([v * self._differential_operator_monomial(a, b, c) for (a, b, c), v in ply.dict().iteritems()]) forms = [x for _, x in sorted([(i, v) for i, v in formsdict.iteritems()], key=lambda x: x[0])] return SymWtGenElt(forms, self.prec, self.base_ring) def change_ring(self, R=None, hom=None): ''' Returns a Fourier expansion whose base ring is changed. 
''' if hom is None: hom = R if R is None: R = hom.codomain() fc_map = {} for k, v in self.fc_dct.iteritems(): fc_map[k] = hom(v) return QexpLevel1(fc_map, self.prec, base_ring=R, is_cuspidal=self._is_cuspidal) def mod_p_map(self, p): fcmap = {} for k, v in self.fc_dct.iteritems(): if v != 0: fcmap[k] = modulo(v, p, self.base_ring) return fcmap def is_unit(self): ''' Returns true if the constant term of self is not zero. ''' return self[(0, 0, 0)] != 0 def _inverse(self): a = self[(0, 0, 0)] if a == 0: raise ZeroDivisionError prec = self.prec R = self.base_ring if a != R(1): return (self * a ** (-1))._inverse() * a ** (-1) res_dict = {(0, 0, 0): R(1)} def norm(t): return t[0] + t[2] prec_dict = dict(list_group_by(list(prec), norm)) prec_d_keys = sorted(prec_dict.keys())[1:] for a in prec_d_keys: for t in prec_dict[a]: l = list(_spos_def_mats_lt(t)) l.remove(t) res_dict[t] = - sum([res_dict[u] * self[(t[0] - u[0], t[1] - u[1], t[2] - u[2])] for u in l]) return QexpLevel1(res_dict, prec, base_ring=self.base_ring) def _down_prec(self, prec): prec = PrecisionDeg2(prec) d = self._to_format_dct() d["prec"] = prec._to_format_dct() fc_dct = {t: d["fc_dct"][t] for t in prec} d["fc_dct"] = fc_dct return QexpLevel1._from_dict_to_object(d) def divide(self, f, prec): ''' Assuming self is divisible by f, returns self/f. ''' if isinstance(f, QexpLevel1): return divide(f, self, prec) else: raise NotImplementedError def _mul_q_half_monom(f, a=1): ''' Let f be a formal Fourier expansion: f = sum_{n, r, m} a(n, r, m) q1^n t^r q2^m. Assuming f * q1^(-a) * t^a * q2^(-a) This function returns f * q1^(-a) * t^a * q2^(-a). Decrease prec by a. ''' if f.prec.type != "diag_max": raise NotImplementedError prec = PrecisionDeg2(f.prec.value - a) res_dc = {} fc_dct = f.fc_dct for n, r, m in prec: if 4 * (n + a) * (m + a) - (r - a) ** 2 <= 0: res_dc[(n, r, m)] = 0 else: res_dc[(n, r, m)] = fc_dct[(n + a, r - a, m + a)] return QexpLevel1(res_dc, prec.value, base_ring=f.base_ring) class QseriesTimesQminushalf(FormalQexp): ''' An instance of this class represents a formal qexpansion q1^(-1/2) * t^(1/2) * q2^(-1/2) sum_{n, r, m} a(n, r, m) q1^n t^r q2^m. A typical instance of this class is a return value of x5__with_prec. ''' def __init__(self, f): ''' f = sum_{n, r, m} a(n, r, m) q1^n t^r q2^m in the notation above. 
''' self.__f = f self._mul_dct = {} FormalQexp.__init__(self, f.fc_dct, f.prec, base_ring=f.base_ring) @property def f_part(self): return self.__f def __getitem__(self, t): if self._mul_dct == {}: self._mul_dct = {(n - QQ(1) / QQ(2), r + QQ(1) / QQ(2), m - QQ(1) / QQ(2)): v for (n, r, m), v in self.f_part.fc_dct.items()} return self._mul_dct[t] def _name(self): return 'q1^(-1/2)t^(1/2)q2^(-1/2) times q-expansion' def __mul__(self, other): if isinstance(other, QseriesTimesQminushalf): return _mul_q_half_monom(self.f_part * other.f_part) elif isinstance(other, QexpLevel1) or is_number(other): return QseriesTimesQminushalf(self.f_part * other) elif isinstance(other, SymWtGenElt): return other.__mul__(self) else: raise NotImplementedError def __add__(self, other): if other == 0: return self elif isinstance(other, QseriesTimesQminushalf): return QseriesTimesQminushalf(self.f_part + other.f_part) else: raise NotImplementedError def __pow__(self, other): if other == 0: return 1 elif other == 1: return self elif is_number(other) and other > 0: f = (self.f_part) ** other q, r = divmod(other, 2) g = _mul_q_half_monom(f, a=q) if r == 0: return g else: return QseriesTimesQminushalf(g) else: raise NotImplementedError def _differential_operator_monomial(self, a, b, c): fcmap = {(n, r, m): ((n - QQ(1) / QQ(2)) ** a * (r + QQ(1) / QQ(2)) ** b * (m - QQ(1) / QQ(2)) ** c * v) for (n, r, m), v in self.f_part.fc_dct.iteritems()} f = QexpLevel1(fcmap, self.prec, base_ring=self.base_ring) return QseriesTimesQminushalf(f) class ModFormQsrTimesQminushalf(QseriesTimesQminushalf): ''' An instance of QseriesTimesQminushalf and can be regard as modular form. (i.e. multiple of x5 by a modular form of level 1). A typical instance of this class is a return value of x5__with_prec. ''' def __init__(self, f, wt): QseriesTimesQminushalf.__init__(self, f) self.__wt = wt @property def wt(self): return self.__wt def __mul__(self, other): res = QseriesTimesQminushalf.__mul__(self, other) if is_number(other): return ModFormQsrTimesQminushalf(res.f_part, self.wt) elif isinstance(other, ModFormQexpLevel1): return ModFormQsrTimesQminushalf(res.f_part, self.wt + other.wt) elif isinstance(other, ModFormQsrTimesQminushalf): return ModFormQexpLevel1(self.wt + other.wt, res.fc_dct, res.prec, base_ring=res.base_ring) else: return res def __add__(self, other): res = QseriesTimesQminushalf.__add__(self, other) if (isinstance(other, ModFormQsrTimesQminushalf) and self.wt == other.wt): return ModFormQsrTimesQminushalf(res.f_part, self.wt) else: return res def __pow__(self, other): res = QseriesTimesQminushalf.__pow__(self, other) wt = self.wt * other if isinstance(res, QexpLevel1): return ModFormQexpLevel1(wt, res.fc_dct, res.prec, base_ring=res.base_ring) else: return ModFormQsrTimesQminushalf(res.f_part, wt) def is_hol_mod_form(f): return isinstance(f, ModFormQexpLevel1) class ModFormQexpLevel1(QexpLevel1, HeckeModuleElement): def __init__(self, wt, fc_dct, prec, base_ring=QQ, is_cuspidal=False, given_reduced_tuples_only=False): ''' given_reduced_tuples_only means that Fourier coefficients are given at reduced tuples. ''' self.__wt = wt self._construction = None prec = PrecisionDeg2(prec) if given_reduced_tuples_only: if is_cuspidal or wt % 2 == 1: # level 1 specific. 
for rdf, col in \ prec.group_by_reduced_forms_with_sgn().iteritems(): for t, sgn in col: fc_dct[t] = fc_dct[rdf] * sgn ** wt else: for rdf, col in prec.group_by_reduced_forms().iteritems(): for t in col: fc_dct[t] = fc_dct[rdf] QexpLevel1.__init__(self, fc_dct, prec, base_ring=base_ring, is_cuspidal=is_cuspidal) @property def wt(self): return self.__wt def __eq__(self, other): if other == 0: return all([x == 0 for x in self.fc_dct.itervalues()]) else: return self - other == 0 def __ne__(self, other): return not self.__eq__(other) def __add__(self, other): if is_number(other): fcmap = self.fc_dct.copy() fcmap[(0, 0, 0)] = self.fc_dct[(0, 0, 0)] + other if other == 0: return ModFormQexpLevel1(self.wt, fcmap, self.prec, self.base_ring, is_cuspidal=self._is_cuspidal) else: return QexpLevel1(fcmap, self.prec, self.base_ring) if is_hol_mod_form(other) and self.wt == other.wt: prec = common_prec([self, other]) bsring = _common_base_ring(self.base_ring, other.base_ring) ms = self.fc_dct mo = other.fc_dct cuspidal = self._is_cuspidal and other._is_cuspidal fcmap = _add_fourier(ms, mo, prec, cuspidal=cuspidal, hol=True) return ModFormQexpLevel1(self.wt, fcmap, prec, bsring, is_cuspidal=cuspidal, given_reduced_tuples_only=True) else: return QexpLevel1.__add__(self, other) def __radd__(self, other): return self.__add__(other) def __getitem__(self, idx): try: return self.fc_dct[idx] except KeyError: t, e = reduced_form_with_sign(idx) return self.fc_dct[t] * e ** (self.wt) # level 1 specific def __mul__(self, other): if is_number(other): if other == 1: return self fcmap = _mul_fourier_by_num(self.fc_dct, other, self.prec, cuspidal=self._is_cuspidal, hol=True) if hasattr(other, "parent"): bs = _common_base_ring(self.base_ring, other.parent()) else: bs = self.base_ring return ModFormQexpLevel1(self.wt, fcmap, self.prec, base_ring=bs, is_cuspidal=self._is_cuspidal, given_reduced_tuples_only=True) if isinstance(other, ModFormQexpLevel1) and other.wt == 0: return self.__mul__(other[(0, 0, 0)]) if is_hol_mod_form(other): prec = common_prec([self, other]) bsring = _common_base_ring(self.base_ring, other.base_ring) ms = self.fc_dct mo = other.fc_dct cuspidal = self._is_cuspidal or other._is_cuspidal fcmap = _mul_fourier(ms, mo, prec, cuspidal=cuspidal, hol=True) return ModFormQexpLevel1(self.wt + other.wt, fcmap, prec, base_ring=bsring, is_cuspidal=cuspidal, given_reduced_tuples_only=True) else: return QexpLevel1.__mul__(self, other) def __rmul__(self, other): return self.__mul__(other) def __pow__(self, other): if other == 0: return 1 res = QexpLevel1.__pow__(self, other) return ModFormQexpLevel1(self.wt * other, res.fc_dct, res.prec, res.base_ring) def __sub__(self, other): return self.__add__(other.__neg__()) def __rsub__(self, other): return self.__neg__().__add__(other) def __neg__(self): res = QexpLevel1.__neg__(self) return ModFormQexpLevel1(self.wt, res.fc_dct, res.prec, res.base_ring) def _name(self): return 'Siegel Modular form of weight ' + str(self.wt) def satisfies_maass_relation_for(self, n, r, m): if (n, r, m) == (0, 0, 0): return True return self[(n, r, m)] == sum([d ** (self.wt - 1) * self[(1, r / d, m * n / (d ** 2))] for d in divisors(gcd((n, r, m)))]) def _none_zero_tpl(self): keys_sorted = sorted(self.fc_dct.keys(), key=lambda x: (x[0] + x[2])) for t in keys_sorted: if self[t] != 0: return t def normalize(self, c): ''' Returns a c^(-1) * self. If c is a tuple (n, r, m), this returns self[(n, r, m)]^(-1) * self. 
''' if isinstance(c, tuple): a = self[c] else: a = c if a != 0: res = self pl = 1 if (hasattr(self, "_construction") and self._construction is not None): pl = a ** (-1) * self._construction res = a ** (-1) * self res._construction = pl return res else: raise NotImplementedError def raise_prec(self, bd): ''' Returns the same modular form as self whose prec is raised. ''' pass # if self._construction is None: # raise NotImplementedError # pl = self._construction # base_ring = self.base_ring # if self.wt%2 == 0: # tupls = tuples_even_wt_modular_forms(self.wt) # else: # tupls = tuples_even_wt_modular_forms(self.wt - 35) # x35 = x35_with_prec(bd) # e4 = eisenstein_series_degree2(4, bd) # e6 = eisenstein_series_degree2(6, bd) # x10 = x10_with_prec(bd) # x12 = x12_with_prec(bd) # def coeff(a, b, c, d): # if self.wt % 2 == 0: # return base_ring(pl.coefficient({ple4: a, ple6: b, # plx10: c, plx12: d})) # else: # return base_ring(pl.coefficient({ple4: a, ple6: b, plx10: c, # plx12: d, plx35: 1})) # l = [coeff(a, b, c, d) * e4**a * e6**b * x10**c * x12**d # for a, b, c, d in tupls if coeff(a, b, c, d) != 0] # s = reduce(operator.add, l) # if self.wt%2 == 0: # return s # else: # return s * x35 def _to_format_dct(self): d = {"wt": self.wt, "construction": self._construction if hasattr(self, "_construction") else None} return dict(d.items() + QexpLevel1._to_format_dct(self).items()) @classmethod def _from_dict_to_object(cls, data_dict): if "mp" in data_dict.keys(): kys = ["wt", "mp", "prec", "base_ring", "construction", "is_cuspidal"] else: kys = ["wt", "fc_dct", "prec", "base_ring", "construction", "is_cuspidal"] wt, fc_dct, prec, base_ring, const, is_cuspidal \ = [data_dict[ky] for ky in kys] prec = PrecisionDeg2._from_dict_to_object(prec) f = ModFormQexpLevel1(wt, fc_dct, prec, base_ring=base_ring, is_cuspidal=is_cuspidal) f._construction = const return f @classmethod def load_from(cls, filename): data_dict = load(filename) return cls._from_dict_to_object(data_dict) def change_ring(self, R=None, hom=None): ''' Returns a Fourier expansion whose base ring is changed. ''' f = QexpLevel1.change_ring(self, R=R, hom=hom) res = ModFormQexpLevel1(self.wt, f.fc_dct, self.prec, base_ring=f.base_ring, is_cuspidal=self._is_cuspidal) return res def _set_construction(self, c): self._construction = c def _inverse(self): res = QexpLevel1._inverse(self) return ModFormQexpLevel1(-self.wt, res.fc_dct, res.prec, base_ring=res.base_ring) def hecke_operator_acted(self, m, prec=None): ''' Returns T(m)self with precision prec. ''' prec = PrecisionDeg2(prec) fc_dct = {t: self.hecke_operator(m, t) for t in prec} return ModFormQexpLevel1(self.wt, fc_dct, prec, base_ring=self.base_ring, is_cuspidal=self._is_cuspidal) def divide(self, f, prec): res = QexpLevel1.divide(self, f, prec) if isinstance(f, ModFormQexpLevel1): return ModFormQexpLevel1(self.wt - f.wt, res.fc_dct, prec, res.base_ring) else: return res class SymWtGenElt(object): ''' Let Symm(j) be the symmetric tensor representation of degree j of GL2. Symm(j) is the space of homogenous polynomials of u1 and u2 of degree j. We take u1^j, .. u2^j as a basis of Symm(j) An instance of this class corresponds to a tuple of j Fourier expansions of degree 2. 
''' def __init__(self, forms, prec, base_ring=QQ): prec = PrecisionDeg2(prec) self.__base_ring = base_ring self.__prec = prec self.__sym_wt = len(forms) - 1 self.__forms = forms def __repr__(self): return "Formal Sym({j}) valued function with prec = {prec}".format( j=self.sym_wt, prec=self.prec) def _to_format_dct(self): return {"base_ring": self.base_ring, "prec": self.prec._to_format_dct(), "forms": [f._to_format_dct() for f in self.forms]} def save_as_binary(self, filename): save(self._to_format_dct(), filename) @classmethod def _from_dict_to_object(cls, data_dict): base_ring, prec, forms_dct = \ [data_dict[ky] for ky in ["base_ring", "prec", "forms"]] prec = PrecisionDeg2._from_dict_to_object(prec) forms = [QexpLevel1._from_dict_to_object(d) for d in forms_dct] return cls(forms, prec, base_ring) @classmethod def load_from(cls, filename): data_dict = load(filename) return cls._from_dict_to_object(data_dict) @property def forms(self): return self.__forms @property def base_ring(self): return self.__base_ring @property def prec(self): return self.__prec @property def sym_wt(self): return self.__sym_wt def __iter__(self): for f in self.forms: yield f def __getitem__(self, t): if (isinstance(t, tuple) and isinstance(t[0], tuple) and is_number(t[1])): tpl, i = t return self.forms[i][tpl] else: vec = vector([f[t] for f in self.forms]) return vec def _none_zero_tpl(self): if self[(1, 1, 1)] != 0: return (1, 1, 1) else: for t in sorted(self.prec, key=lambda x: max(x[0], x[2])): if self[t] != 0: return t def __add__(self, other): if other == 0: return self elif isinstance(other, SymWtGenElt) and self.sym_wt == other.sym_wt: prec = common_prec([self, other]) forms = [sum(tp) for tp in zip(other.forms, self.forms)] base_ring = _common_base_ring(self.base_ring, other.base_ring) return SymWtGenElt(forms, prec, base_ring) else: raise NotImplementedError def __radd__(self, other): return self.__add__(other) def __sub__(self, other): return self + (-1) * other def __mul__(self, other): if is_number(other): prec = self.prec forms = [other * f for f in self.forms] if hasattr(other, "parent"): base_ring = _common_base_ring(self.base_ring, other.parent()) else: base_ring = self.base_ring return SymWtGenElt(forms, prec, base_ring) if isinstance(other, (QexpLevel1, QseriesTimesQminushalf)): forms = [f * other for f in self.forms] prec = common_prec(forms) base_ring = _common_base_ring(self.base_ring, other.base_ring) return SymWtGenElt(forms, prec, base_ring) else: raise NotImplementedError def __rmul__(self, other): return self.__mul__(other) def gcd_of_coefficients(self): return gcd([x.gcd_of_coefficients() for x in self.forms]) def __eq__(self, other): if isinstance(other, SymWtGenElt) \ and self.sym_wt == other.sym_wt: return all([x == y for x, y in zip(self.forms, other.forms)]) elif other == 0: return all([f == 0 for f in self.forms]) else: raise NotImplementedError def __ne__(self, other): return not self.__eq__(other) def divide(self, f, prec, parallel=False): if parallel: res_forms = pmap(lambda x: x.divide(f, prec), self.forms) else: res_forms = [a.divide(f, prec) for a in self.forms] res_br = common_base_ring(res_forms) return SymWtGenElt(res_forms, prec, base_ring=res_br) def change_ring(self, R=None, hom=None): '''Return a Fourier expansion whose base_ring is changed. 
''' forms = [f.change_ring(R=R, hom=hom) for f in self.forms] return SymWtGenElt(forms, self.prec, base_ring=forms[0].base_ring) class SymWtModFmElt(SymWtGenElt, HeckeModuleElement): ''' An instance of this class corresponding to vector valued Siegel modular form of degree 2. ''' def __init__(self, forms, wt, prec, base_ring=QQ): SymWtGenElt.__init__(self, forms, prec, base_ring) self.__wt = wt def __repr__(self): return "Vector valued modular form of weight " + \ "det^{wt} Sym({j}) with prec = {prec}".format(wt=self.wt, j=self.sym_wt, prec=self.prec) def _to_format_dct(self): d1 = SymWtGenElt._to_format_dct(self) return dict([("wt", self.wt)] + d1.items()) @classmethod def _from_dict_to_object(cls, data_dict): forms_dct, wt, prec, base_ring = \ [data_dict[ky] for ky in ["forms", "wt", "prec", "base_ring"]] prec = PrecisionDeg2._from_dict_to_object(prec) forms = [QexpLevel1._from_dict_to_object(d) for d in forms_dct] return cls(forms, wt, prec, base_ring) @classmethod def load_from(cls, filename): data_dict = load(filename) return cls._from_dict_to_object(data_dict) @property def wt(self): return self.__wt def __add__(self, other): if other == 0: return self res = SymWtGenElt.__add__(self, other) if isinstance(other, SymWtModFmElt) \ and self.wt == other.wt: return SymWtModFmElt(res.forms, self.wt, res.prec, res.base_ring) else: return res def __radd__(self, other): return self.__add__(other) def __sub__(self, other): return self.__add__(other.__mul__(-1)) def __mul__(self, other): res = SymWtGenElt.__mul__(self, other) if is_number(other): return SymWtModFmElt(res.forms, self.wt, res.prec, res.base_ring) if isinstance(other, (ModFormQexpLevel1, ModFormQsrTimesQminushalf)): return SymWtModFmElt(res.forms, self.wt + other.wt, res.prec, res.base_ring) else: return res def __rmul__(self, other): return self.__mul__(other) def phi_operator(self): return self.forms[0].phi_operator() def _down_prec(self, prec): prec = PrecisionDeg2(prec) forms_res = [f._down_prec(prec) for f in self.forms] return SymWtModFmElt(forms_res, self.wt, prec, base_ring=self.base_ring) def __getitem__(self, t): if (isinstance(t, tuple) and isinstance(t[0], tuple) and is_number(t[1])): tpl, i = t return self.forms[i][tpl] else: vec = vector([f[t] for f in self.forms]) return SymTensorRepElt(vec, self.wt) def hecke_operator_acted(self, m, prec=None): prec = PrecisionDeg2(prec) fc_dct = {t: self.hecke_operator(m, t) for t in prec} dcts = [{t: v.vec[i] for t, v in fc_dct.items()} for i in range(self.sym_wt + 1)] forms = [QexpLevel1(d, prec, base_ring=self.base_ring) for d in dcts] return SymWtModFmElt(forms, self.wt, prec, base_ring=self.base_ring) def divide(self, f, prec, parallel=False): res = SymWtGenElt.divide(self, f, prec, parallel=parallel) if isinstance(f, ModFormQexpLevel1): return SymWtModFmElt(res.forms, self.wt - f.wt, prec, base_ring=res.base_ring) else: return res def change_ring(self, R=None, hom=None): forms = [f.change_ring(R=R, hom=hom) for f in self.forms] return SymWtModFmElt(forms, self.wt, self.prec, base_ring=forms[0].base_ring) def divide(f, g, prec): ''' Assume g is divisible by f. Returns g/f with precision prec. 
''' def key_func(x): return (x[0] + x[2], x[0], x[1], x[2]) ts = sorted(PrecisionDeg2(prec), key=key_func) f_ts = sorted([k for k, v in f.fc_dct.items() if v != 0], key=key_func) res_dct = {} n0, r0, m0 = f_ts[0] a0 = f[(n0, r0, m0)] # Normalize f f = f * a0 ** (-1) for n, r, m in ts: if g[(n + n0, r + r0, m + m0)] == 0: res_dct[(n, r, m)] = 0 else: break ts_res = ts[len(res_dct):] for n, r, m in ts_res: n1, r1, m1 = n + n0, r + r0, m + m0 s = sum((f.fc_dct[(n1 - a, r1 - b, m1 - c)] * res_dct[(a, b, c)] for a, b, c in _spos_def_mats_lt((n1, r1, m1)) if not ((a == n and b == r and c == m) or f.fc_dct[(n1 - a, r1 - b, m1 - c)] == 0))) res_dct[(n, r, m)] = g[(n1, r1, m1)] - s res = QexpLevel1(res_dct, prec) return res * a0 def modulo(x, p, K): d = K.degree() a = K.gens()[0] a_s = [a ** i for i in range(d)] xl = x.list() xl_p = [mod(b, p).lift() for b in xl] return sum(list(imap(operator.mul, a_s, xl_p)))
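# --- Editor's note: hedged usage sketch, not part of the original elements.py. ---
# How the classes above compose, assuming a Sage session with the degree2
# package importable (precisions chosen loosely for illustration):
#
#     from degree2.scalar_valued_smfs import x10_with_prec, x12_with_prec
#     x10 = x10_with_prec(8)
#     x12 = x12_with_prec(8)
#     f = x10 * x12         # ModFormQexpLevel1 of weight 22
#     g = f.divide(x10, 6)  # recovers x12 up to precision 6
#     g.wt                  # 12, since divide subtracts the weights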
39,573
32.881849
92
py
degree2
degree2-master/standard_l_scalar_valued.py
# -*- coding: utf-8; mode: sage -*- ''' Algebraic part of a value of the standard L of Siegel cusp forms of degree 2. Cf. [Kat] H. Katsurada, Exact standard zeta values of Siegel modular forms, Experimental Mathematics (2010), 19:1, 65-77 The original implementation was done by H. Katsurada in the Wolfram language. ''' from degree2.siegel_series.pullback_of_siegel_eisen import \ eisenstein_pullback_coeff from sage.all import zeta as _zeta from sage.all import QQ, ZZ, PolynomialRing, floor, matrix, mul, sqrt def zeta(l): return _zeta(ZZ(l)) def binomial(x, m): '''Return the binomial coefficient (x m). ''' m = ZZ(m) return mul(x - i for i in range(m)) / m.factorial() def G_poly(l, m): '''The polynomial G of y1, y2 and y3 given in Proposition 3.7, [Kat]. ''' R = PolynomialRing(QQ, names="y1, y2, y3") y1, y2, y3 = R.gens() return sum(binomial(2 * n + l - QQ(5) / QQ(2), n) * y3 ** n * sum((-y2) ** nu * (2 * y1) ** (m - 2 * n - 2 * nu) * binomial(l + m - nu - QQ(5) / QQ(2), m - 2 * n - nu) * binomial(m - 2 * n - nu, nu) for nu in range((m - 2 * n) // 2 + 1)) for n in range(m // 2 + 1)) def _r_iter(n, m): sq = int(floor(2 * sqrt(n * m))) for r in range(-sq, sq + 1): yield r def epsilon_tilde_l_k_degree2(l, k, A1, A2): r''' A1 and A2 are half-integral, semi-positive definite, symmetric matrices of size 2. Return \tilde{\epsilon}_{l, k}(A_{1}, A_{2}) in [Kat], p. 72. ''' const_term = zeta(3 - 2 * l) * zeta(5 - 2 * l) * zeta(1 - l) G = G_poly(l, k - l) y1, y2, y3 = G.parent().gens() G_y1_y3 = G.subs({y2: A1.det() * A2.det()}) def func(a, _A1, _A2, R, mat): return G_y1_y3.subs({y1: R.det() / ZZ(4), y3: mat.det()}).constant_coefficient() * a res = eisenstein_pullback_coeff(l, A1, A2, func=func) return res * (-1) ** (l // 2 + 1) * ZZ(2) ** (-2) * (l - 2) * const_term def algebraic_part_of_standard_l(f, l, space_of_cusp_form=None): r''' f: cuspidal eigenform of degree 2 of weight k with k: even. l: positive even integer s.t. l <= k - 4 space_of_cusp_form: the space of cusp forms that f belongs to. If f.parent_space is not None, then this can be omitted. Return the algebraic part of the standard L of f at l (\tilde{\Lambda}(f, l, St)) defined in [Kat], p. 72. ''' if f[(1, 1, 1)] != 0 and f[(1, 0, 1)] != 0: t = (1, 1, 1) A1 = matrix([[ZZ(1), ZZ(0)], [ZZ(0), ZZ(1)]]) else: t = f._none_zero_tpl() A1 = tpl_to_half_int_mat(t) msg = "l must be a positive even integer less than or equal to %s" % ( f.wt, ) try: l = ZZ(l) except TypeError: raise ValueError(msg) if not (l > 0 and l % 2 == 0 and l <= f.wt): raise ValueError(msg) if space_of_cusp_form is not None: S = space_of_cusp_form else: S = f.parent_space if S is None: raise RuntimeError("Please specify the space of cusp form explicitly.") tpls = S.linearly_indep_tuples() pull_back_dct = {t: epsilon_tilde_l_k_degree2( l + 2, f.wt, A1, tpl_to_half_int_mat(t)) for t in tpls} pull_back_vec = S._to_vector(pull_back_dct) T2 = S.hecke_matrix(2) lam = f.hecke_eigenvalue(2) d = S.dimension() vecs = [(T2 ** i) * pull_back_vec for i in range(d)] ei = [sum(f[t] * a for f, a in zip(S.basis(), v)) for v in vecs] chply = T2.charpoly() nume = first_elt_of_kern_of_vandermonde(chply, lam, ei) denom = f[int(A1[0, 0]), int(2 * A1[0, 1]), int(A1[1, 1])] * f[t] return nume / denom def first_elt_of_kern_of_vandermonde(chply, lam, beta_vec): ''' Cf. Goto, Lemma 2.2, "A twisted adjoint L-value of an elliptic modular form".
''' x = chply.parent().gens()[0] d = chply.degree() phi_d_lam = chply.diff(x).subs({x: lam}) if phi_d_lam == 0: raise ZeroDivisionError("Phi'(lambda) = 0") else: num = sum(sum(beta_vec[d - 1 - j] * chply[d - j + i] for j in range(i, d)) * lam ** i for i in range(d)) return num / phi_d_lam def norm_of_normalized_alg_part_of_standard_l(f, l, space_of_cusp_form=None): a = algebraic_part_of_standard_l( f, l, space_of_cusp_form=space_of_cusp_form) return a.norm() * f.gcd_of_coefficients().norm() ** 2 def tpl_to_half_int_mat(t): n, r, m = t return matrix([[ZZ(n), ZZ(r) / ZZ(2)], [ZZ(r) / ZZ(2), ZZ(m)]])
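# --- Editor's note: hedged sketch, not part of the original file. ---
# The generalized binomial above accepts half-integral arguments, which is
# exactly what G_poly feeds it.  Small checks one can run in a Sage session:
#
#     binomial(QQ(7) / QQ(2), 2)   # (7/2) * (5/2) / 2! == 35/8
#     G_poly(10, 2)                # a concrete polynomial in y1, y2, y3
#
# algebraic_part_of_standard_l then combines G_poly with the Eisenstein
# pullback above to evaluate the normalized standard L-value of an eigenform.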
4,536
32.858209
92
py
degree2
degree2-master/scalar_valued_smfs.py
# -*- coding: utf-8 -*- import os import operator from abc import ABCMeta, abstractproperty import sage from sage.misc.cachefunc import cached_method, cached_function from sage.all import (QQ, save, load, gcd, PolynomialRing, ZZ, CuspForms, floor, matrix, factor, sqrt, ceil, LaurentPolynomialRing, PowerSeriesRing, bernoulli, divisors, fundamental_discriminant, quadratic_L_function__exact, kronecker_character, prime_factors, valuation) from sage.all import O as bigO import sage.matrix.matrix_space from degree2.elements import (ModFormQexpLevel1, QexpLevel1, ModFormQsrTimesQminushalf) from degree2.utils import linearly_indep_rows_index_list, pmap from degree2.basic_operation import PrecisionDeg2 from degree2.hecke_module import HeckeModule from degree2.rankin_cohen_diff import diff_opetator_4 def _number_to_hol_modform(a, prec): if hasattr(a, 'parent'): parent = a.parent() else: parent = QQ return ModFormQexpLevel1(0, {(0, 0, 0): a}, prec, parent) class SiegelEisensteinSeries(ModFormQexpLevel1): def __init__(self, wt, prec=5, base_ring=QQ, fc_dct=None): self.__wt = wt if fc_dct is None: fc_dct = {} for (n, r, m) in PrecisionDeg2(prec): fc = self.fourier_coefficient(n, r, m) fc_dct[(n, r, m)] = fc fc_dct[(n, -r, m)] = fc if wt in [4, 6]: base_ring = ZZ ModFormQexpLevel1.__init__(self, wt, fc_dct, prec, base_ring) @property def wt(self): return self.__wt def _name(self): return 'Siegel-Eisenstein series of weight ' + str(self.wt) def fourier_coefficient(self, n, r, m): tpl = (n, r, m) if tpl == (0, 0, 0): return 1 else: return self._fourier_coefficient(gcd(tpl), 4 * n * m - r ** 2) @cached_method def _fourier_coefficient(self, content, det_4): def zeta(s): k = ZZ(1 - s) return -bernoulli(k) / k k = self.wt if det_4 < 0: return 0 elif det_4 == 0: return 2 / zeta(1 - k) * sum([d ** (k - 1) for d in divisors(content)]) else: return 2 * quadratic_L_function__exact(2 - k, -det_4) *\ self._fc__unramfactor(content, det_4)\ / (zeta(1 - k) * zeta(3 - 2 * k)) @cached_method def _fc__unramfactor(self, content, det_4): chi = kronecker_character(-det_4) pfacs = prime_factors(det_4) fd = fundamental_discriminant(-det_4) l = [(p, valuation(content, p), (valuation(det_4, p) - valuation(fd, p)) / 2) for p in pfacs] return reduce(operator.mul, [self._fc__unramfactor_at_p(p, ci, fi, chi) for (p, ci, fi) in l]) @cached_method def _fc__unramfactor_at_p(self, p, ci, fi, chi): k = self.wt return self._fc__unramfactor_at_p_1(p, ci, fi + 1) - \ chi(p) * p ** (k - 2) * self._fc__unramfactor_at_p_1(p, ci, fi) @cached_method def _fc__unramfactor_at_p_1(self, p, a, b): if b == 0: return 0 a = min(a, b - 1) k = self.wt r1 = (1 - p ** ((k - 1) * (a + 1))) / (1 - p ** (k - 1)) rn2 = p ** ((2 * k - 3) * b + k - 2) - p ** (b + (k - 2) * (2 * b - a)) rd2 = p ** (k - 2) - 1 return (r1 - rn2 / rd2) / (1 - p ** (2 * k - 3)) def degree2_modular_forms_ring_level1_gens(prec): es4 = eisenstein_series_degree2(4, prec) es6 = eisenstein_series_degree2(6, prec) x10 = x10_with_prec(prec) x12 = x12_with_prec(prec) x35 = x35_with_prec(prec) return (es4, es6, x10, x12, x35) # {"es4":es4, "es6": es6, "es10": es10, "es12": es12, # "x10":x10, "x12": x12, "x35": x35} Deg2global_gens_dict = {} @cached_function def load_cached_gens_from_file(prec): current_dir = os.path.dirname(os.path.abspath(__file__)) cached_dir = os.path.join(current_dir, "cached_data") prec39 = PrecisionDeg2(39) prec34_m17_51 = PrecisionDeg2([(34, -17, 51)]) if Deg2global_gens_dict != {}: a_ky = Deg2global_gens_dict.keys()[0] if Deg2global_gens_dict[a_ky].prec >= prec: return 
None if prec <= prec39 or set(prec) <= set(prec39) | set(prec34_m17_51): if prec <= PrecisionDeg2(21): gens_dct = load(os.path.join(cached_dir, '_fc_dict21.sobj')) max_prec = PrecisionDeg2(21) elif prec <= prec39: gens_dct = load(os.path.join(cached_dir, '_fc_dict39.sobj')) max_prec = prec39 else: gens_dct1 = load(os.path.join(cached_dir, '_fc_dict39.sobj')) gens_dct2 = load(os.path.join(cached_dir, '_fc_dict_tuples_34_-17_51.sobj')) for k in gens_dct1.keys(): gens_dct1[k].update(gens_dct2[k]) gens_dct = {k: {t: gens_dct1[k][t] for t in prec} for k in gens_dct1.keys()} max_prec = prec es4 = ModFormQexpLevel1(4, gens_dct[4], max_prec) es6 = ModFormQexpLevel1(6, gens_dct[6], max_prec) x10 = ModFormQexpLevel1(10, gens_dct[10], max_prec) x12 = ModFormQexpLevel1(12, gens_dct[12], max_prec) x35 = ModFormQexpLevel1(35, gens_dct[35], max_prec) d = {"es4": es4, "es6": es6, "x10": x10, "x12": x12, "x35": x35} for k, v in d.items(): Deg2global_gens_dict[k] = v def load_deg2_cached_gens(key, prec, wt, cuspidal=False): if key in Deg2global_gens_dict.keys(): f = Deg2global_gens_dict[key] if f.prec >= prec: fc_dct = {t: f[t] for t in prec} if key in ["es4", "es6"]: res = SiegelEisensteinSeries(wt, prec=prec, base_ring=ZZ, fc_dct=fc_dct) else: res = ModFormQexpLevel1(wt, fc_dct, prec, base_ring=ZZ, is_cuspidal=cuspidal) res._is_gen = key return res else: return False def eisenstein_series_degree2(k, prec): return eisenstein_series_degree2_innner(k, PrecisionDeg2(prec)) @cached_function def eisenstein_series_degree2_innner(k, prec): prec = PrecisionDeg2(prec) load_cached_gens_from_file(prec) key = "es" + str(k) f = load_deg2_cached_gens(key, prec, k) if f: return f f = SiegelEisensteinSeries(k, prec) f._is_gen = key Deg2global_gens_dict["es" + str(k)] = f return f def x10_with_prec(prec): return x10_with_prec_inner(PrecisionDeg2(prec)) @cached_function def x10_with_prec_inner(prec): prec = PrecisionDeg2(prec) load_cached_gens_from_file(prec) k = 10 key = "x" + str(k) f = load_deg2_cached_gens(key, prec, k, cuspidal=True) if f: return f es4 = eisenstein_series_degree2(4, prec) es6 = eisenstein_series_degree2(6, prec) es10 = eisenstein_series_degree2(10, prec) res = QQ(43867) * QQ(2 ** 10 * 3 ** 5 * 5 ** 2 * 7 * 53) ** (-1) * \ (- es10 + es4 * es6) res._is_cuspidal = True res._is_gen = key Deg2global_gens_dict[key] = res return res.change_ring(ZZ) def x12_with_prec(prec): return x12_with_prec_inner(PrecisionDeg2(prec)) @cached_function def x12_with_prec_inner(prec): prec = PrecisionDeg2(prec) load_cached_gens_from_file(prec) k = 12 key = "x" + str(k) f = load_deg2_cached_gens(key, prec, k, cuspidal=True) if f: return f es4 = eisenstein_series_degree2(4, prec) es6 = eisenstein_series_degree2(6, prec) es12 = eisenstein_series_degree2(12, prec) chi12 = QQ(131 * 593) / QQ(2 ** 13 * 3 ** 7 * 5 ** 3 * 7 ** 2 * 337) * \ (3 ** 2 * 7 ** 2 * es4 ** 3 + 2 * 5 ** 3 * es6 ** 2 - 691 * es12) res = 12 * chi12 res._is_cuspidal = True res._is_gen = key Deg2global_gens_dict[key] = res return res.change_ring(ZZ) def x35_with_prec(prec): return x35_with_prec_inner(PrecisionDeg2(prec)) @cached_function def x35_with_prec_inner(prec): prec = PrecisionDeg2(prec) load_cached_gens_from_file(prec) k = 35 key = "x" + str(k) f = load_deg2_cached_gens(key, prec, k, cuspidal=True) if f: return f l = pmap(lambda k: eisenstein_series_degree2(k, prec), [4, 6, 10, 12]) res = diff_opetator_4(*l) a = res[(2, -1, 3)] res = res * a ** (-1) res._is_cuspidal = True res._is_gen = key Deg2global_gens_dict[key] = res return res.change_ring(ZZ) 
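# --- Editor's note: hedged sanity checks, not part of the original module. ---
# To be run in a Sage session; small precisions keep them cheap:
#
#     es4 = eisenstein_series_degree2(4, 6)
#     es4[(0, 0, 0)]    # == 1, the normalized constant term
#     x35 = x35_with_prec(6)
#     x35[(2, -1, 3)]   # == 1, the normalization chosen in x35_with_prec_inner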
@cached_function def x5_jacobi_pwsr(prec): mx = int(ceil(sqrt(8 * prec) / QQ(2)) + 1) mn = int(floor(-(sqrt(8 * prec) - 1) / QQ(2))) mx1 = int(ceil((sqrt(8 * prec + 1) - 1) / QQ(2)) + 1) mn1 = int(floor((-sqrt(8 * prec + 1) - 1) / QQ(2))) R = LaurentPolynomialRing(QQ, names="t") t = R.gens()[0] S = PowerSeriesRing(R, names="q1") q1 = S.gens()[0] eta_3 = sum([QQ(-1) ** n * (2 * n + 1) * q1 ** (n * (n + 1) // 2) for n in range(mn1, mx1)]) + bigO(q1 ** (prec + 1)) theta = sum([QQ(-1) ** n * q1 ** (((2 * n + 1) ** 2 - 1) // 8) * t ** (n + 1) for n in range(mn, mx)]) # ct = qexp_eta(ZZ[['q1']], prec + 1) return theta * eta_3 ** 3 * QQ(8) ** (-1) def x5_jacobi_g(n, r, prec=40): if n % 2 == 0 or r % 2 == 0: return QQ(0) if n > prec: raise RuntimeError psr = x5_jacobi_pwsr((prec - 1) // 2) l_pol = psr[(n - 1) // 2] d = {} a_key = l_pol.dict().keys()[0] is_int_key = isinstance(a_key, int) is_etuple = isinstance(a_key, sage.rings.polynomial.polydict.ETuple) for k, v in l_pol.dict().items(): if is_int_key: d[k] = v elif is_etuple: d[k[0]] = v else: raise RuntimeError return d.get((r + 1) // 2, 0) @cached_function def x5__with_prec(prec): ''' Returns formal q-expansion f s.t. f * q1^(-1/2)*t^(1/2)*q2^(-1/2) equals to x5 (x10 == x5^2). ''' if prec not in ZZ: prec = prec._max_value() pwsr_prec = (2 * prec - 1) ** 2 def jacobi_g(n, r): return x5_jacobi_g(n, r, pwsr_prec) prec = PrecisionDeg2(prec) fc_dct = {} for n, r, m in prec: if 4 * n * m - r ** 2 == 0: fc_dct[(n, r, m)] = 0 else: n1 = 2 * n - 1 r1 = 2 * r + 1 m1 = 2 * m - 1 if 4 * n1 * m1 - r1 ** 2 > 0: fc_dct[(n, r, m)] = sum([d ** 4 * jacobi_g(n1 * m1 // (d ** 2), r1 // d) for d in gcd([n1, r1, m1]).divisors()]) res = QexpLevel1(fc_dct, prec) return ModFormQsrTimesQminushalf(res, 5) def y12_with_prec(prec): ''' One of Igusa's generators of the ring of Siegel modular forms of degree 2 over ZZ. ''' es4 = eisenstein_series_degree2(4, prec) es6 = eisenstein_series_degree2(6, prec) x12 = x12_with_prec(prec) y12 = 1 / QQ(2 ** 6 * 3 ** 3) * (es4 ** 3 - es6 ** 2) + \ 2 ** 4 * 3 ** 2 * x12 return y12.change_ring(ZZ) @cached_function def tuples_even_wt_modular_forms(wt): ''' Returns the list of tuples (p, q, r, s) such that 4p + 6q + 10r +12s = wt. ''' if wt < 0 or wt % 2 == 1: return [] w = wt / 2 return [(p, q, r, s) for p in range(0, floor(w / 2) + 1) for q in range(0, floor(w / 3) + 1) for r in range(0, floor(w / 5) + 1) for s in range(0, floor(w / 6) + 1) if 2 * p + 3 * q + 5 * r + 6 * s == w] def dimension_degree2(wt): if wt % 2 == 0: return len(tuples_even_wt_modular_forms(wt)) else: return len(tuples_even_wt_modular_forms(wt - 35)) RDeg2 = PolynomialRing(QQ, "es4, es6, x10, x12, x35") class AbstSpaceOfLevel1(HeckeModule): __metaclass__ = ABCMeta @abstractproperty def wt(self): raise NotImplementedError def strum_bound(self): return self.wt // 10 def basis(self): raise NotImplementedError def dimension(self): raise NotImplementedError @cached_method def linearly_indep_tuples(self): bd = PrecisionDeg2(self.strum_bound()) dim = self.dimension() tpls = sorted(bd.group_by_reduced_forms().keys(), key=lambda x: (x[0] + x[2], max(x[0], x[2]))) ml = [[f[t] for f in self.basis()] for t in tpls] idcs = linearly_indep_rows_index_list(ml, dim) return [tpls[i] for i in idcs] @property def sym_wt(self): return ZZ(0) class SpaceOfModForms(AbstSpaceOfLevel1): ''' The space of Siegel modular forms of degree 2. 
''' def __init__(self, wt, prec=False): self.__wt = wt self.__prec = wt // 10 * 2 if prec is False else prec self.__basis = None @property def wt(self): return self.__wt @property def prec(self): return self.__prec def dimension(self): return dimension_degree2(self.wt) def _set_basis(self, bs): self.__basis = bs def basis(self): ''' Returns the list of basis elements. An element of the basis has an attribute _construction that shows how one can construct the modular form as a polynomial in es4, es6, x10, x12 and x35. ''' if self.__basis is not None: return self.__basis ple4, ple6, plx10, plx12, plx35 = RDeg2.gens() prec = self.prec if self.dimension() == 0: res = [] if self.wt == 0: a = _number_to_hol_modform(QQ(1), prec) a._set_construction(RDeg2(1)) res = [a] elif self.wt == 35: x35 = x35_with_prec(prec) x35._set_construction(plx35) res = [x35] elif self.wt % 2 == 1: x35 = x35_with_prec(prec) bs = SpaceOfModForms(self.wt - 35, prec).basis() res = [] for a in bs: b = x35 * a b._set_construction(a._construction * plx35) res.append(b) else: # if wt is even es4 = eisenstein_series_degree2(4, prec) es6 = eisenstein_series_degree2(6, prec) x10 = x10_with_prec(prec) x12 = x12_with_prec(prec) tuples = tuples_even_wt_modular_forms(self.wt) res = [] for (p, q, r, s) in tuples: a = es4 ** p * es6 ** q * x10 ** r * x12 ** s a._construction = ple4 ** p * \ ple6 ** q * plx10 ** r * plx12 ** s res.append(a) self.__basis = res return res class KlingenEisensteinAndCuspForms(AbstSpaceOfLevel1): ''' The space of Klingen-Eisenstein series and cusp forms. ''' def __init__(self, wt, prec=False): self.__wt = wt if prec: self.__prec = PrecisionDeg2(prec) else: self.__prec = PrecisionDeg2(wt // 10 * 2) self.__basis_cached = False self.__cached_basis = False @property def wt(self): return self.__wt @property def prec(self): return self.__prec @cached_method def dimension(self): if self.wt % 2 == 0: return dimension_degree2(self.wt) - 1 else: return dimension_degree2(self.wt) @cached_method def dimensions(self): ''' Returns a dictionary such that "total" => the total dimension of self, "Klingen" => the dimension of the space of Klingen-Eisenstein series, "lift" => the dimension of the Maass subspace of the space of cusp forms, "non-lift" => the dimension of the non-lift cusp forms. ''' dim = self.dimension() cdim = self.dimension_of_cuspforms() kdim = dim - cdim nlcdim = self.dimension_of_nolift_cuspforms() lcdim = cdim - nlcdim return {"total": dim, "Klingen": kdim, "lift": lcdim, "non-lift": nlcdim} @cached_method def dimension_of_cuspforms(self): if self.wt % 2 == 1: return self.dimension() S = CuspForms(1, self.wt) return self.dimension() - S.dimension() @cached_method def dimension_of_nolift_cuspforms(self): if self.wt % 2 == 1: return self.dimension() S = CuspForms(1, (self.wt - 1) * 2) return self.dimension_of_cuspforms() - S.dimension() @cached_method def basis(self): ''' Returns the list of basis elements. It is similar to SpaceOfModForms.basis.
''' if self.__basis_cached: return self.__cached_basis prec = self.prec if self.wt % 2 == 1: M = SpaceOfModForms(self.wt, self.prec) return M.basis() # If wt is even, es4 = eisenstein_series_degree2(4, prec) es6 = eisenstein_series_degree2(6, prec) x10 = x10_with_prec(prec) x12 = x12_with_prec(prec) tuples = tuples_even_wt_modular_forms(self.wt) not_kl_or_cusp = [(p, q, r, s) for (p, q, r, s) in tuples if r == 0 and s == 0] kl_or_cusp = [t for t in tuples if t not in not_kl_or_cusp] res1 = [] ple4, ple6, plx10, plx12, _ = RDeg2.gens() for (p, q, r, s) in kl_or_cusp: a = es4 ** p * es6 ** q * x10 ** r * x12 ** s a._construction = ple4 ** p * ple6 ** q * plx10 ** r * plx12 ** s res1.append(a) res2 = [] if not not_kl_or_cusp == []: (p1, q1, _, _) = not_kl_or_cusp.pop() A = es4 ** p1 * es6 ** q1 for (p, q, _, _) in not_kl_or_cusp: a = es4 ** p * es6 ** q - A a._construction = ple4 ** p * \ ple6 ** q - ple4 ** p1 * ple6 ** q1 res2.append(a) return res1 + res2 def save_basis_as_binary(self, filename): basis = self.basis() dicts = [b._to_format_dct() for b in basis] save(dicts, filename) def load_basis_from(self, filename): dicts = load(filename) prec = dicts[0]["prec"] if self.prec > PrecisionDeg2._from_dict_to_object(prec): msg = "self.prec must be less than {prec}".format(prec=prec) raise RuntimeError(msg) basis = [ModFormQexpLevel1._from_dict_to_object(dct) for dct in dicts] self.__basis_cached = True self.__cached_basis = basis def _is_linearly_indep_tuples(self, tuples): basis = self.basis() l = [[fm[(n, r, m)] for n, r, m in tuples] for fm in basis] return matrix(l).rank() == len(basis) def construction(self, f): return sum([a * b._construction for a, b in zip(self._to_vector(f), self.basis())]) def hecke_eigenvalue(self, f, a): ''' Assumes f is an eigenform and returns the eigenvalue w.r.t T(a). ''' ts = self.linearly_indep_tuples() for t in ts: if f[t] != 0: return f.hecke_operator(a, t) / f[t] class CuspFormsDegree2(AbstSpaceOfLevel1): ''' The space of cusp forms of degree 2. This class assumes that the characteristic polynomial of T(2) acting on KlingenEisensteinAndCuspForms has no double roots. ''' def __init__(self, wt, prec=False): self.__wt = wt if prec: self.__prec = PrecisionDeg2(prec) else: self.__prec = PrecisionDeg2(wt // 10 * 2) @property def wt(self): return self.__wt @property def prec(self): return self.__prec @cached_method def klingeneisensteinAndCuspForms(self): return KlingenEisensteinAndCuspForms(self.wt, self.prec) def dimension(self): return self.klingeneisensteinAndCuspForms().dimension_of_cuspforms() @cached_method def basis(self): ''' Returns a basis of this space. It assumes the characteristic polynomial of T(2) acting on KlingenEisensteinAndCuspForms has no double roots. 
''' N = self.klingeneisensteinAndCuspForms() if self.wt % 2 == 1: return N.basis() return N.basis_of_subsp_annihilated_by(self.hecke_charpoly(2)) def hecke_charpoly(self, m, var="x", algorithm='linbox'): p, i = factor(m)[0] if not (ZZ(m).is_prime_power() and 0 < i < 3): raise RuntimeError("m must be a prime or the square of a prime.") if i == 1: return self._hecke_tp_charpoly(p, var=var, algorithm=algorithm) if i == 2: return self._hecke_tp2_charpoly(p, var=var, algorithm=algorithm) def _hecke_tp_charpoly(self, p, var='x', algorithm='linbox'): a = p ** (self.wt - 2) + 1 N = self.klingeneisensteinAndCuspForms() S = CuspForms(1, self.wt) m = S.dimension() R = PolynomialRing(QQ, names=var) x = R.gens()[0] f = R(S.hecke_matrix(p).charpoly(var=var, algorithm=algorithm)) f1 = f.subs({x: a ** (-1) * x}) * a ** m g = R(N.hecke_matrix(p).charpoly(var=var, algorithm=algorithm)) return R(g / f1) def _hecke_tp2_charpoly(self, p, var='x', algorithm='linbox'): u = p ** (self.wt - 2) N = self.klingeneisensteinAndCuspForms() S = CuspForms(1, self.wt) m = S.dimension() R = PolynomialRing(QQ, names=var) x = R.gens()[0] f = R(S.hecke_matrix(p).charpoly(var=var, algorithm=algorithm)) g = R(N.hecke_matrix(p ** 2).charpoly(var=var, algorithm=algorithm)) def morph(a, b, f, m): G = (-1) ** m * f.subs({x: -x}) * f alst = [[k // 2, v] for k, v in G.dict().iteritems()] F = sum([v * x ** k for k, v in alst]) return a ** m * F.subs({x: (x - b) / a}) f1 = morph(u ** 2 + u + 1, -p * u ** 3 - u ** 2 - p * u, f, m) return R(g / f1)
22,427
30.994294
83
py
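A minimal usage sketch for the scalar-valued classes above (run inside Sage with this module imported; the small precisions only keep the sketch cheap, and hecke_charpoly relies on the HeckeModule machinery that CuspFormsDegree2 assumes):

# tuples_even_wt_modular_forms(wt) lists the exponents (p, q, r, s) with
# 4p + 6q + 10r + 12s = wt, one tuple per monomial es4^p es6^q x10^r x12^s.
print tuples_even_wt_modular_forms(10)   # [(0, 0, 1, 0), (1, 1, 0, 0)]
print dimension_degree2(10)              # 2: spanned by x10 and es4 * es6

M = SpaceOfModForms(10, prec=4)
print len(M.basis()) == M.dimension()    # True

S = CuspFormsDegree2(20, prec=4)
print S.hecke_charpoly(2)                # charpoly of T(2) on the cusp forms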
degree2
degree2-master/diff_operator_pullback_vector_valued.py
# -*- coding: utf-8 -*- ''' cf. [Bö] S.Böcherer, Über die Fourier-Jacobi-Entwickling Siegelscher Eisensteinreihen II, Mathematische Zeichtschrift, 189 (1985), 81 - 110. [BSY] S. Böcherer, T. Satoh, T. Yamazaki, On the pullback of a differential operator and its application to vector valued Eisenstein series, Comment. Math. Univ. St. Pauli 42 (1992) 1 - 22. [DIK] N. Dummigan, T. Ibukiyama, H. Katsurada, some Siegel modular standard L-values, and Shafarevich-Tate groups. ''' from itertools import combinations from degree2.standard_l_scalar_valued import (first_elt_of_kern_of_vandermonde, tpl_to_half_int_mat) from sage.all import binomial as _binomial from sage.all import (QQ, ZZ, PolynomialRing, cached_function, factorial, identity_matrix, matrix, mul, vector, zeta) from .siegel_series.pullback_of_siegel_eisen import r_n_m_iter from .siegel_series.siegel_eisenstein import SiegelEisensteinSeries as sess def binomial(x, m): return ZZ(_binomial(x, m)) def _permutations_increasing(n, r): ''' A generator of permutations of n of length r which are non-decreasing. ''' return combinations(range(n), r) @cached_function def permutations_increasing(n, r): r'''index set of \wedge^r(V) ''' return list(_permutations_increasing(n, r)) def _concat(a, b): return tuple(sorted(list(a) + list(b))) def sub_mat(A, a, b): return matrix([[A[i, j] for i in a] for j in b]) def _sign(a1, a2): return mul(mul(-1 for b in a1 if b < a) for a in a2) @cached_function def _index_dct(n, r): return {a: i for i, a in enumerate(permutations_increasing(n, r))} def sqcap_mul(A, B, n, p, q): ''' Let A and B be square matrices of size binomial(n, p) and binomial(n, q). Return sqcap multiplication defined in [Bö] as a square matrix of size binomial(n, p + q). ''' # if p or q is zero, return immediately. if p == 0: return B elif q == 0: return A p_dct = _index_dct(n, p) q_dct = _index_dct(n, q) p_q_dct = _index_dct(n, p + q) p_q_lst = permutations_increasing(n, p + q) res = matrix([[A.base_ring()(0) for _ in p_q_lst] for _ in p_q_lst]) for ad in _permutations_increasing(n, p): for bd in _permutations_increasing(n, p): for add in _permutations_increasing(n, q): for bdd in _permutations_increasing(n, q): if all(a not in add for a in ad) and all(b not in bdd for b in bd): a = _concat(ad, add) b = _concat(bd, bdd) s = (_sign(ad, add) * _sign(bd, bdd) * A[p_dct[bd], p_dct[ad]] * B[q_dct[bdd], q_dct[add]]) res[p_q_dct[b], p_q_dct[a]] += s return binomial(p + q, p) ** (-1) * res def bracket_power(A, p): if p == 0: return identity_matrix(A.base_ring(), 1) l = permutations_increasing(A.ncols(), p) return matrix([[sub_mat(A, a, b).det() for b in l] for a in l]) def _ad_bracket_coeffs(A, a, b): N = range(A.ncols()) _na = tuple([x for x in N if x not in a]) _nb = tuple([x for x in N if x not in b]) return sub_mat(A, _na, _nb).det() * _sign(a, _na) * _sign(b, _nb) def ad_bracket(A, p): if p == A.ncols(): return identity_matrix(A.base_ring(), 1) l = permutations_increasing(A.ncols(), p) return matrix([[_ad_bracket_coeffs(A, b, a) for b in l] for a in l]) # matrix([[z11, z12], [z21, z22]]) is (2 pi i)Z_{2} in [Bö]. _Z_ring = PolynomialRing(QQ, names='z11, z12, z21, z22') # matrix([[dz11, dz12], [dz21, dz22]]) is 2 / (2 pi i)partial_2 in [Bö]. 
_dZ_ring = PolynomialRing(QQ, names='dz11, dz12, dz21, dz22') _Z_dZ_ring = PolynomialRing(_Z_ring, names='dz11, dz12, dz21, dz22') @cached_function def _z_u_ring_zgens(): return [_Z_U_ring(a) for a in _Z_ring.gens()] def _from_z_dz_ring_to_diff_op(pol): pol = _Z_dZ_ring(pol) d = {tuple(t): _Z_ring(v) for t, v in pol.dict().iteritems()} return DiffZOperatorElement(d) def _diff_z_exp(t, pol, r_ls, base_ring=None): '''Let a, b, c, d = t. Return (d/dz11)^a (d/dz12)^b (d/dz21)^c (d/dz22)^d (pol exp(2pi R^t Z)) * exp(- 2pi R^t Z). Here Z = matrix([[z11, z12], [z21, z22]]) and R = matrix(2, r_ls). ''' for z, r, a in zip((base_ring(_z) for _z in _Z_ring.gens()), r_ls, t): pol = base_ring(sum(binomial(a, i) * pol.derivative(z, i) * r ** (a - i) for i in range(a + 1))) return pol class DiffZOperatorElement(object): '''A polynomial of Z and d/dZ, where Z is a 2 by 2 matrix. ''' def __init__(self, pol_idcs): '''pol_idcs is a list of tuples (pol, idx) or a dict whose key is pol and the value is idx. Here pol is a polynomial, which is an element of _Z_ring. idx is a tuple of 4 integers (a, b, c, d). Each tuple corresponds pol (d/dz11)^a (d/dz12)^b (d/dz21)^c (d/dz22)^d. ''' if isinstance(pol_idcs, list): self._pol_idc_dct = {k: v for k, v in pol_idcs if v != 0} elif isinstance(pol_idcs, dict): self._pol_idc_dct = { k: v for k, v in pol_idcs.items() if v != 0} @property def pol_idc_dct(self): return self._pol_idc_dct def diff(self, pol, r_ls): '''pol is a polynomial in _Z_ring and R is a 2 by 2 marix. Return (the derivative of pol * exp(2pi R^t Z)) / exp(R^t Z) as a polynomial. R = matrix(2, r_ls) ''' try: pol = _Z_ring(pol) except TypeError: raise NotImplementedError return sum(v * _diff_z_exp(k, pol, r_ls, base_ring=_Z_ring) for k, v in self.pol_idc_dct.items()) Z = matrix(2, [_Z_dZ_ring(__a) for __a in _Z_ring.gens()]) dZ = matrix(2, [_Z_dZ_ring(__a) for __a in _dZ_ring.gens()]) def delta_r_q(q, A=None, D=None): ''' Return delta(r, q) in [DIK], pp. 1312. ''' n = 2 p = n - q res = sqcap_mul(bracket_power(A * Z, q) * bracket_power( D - ZZ(1) / ZZ(4) * dZ.transpose() * A ** (-1) * dZ, q), bracket_power(dZ, p), n, q, p) return res[0, 0] * ZZ(2) ** (-p) def _C(p, s): return mul(s + ZZ(i) / ZZ(2) for i in range(p)) def D_tilde(alpha, **kwds): ''' (2pi)**(-2) * D_tilde(alpha) in [DIK], pp 1312 as an instance of DiffZOperatorElement. ''' alpha = ZZ(alpha) res = sum(binomial(2, q) * _C(q, -alpha + 1) ** (-1) * delta_r_q(q, **kwds) for q in range(3)) return _from_z_dz_ring_to_diff_op(res) def D_tilde_nu(alpha, nu, pol, r_ls, **kwds): ''' (2pi)**(-2 nu) * D_tilde_{alpha}^nu(pol * exp(2pi R^t Z)) / exp(- 2pi R^t Z), where pol is pol polynomial of Z and R = matrix(2, r_ls). ''' for i in range(nu): pol = D_tilde(alpha + i, **kwds).diff(pol, r_ls) return pol # The repressentation space of Gl2 is homogenous polynomial of u1 and u2. _U_ring = PolynomialRing(QQ, names='u1, u2') _Z_U_ring = PolynomialRing(QQ, names='u1, u2, z11, z12, z21, z22') def _D_D_up_D_down(u1, u2, v1, v2, r_ls, pol): '''D - D_up - D_down ''' r11, r12, r21, r22 = [_diff_z_exp(t, pol, r_ls, base_ring=_Z_U_ring) for t in [(1, 0, 0, 0), (0, 1, 0, 0), (0, 0, 1, 0), (0, 0, 0, 1)]] return u1 * v1 * r11 + u1 * v2 * r12 + u2 * v1 * r21 + u2 * v2 * r22 def L_operator(k, m, _A, _D, r_ls, pol, us, d_up_down_mlt): ''' Return (k)_m * Fourier coefficient of L_tilde^{k, m}(pol exp(2pi block_matrix([[A, R/2], [R^t/2, D]])Z))/ exp(-2pi block_matrix([[A, R/2], [R^t/2, D]])Z). as an element of _Z_ring or _Z_U_ring. 
''' if m == 0: return pol zero = _Z_U_ring(0) res = zero u1, u2, u3, u4 = us for n in range(m // 2 + 1): pol_tmp = _Z_U_ring(pol) for _ in range(m - 2 * n): pol_tmp = _D_D_up_D_down(u1, u2, u3, u4, r_ls, pol_tmp) for _ in range(n): pol_tmp *= d_up_down_mlt pol_tmp *= QQ(factorial(n) * factorial(m - 2 * n) * mul(2 - k - m + i for i in range(n))) ** (-1) res += pol_tmp return res def _zeta(s): return zeta(ZZ(s)) def fc_of_pullback_of_diff_eisen(l, k, m, A, D, u3, u4, verbose=False): '''Return the Fourier coefficient of exp(2pi A Z1 + 2pi D Z2) of pullback of vector valued Eisenstein series F_{l, (k, m)} in [DIK], pp 1313. ''' dct = {"A": A, "D": D} res = _U_ring(0) es = sess(weight=l, degree=4) us = list(_U_ring.gens()) + [u3, u4] # D_up is multiplication by d_up_mlt on p(z2)e(A Z1 + R^t Z12 + D Z2) v_up = vector(_U_ring, us[:2]) d_up_mlt = v_up * A * v_up v_down = vector(us[2:]) d_down_mlt = v_down * D * v_down d_up_down_mlt = d_up_mlt * d_down_mlt _u1, _u2 = (_Z_U_ring(a) for a in ["u1", "u2"]) for R, mat in r_n_m_iter(A, D): r_ls = R.list() pol = D_tilde_nu(l, k - l, QQ(1), r_ls, **dct) # L_operator is a differential operator whose order <= m, # we truncate it. pol = _Z_ring( {t: v for t, v in pol.dict().iteritems() if sum(list(t)) <= m}) _l_op_tmp = L_operator(k, m, A, D, r_ls, pol * es.fourier_coefficient(mat), us, d_up_down_mlt) _l_op_tmp = _U_ring({(m - a, a): _l_op_tmp[_u1 ** (m - a) * _u2 ** a] for a in range(m + 1)}) res += _l_op_tmp res = res * QQ(mul(k + i for i in range(m))) ** (-1) res = res * _zeta(1 - l) * _zeta(1 - 2 * l + 2) * _zeta(1 - 2 * l + 4) if verbose: print "Done computation of Fourier coefficient of pullback." return res def _pullback_vector(l, D, u3, u4, space_of_cuspforms, verbose=False): '''Return a vector corresponding to pullback of Eisenstein series. ''' k = space_of_cuspforms.wt j = space_of_cuspforms.sym_wt tpls = space_of_cuspforms.linearly_indep_tuples() u1, u2 = _U_ring.gens() if j > 0: pull_back_fc_dct = { t: fc_of_pullback_of_diff_eisen( l, k, j, tpl_to_half_int_mat(t), D, u3, u4, verbose=verbose) for t in set(t for t, i in tpls)} pull_back_dct = {(t, i): pull_back_fc_dct[t][u1 ** (j - i) * u2 ** i] for t, i in tpls} else: pull_back_dct = {t: fc_of_pullback_of_diff_eisen( l, k, j, tpl_to_half_int_mat(t), D, u3, u4, verbose=verbose) for t in tpls} pull_back_dct = {k: v.constant_coefficient() for k, v in pull_back_dct.iteritems()} return space_of_cuspforms._to_vector(pull_back_dct) def _u3_u4_gen(): s = 1 while True: for a in range(s + 1): yield (a, s - a) s += 1 def _u3_u4_nonzero(f, t0): ''' Return (u3, u4, f[t0](u3, u4)) such that f[t0](u3, u4) != 0. ''' if f.sym_wt > 0: f_t0_pol = f[t0]._to_pol() f_t0_pol_val = 0 for u3, u4 in _u3_u4_gen(): if f_t0_pol_val == 0: x, y = f_t0_pol.parent().gens() f_t0_pol_val = f_t0_pol.subs({x: u3, y: u4}) u3_val = u3 u4_val = u4 else: break else: f_t0_pol_val = f[t0] u3_val = u4_val = QQ(1) return (u3_val, u4_val, f_t0_pol_val) def algebraic_part_of_standard_l(f, l, space_of_cuspforms, verbose=False): r'''f: (vector valued) cuspidal eigenform of degree 2 of weight det^k Sym(j). l: positive even integer such that 2 le l < k - 2. space_of_cuspforms: space of cusp form that f belongs to. Return the algebriac part of the standard L of f at l cf. Katsurada, Takemori Congruence primes of the Kim-Ramakrishnan-Shahidi lift. Theorem 4.1. 
''' k = f.wt j = f.sym_wt t0 = f._none_zero_tpl() D = tpl_to_half_int_mat(t0) if not (l % 2 == 0 and 2 <= l < k - 2): raise ValueError u3_val, u4_val, f_t0_pol_val = _u3_u4_nonzero(f, t0) pull_back_vec = _pullback_vector( l + ZZ(2), D, u3_val, u4_val, space_of_cuspforms, verbose=verbose) T2 = space_of_cuspforms.hecke_matrix(2) d = space_of_cuspforms.dimension() vecs = [(T2 ** i) * pull_back_vec for i in range(d)] ei = [sum(f[t0] * a for f, a in zip(space_of_cuspforms.basis(), v)) for v in vecs] if j > 0: ei = [a._to_pol() for a in ei] chply = T2.charpoly() nume = first_elt_of_kern_of_vandermonde(chply, f.hecke_eigenvalue(2), ei) denom = f[t0] * f_t0_pol_val if j > 0: denom = denom._to_pol() return f.base_ring(nume / denom)
12,790
32.484293
96
py
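A small sanity check for the exterior-power helpers above: permutations_increasing(n, r) indexes wedge^r of an n-dimensional space, and for a 2-by-2 matrix bracket_power(A, 2) is the 1-by-1 matrix containing det(A). Only names defined above (plus sage.all) are used:

from sage.all import QQ, matrix

A = matrix(QQ, 2, [1, 2, 3, 4])
print permutations_increasing(3, 2)   # [(0, 1), (0, 2), (1, 2)]
print bracket_power(A, 1) == A        # True: wedge^1 is A itself
print bracket_power(A, 2)             # [-2], i.e. det(A)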
degree2
degree2-master/rankin_cohen_diff.py
# -*- coding: utf-8 -*- from sage.all import QQ, PolynomialRing, matrix, log, cached_function, fork from degree2.utils import mul, combination, group, pmap from degree2.elements import SymWtGenElt as SWGElt from degree2.elements import (QexpLevel1, QseriesTimesQminushalf, ModFormQexpLevel1) from degree2.elements import SymWtModFmElt as SWMFE from degree2.basic_operation import (common_prec, common_base_ring, _common_base_ring) from degree2.interpolate import det_deg2 def diff_opetator_4(f1, f2, f3, f4): l = [f1, f2, f3, f4] wt_s = [f.wt for f in l] prec_res = common_prec(l) base_ring = common_base_ring(l) m = [[a.wt * a for a in l], pmap(lambda a: a.differentiate_wrt_tau(), l), pmap(lambda a: a.differentiate_wrt_w(), l), pmap(lambda a: a.differentiate_wrt_z(), l)] res = det_deg2(m, wt=sum((f.wt for f in l)) + 1) res = ModFormQexpLevel1(sum(wt_s) + 3, res.fc_dct, prec_res, base_ring=base_ring) return res def rankin_cohen_triple_x5(_Q, _f, _prec, _i=2): ''' Deprecated. ''' raise DeprecationWarning("Use '_rankin_cohen_bracket_func'" " with x5__with_prec instead.") def rankin_cohen_pair_x5(_Q, _prec): ''' Deprecated. ''' raise DeprecationWarning("Use '_rankin_cohen_bracket_func'" " with x5__with_prec instead.") @cached_function def _inc_weight(Q): ''' Let D be the differential operator ass. to Q. Let f_1, .., f_t be vector valued modular forms of determinant weights k_1, ..., k_t. If the determinant weight of D(f_1, ..., f_t) is equal to k_1 + ... + k_t + k, this function returns k. ''' S = Q.parent() R = S.base_ring() u1, _ = S.gens() rs = R.gens() rdct = {} for r11, r12, _ in group(rs, 3): rdct[r11] = 4 * r11 rdct[r12] = 2 * r12 t = [t for t, v in Q.dict().iteritems() if v != 0][0] a = Q.map_coefficients(lambda f: f.subs(rdct))[t] / Q.subs({u1: 2 * u1})[t] return int(log(a) / log(2)) def _rankin_cohen_bracket_func(Q, rnames=None, unames=None): ''' Let rnames = "r00, r01, r02, ..., r(n-1)0, r(n-1)1, r(n-1)2" unames = "u1, u2" Let R0 = [[r00, r0], [r0, r02]], R1 = [[r10, r11], [r11, r12]], ... R(n-1) = [[r(n-1)0, r(n-1)], [r(n-1), r(n-1)2]] be the symmetric matrices. Q is a homogenous polynomial of u1 and u2 whose coefficient is a polynomial of R0, ..., R(n-1). This function returns a Rakin-Cohen type differential operator corresponding to Q. The operator is a function that takes a list of n forms. 
''' if unames is None or rnames is None: S = Q.parent() unames = ", ".join(S.variable_names()) rnames = ", ".join(S.base_ring().variable_names()) R = PolynomialRing(QQ, names=rnames) S = PolynomialRing(R, names=unames) Q = S(Q) j = Q.degree() def monom_mul(tpl, v, flist): tpls = group(tpl, 3) l = zip(flist, tpls) return ((v * mul([QQ(2) ** (-t[1]) for _, t in l])) * mul((f._differential_operator_monomial(*t) for f, t in l))) def rankin_cohen(flist): res = [] for a in range(j, -1, -1): p_sum = QQ(0) for tpl, v in Q[(a, j - a)].dict().items(): p_sum += monom_mul(tpl, v, flist) res.append(p_sum) return res return rankin_cohen def _pair_gens_r_s(): rnames = "r11, r12, r22, s11, s12, s22" unames = "u1, u2" RS_ring = PolynomialRing(QQ, names=rnames) (r11, r12, r22, s11, s12, s22) = RS_ring.gens() (u1, u2) = PolynomialRing(RS_ring, names=unames).gens() r = r11 * u1 ** 2 + 2 * r12 * u1 * u2 + r22 * u2 ** 2 s = s11 * u1 ** 2 + 2 * s12 * u1 * u2 + s22 * u2 ** 2 return (RS_ring.gens(), (u1, u2), (r, s)) def _triple_gens(): rnames = "r11, r12, r22, s11, s12, s22, t11, t12, t22" unames = "u1, u2" R = PolynomialRing(QQ, names=rnames) S = PolynomialRing(R, names=unames) return (R.gens(), S.gens()) @fork def _rankin_cohen_gen(Q, flist): forms = _rankin_cohen_bracket_func(Q)(flist) prec = common_prec(forms) base_ring = common_base_ring(flist) a = _inc_weight(Q) return SWMFE(forms, sum([f.wt for f in flist]) + a, prec, base_ring) def rankin_cohen_pair_sym(j, f, g): ''' Assuming j: even, returns Rankin-Cohen bracket corresponding to Q_{k, l, j/2}(r, s). cf. Ibukiyama, Vector valued Siegel modular forms of symmetric tensor weight of small degrees, COMMENTARI MATHEMATICI UNIVERSITATIS SANCTI PAULI VOL 61, NO 1, 2012. Use rankin_cohen_pair_x5 if f or g is equal to x5. ''' Q = _rankin_cohen_pair_sym_pol(j, f.wt, g.wt) args = [f, g] return _rankin_cohen_gen(Q, args) def rankin_cohen_pair_det2_sym(j, f, g): ''' Returns a vector valued Siegel modular form of weight det^(f.wt + g.wt + 2) Sym(j). Use rankin_cohen_pair_x5 if f or g is equal to x5. 
''' Q = _rankin_cohen_pair_det2_sym_pol(j, f.wt, g.wt) args = [f, g] return _rankin_cohen_gen(Q, args) def rankin_cohen_triple_det_sym2(f, g, h): Q = _rankin_cohen_triple_det_sym2_pol(f.wt, g.wt, h.wt) args = [f, g, h] return _rankin_cohen_gen(Q, args) def rankin_cohen_triple_det_sym4(f, g, h): Q = _rankin_cohen_triple_det_sym4_pol(f.wt, g.wt, h.wt) args = [f, g, h] return _rankin_cohen_gen(Q, args) def rankin_cohen_triple_det_sym8(f, g, h): Q = _rankin_cohen_triple_det_sym8_pol(f.wt, g.wt, h.wt) args = [f, g, h] return _rankin_cohen_gen(Q, args) def _rankin_cohen_pair_sym_pol(j, k, l): _, _, (r, s) = _pair_gens_r_s() m = j // 2 return sum([(-1) ** i * combination(m + l - 1, i) * combination(m + k - 1, m - i) * r ** i * s ** (m - i) for i in range(m + 1)]) def _rankin_cohen_pair_det2_sym_pol(j, k, l): (r11, r12, r22, s11, s12, s22), _, (r, s) = _pair_gens_r_s() m = j // 2 Q = sum([(-1) ** i * combination(m + l, i) * combination(m + k, m - i) * r ** i * s ** (m - i) for i in range(m + 1)]) Qx = sum([(-1) ** i * combination(m + l, i) * combination(m + k, m - i) * i * r ** (i - 1) * s ** (m - i) for i in range(1, m + 1)]) Qy = sum([(-1) ** i * combination(m + l, i) * combination(m + k, m - i) * (m - i) * r ** i * s ** (m - i - 1) for i in range(0, m)]) detR = r11 * r22 - r12 ** 2 detS = s11 * s22 - s12 ** 2 # det(R+S) detRpS = (-r12 ** 2 + r11 * r22 + r22 * s11 - QQ(2) * r12 * s12 - s12 ** 2 + r11 * s22 + s11 * s22) Q2 = ((2 * k - 1) * (2 * l - 1) * detRpS - (2 * k - 1) * (2 * k + 2 * l - 1) * detS - (2 * l - 1) * (2 * k + 2 * l - 1) * detR) Q = (QQ(4) ** (-1) * Q2 * Q + QQ(2) ** (-1) * ((2 * l - 1) * detR * s - (2 * k - 1) * detS * r) * (Qx - Qy)) return Q def _rankin_cohen_triple_det_sym2_pol(k1, k2, k3): (r11, r12, r22, s11, s12, s22, t11, t12, t22), (u1, u2) = _triple_gens() m0 = matrix([[r11, s11, t11], [2 * r12, 2 * s12, 2 * t12], [k1, k2, k3]]) m1 = matrix([[r11, s11, t11], [k1, k2, k3], [r22, s22, t22]]) m2 = matrix([[k1, k2, k3], [2 * r12, 2 * s12, 2 * t12], [r22, s22, t22]]) Q = m0.det() * u1 ** 2 - 2 * m1.det() * u1 * u2 + m2.det() * u2 ** 2 return Q def _rankin_cohen_triple_det_sym4_pol(k1, k2, k3): (r11, r12, r22, s11, s12, s22, t11, t12, t22), (u1, u2) = _triple_gens() m00 = matrix([[(k1 + 1) * r11, k2, k3], [r11 ** 2, s11, t11], [r11 * r12, s12, t12]]) m01 = matrix([[k1, (k2 + 1) * s11, k3], [r11, s11 ** 2, t11], [r12, s11 * s12, t12]]) m10 = matrix([[(k1 + 1) * r12, k2, k3], [r11 * r12, s11, t11], [r12 ** 2, s12, t12]]) m11 = matrix([[k1, (k2 + 1) * s12, k3], [r11, s11 * s12, t11], [r12, s12 ** 2, t12]]) m12 = matrix([[(k1 + 1) * r11, k2, k3], [r11 ** 2, s11, t11], [r11 * r22, s22, t22]]) m13 = matrix([[k1, (k2 + 1) * s11, k3], [r11, s11 ** 2, t11], [r22, s11 * s22, t22]]) m20 = matrix([[(k1 + 1) * r12, k2, k3], [r11 * r12, s11, t11], [r22 * r12, s22, t22]]) m21 = matrix([[k1, (k2 + 1) * s12, k3], [r11, s11 * s12, t11], [r22, s22 * s12, t22]]) m30 = matrix([[(k1 + 1) * r12, k2, k3], [r12 ** 2, s12, t12], [r12 * r22, s22, t22]]) m31 = matrix([[k1, (k2 + 1) * s12, k3], [r12, s12 ** 2, t12], [r22, s12 * s22, t22]]) m32 = matrix([[(k1 + 1) * r22, k2, k3], [r11 * r22, s11, t11], [r22 ** 2, s22, t22]]) m33 = matrix([[k1, (k2 + 1) * s22, k3], [r11, s11 * s22, t11], [r22, s22 ** 2, t22]]) m40 = matrix([[(k1 + 1) * r22, k2, k3], [r22 * r12, s12, t12], [r22 ** 2, s22, t22]]) m41 = matrix([[k1, (k2 + 1) * s22, k3], [r12, s22 * s12, t12], [r22, s22 ** 2, t22]]) Q0 = (k2 + 1) * m00.det() - (k1 + 1) * m01.det() Q1 = (2 * (k2 + 1) * m10.det() - 2 * (k1 + 1) * m11.det() + (k2 + 1) * m12.det() 
- (k1 + 1) * m13.det()) Q2 = 3 * (k2 + 1) * m20.det() - 3 * (k1 + 1) * m21.det() Q3 = (2 * (k2 + 1) * m30.det() - 2 * (k1 + 1) * m31.det() + (k2 + 1) * m32.det() - (k1 + 1) * m33.det()) Q4 = (k2 + 1) * m40.det() - (k1 + 1) * m41.det() Q = Q0 * u1 ** 4 + Q1 * u1 ** 3 * u2 + Q2 * u1 ** 2 * \ u2 ** 2 + Q3 * u1 * u2 ** 3 + Q4 * u2 ** 4 return Q def _rankin_cohen_triple_det_sym8_pol(k1, k2, k3): (r11, r12, r22, s11, s12, s22, t11, t12, t22), (u1, u2) = _triple_gens() def _mat_det(l): return matrix([[r11, s11, t11], [r12, s12, t12], l + [2 * k3]]).det() ls = [[2 * k1 + 6, 2 * k2], [2 * k1 + 4, 2 * k2 + 2], [2 * k1 + 2, 2 * k2 + 4], [2 * k1, 2 * k2 + 6]] coeffs = [(2 * k2 + 2) * (2 * k2 + 4) * (2 * k2 + 6) * r11 ** 3, -3 * (2 * k1 + 6) * (2 * k2 + 4) * (2 * k2 + 6) * r11 ** 2 * s11, 3 * (2 * k1 + 4) * (2 * k1 + 6) * (2 * k2 + 6) * r11 * s11 ** 2, -(2 * k1 + 2) * (2 * k1 + 4) * (2 * k1 + 6) * s11 ** 3] Q0 = sum([c * _mat_det(l) for c, l in zip(coeffs, ls)]) A = matrix([[1, u1], [0, 1]]) def bracketA(a, b, c): R = matrix([[a, b], [b, c]]) a1, b1, _, c1 = (A * R * A.transpose()).list() return (a1, b1, c1) def _subs_dct(rs): return {a: b for a, b in zip(rs, bracketA(*rs))} subs_dct = {} for rs in [[r11, r12, r22], [s11, s12, s22], [t11, t12, t22]]: subs_dct.update(_subs_dct(rs)) Q0_subs = Q0.subs(subs_dct) return sum([Q0_subs[(i, 0)] * u1 ** (8 - i) * u2 ** i for i in range(9)]) def _bracket_vec_val(vecs): if isinstance(vecs[0], SWGElt): v1, v2, v3 = [a.forms for a in vecs] else: v1, v2, v3 = vecs j = len(v1) - 1 def _names(s): return ", ".join([s + str(i) for i in range(j + 1)]) R = PolynomialRing(QQ, names=", ".join([_names(s) for s in ["x", "y", "z"]])) gens_x = R.gens()[: j + 1] gens_y = R.gens()[j + 1: 2 * (j + 1)] gens_z = R.gens()[2 * (j + 1):] S = PolynomialRing(R, names="u, v") u, v = S.gens() def _pol(gens): return sum([a * u ** (j - i) * v ** i for i, a in zip(range(j + 1), gens)]) f_x, f_y, f_z = [_pol(gens) for gens in [gens_x, gens_y, gens_z]] A = matrix([[f_x, f_y], [f_y, f_z]]) vec = matrix([u, v]).transpose() g = (vec.transpose() * A * vec)[0][0] pol_dc = {(i, j + 2 - i): g[(i, j + 2 - i)] for i in range(j + 3)} def pol_to_val(f): dct = {} def _dct(gens, v): return {a: b for a, b in zip(gens, v)} dct.update(_dct(gens_x, v1)) dct.update(_dct(gens_y, v2)) dct.update(_dct(gens_z, v3)) return f.subs(dct) res_dc = {k: pol_to_val(v) for k, v in pol_dc.iteritems()} return [res_dc[(j + 2 - i, i)] for i in range(j + 3)] def vector_valued_rankin_cohen(f, vec_val): ''' Rankin-Cohen type differential operator defined by van Dorp. Let f be a scalar valued Siegel modular form of weight det^k and vec_val be a vector valued Siegel modular form of weight det^l Sym(j). This function returns a vector valued Siegel modular form of weight det^(k + l + 1) Sym(j). 
''' if not (isinstance(f, (QexpLevel1, QseriesTimesQminushalf)) and isinstance(vec_val, SWGElt)): raise TypeError("Arguments are invalid.") sym_wt = vec_val.sym_wt base_ring = _common_base_ring(f.base_ring, vec_val.base_ring) diff_tau = (f.differentiate_wrt_tau(), f.differentiate_wrt_z() * QQ(2) ** (-1), f.differentiate_wrt_w()) def diff_v(vec_val): forms = [i * f for f, i in zip(vec_val.forms[1:], range(1, vec_val.sym_wt + 1))] return SWGElt(forms, vec_val.prec, vec_val.base_ring) def diff_d(vec_val): return [diff_u(diff_u(vec_val)), diff_u(diff_v(vec_val)), diff_v(diff_v(vec_val))] def diff_u(vec_val): forms = [i * f for f, i in zip(vec_val.forms, reversed(range(1, vec_val.sym_wt + 1)))] return SWGElt(forms, vec_val.prec, vec_val.base_ring) crs_prd1 = _cross_prod(diff_tau, diff_d(vec_val)) forms1 = _bracket_vec_val(crs_prd1) prec = common_prec(forms1) res1 = (vec_val.wt + sym_wt // 2 - 1) * SWGElt(forms1, prec, base_ring=base_ring) forms2 = _bracket_vec_val(_cross_prod_diff(diff_d(vec_val))) res2 = f.wt * f * SWGElt(forms2, prec, base_ring=base_ring) res = SWMFE((res1 - res2).forms, f.wt + vec_val.wt + 1, prec, base_ring=base_ring) return res def _cross_prod_diff(vec_vals): f1, f2, f3 = vec_vals def differential_monom(vec_val, a, b, c): forms = [f._differential_operator_monomial(a, b, c) for f in vec_val.forms] return SWGElt(forms, vec_val.prec, vec_val.base_ring) def d1(f): return differential_monom(f, 1, 0, 0) def d2(f): return differential_monom(f, 0, 1, 0) * QQ(2) ** (-1) def d3(f): return differential_monom(f, 0, 0, 1) return [2 * (d1(f2) - d2(f1)), d1(f3) - d3(f1), 2 * (d2(f3) - d3(f2))] def _cross_prod(v1, v2): a, b, c = v1 ad, bd, cd = v2 return (2 * (a * bd - b * ad), a * cd - c * ad, 2 * (b * cd - c * bd)) def m_operator(k1, k2, k3): '''The operator M_k (cf. CH van Dorp Generators for a module of vector-valued Siegel modular forms). ''' gens_triple = _triple_gens() r11, r12, r22, s11, s12, s22, t11, t12, t22 = gens_triple[0] rs = (r11, r12, r22) ss = (s11, s12, s22) ts = (t11, t12, t22) u1, u2 = gens_triple[1] def bracket_op(rs): r1, r2, r3 = rs return r1 * u1 ** 2 + 2 * r2 * u1 * u2 + r3 * u2 ** 2 def x_op_val(f): r, s, t = f.parent().gens() return f.subs({r: bracket_op(rs), s: bracket_op(ss), t: bracket_op(ts)}) def m_op_val(f): r, s, t = f.parent().gens() x_val = x_op_val(f) xs = [k * x_val for k in [k3, k2, k1]] brxs = [bracket_op(a) * x_op_val(f.derivative(b)) for a, b in zip([ts, ss, rs], [t, s, r])] brcks = [bracket_op(_cross_prod(a, b)) for a, b in zip([rs, ts, ss], [ss, rs, ts])] return sum([a * (b + c) for a, b, c in zip(brcks, xs, brxs)]) return m_op_val def rankin_cohen_triple_det_sym(j, f, g, h): ''' Let f, g, h be scalar valued Siegel modular forms of weight k, l, m respectively. Then this returns a vector valued Siegel modular form of weight det^{k + l + m + 1}Sym(j). It uses vector_valued_rankin_cohen. ''' F = rankin_cohen_pair_sym(j, f, g) return vector_valued_rankin_cohen(h, F) def rankin_cohen_triple_det3_sym(j, f, g, h): ''' Let f, g, h be scalar valued Siegel modular forms of weight k, l, m respectively. Then this returns a vector valued Siegel modular form of weight det^{k + l + m + 3}Sym(j). It uses vector_valued_rankin_cohen. ''' F = rankin_cohen_pair_det2_sym(j, f, g) return vector_valued_rankin_cohen(h, F)
17,142
31.467803
89
py
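A usage sketch of the pair brackets above, with the degree-2 Eisenstein series as input (the import path is the one used elsewhere in this repo); by the docstrings, the results have weights det^(4+6) Sym(2) and det^(4+6+2) Sym(2):

from degree2.scalar_valued_smfs import eisenstein_series_degree2

es4 = eisenstein_series_degree2(4, 4)
es6 = eisenstein_series_degree2(6, 4)
F = rankin_cohen_pair_sym(2, es4, es6)
print F.wt, F.sym_wt                        # 10 2
G = rankin_cohen_pair_det2_sym(2, es4, es6)
print G.wt                                  # 12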
degree2
degree2-master/basic_operation.py
# -*- coding: utf-8 -*- import multiprocessing from sage.all import Integer, ZZ, gcd, QQ, mod, floor, sqrt from sage.misc.cachefunc import cached_function from degree2.utils import (list_group_by, partition_weighted, _is_triple_of_integers, pmap) def _common_base_ring(r1, r2): if r1.has_coerce_map_from(r2): return r1 elif r2.has_coerce_map_from(r1): return r2 else: raise NotImplementedError def common_base_ring(forms): return reduce(_common_base_ring, [x.base_ring for x in forms]) def common_prec(forms): if all(f.prec.type == "diag_max" for f in forms): return PrecisionDeg2(min([f.prec.value for f in forms])) # else a_prec = forms[0].prec if all([a_prec == f.prec for f in forms[1:]]): return a_prec else: raise NotImplementedError class PrecisionDeg2(object): ''' An instance of this class is immutable and used for a dictionary's key. ''' def __init__(self, prec): if isinstance(prec, PrecisionDeg2): self.__prec = prec.__prec self.__type = prec.__type elif isinstance(prec, (int, Integer)): self.__prec = prec self.__type = "diag_max" elif isinstance(prec, (frozenset, set, tuple, list)) \ and all([_is_triple_of_integers(a) for a in prec]): self.__prec = frozenset(prec) self.__type = "tuples" else: raise TypeError("self must be an integer or " + "a collection of tuples of integers.") def _to_format_dct(self): return {"type": self.type, "prec": self.value} def __hash__(self): return self.value.__hash__() @classmethod def _from_dict_to_object(cls, data_dict): if isinstance(data_dict, (int, Integer)): return cls(data_dict) else: return cls(data_dict["prec"]) def __str__(self): if self.type == "diag_max": return "diag_max " + str(self.value) elif self.type == "tuples": return "tuples " + str(list(self.value)) def __repr__(self): return str(self) @property def prec(self): raise DeprecationWarning("Use 'value' instead.") @property def value(self): return self.__prec def _max_value(self): ''' Returns max([max(n, m) for n, r, m in self]). ''' if self.type == "tuples": return max([max(n, m) for n, _, m in self.value]) elif self.type == "diag_max": return self.value else: raise NotImplementedError @property def type(self): return self.__type def __iter__(self): if self.type == "diag_max": for t in semi_pos_def_matarices(self.value): yield t elif self.type == "tuples": res = set([]) for t in self.value: res.update(_spos_def_mats_lt(t)) for t in res: yield t def pos_defs(self): for t in self: n, r, m = t if 4 * n * m - r ** 2 != 0: yield t def group_by_reduced_forms(self): ''' In list(self), we define equivalent relation ~ by t1, t2 in list(self), rank(t1) == rank(t2) and if rank(t1) == 0, t1 ~ t2, if and only if t2 == (0, 0, 0) if rank(t1) == 1, t1 ~ t2, if and only if gcd(t1) == gcd(t2), if rank(t1) == 2, t1 ~ t2 if and only if reduced forms of t1 and t2 are equal. Then this function returns a dictionary such that rep => equiv_class where rep is an element of representatives of this equivalent class and equiv_class is a equivalent class that contains rep. 
''' r0 = [] r1 = [] r2 = [] for t in self: n, r, m = t if t == (0, 0, 0): r0.append(t) elif 4 * n * m - r ** 2 == 0: r1.append(t) else: r2.append(t) res0 = {(0, 0, 0): set(r0)} res1 = {ls[0]: ls for k, ls in list_group_by(r1, lambda t: gcd([QQ(x) for x in t]))} res2 = {ls[0]: ls for k, ls in list_group_by(r2, lambda x: reduced_form_with_sign(x)[0])} res = {} for dct in [res0, res1, res2]: res.update(dct) return res def group_by_reduced_forms_with_sgn(self): ''' Returns a dictionary whose keys are representatives of equivalent class of list(self.pos_defs()). Its value at (n, r, m) is the list of ((n1, r1, m1), sgn) where (n1, r1, m1) is unimodular equivalent to (n, r, m) and sgn is 1 if reduced_form_with_sign((n, r, m))[0] is (n1, r1, m1) and reduced_form_with_sign((n, r, m))[1] == 1 otherwise -1. ''' pos_forms = [] for t in self.pos_defs(): rdf, sgn = reduced_form_with_sign(t) pos_forms.append((t, rdf, sgn)) grpd_by_rdf = list_group_by(pos_forms, lambda x: x[1]) res = {} for _, ls in grpd_by_rdf: a_tupl, _, a_sgn = ls[0] res[a_tupl] = [(t, _sgn * a_sgn) for t, __, _sgn in ls] return res def __eq__(self, other): if not isinstance(other, PrecisionDeg2): return False elif self.type == other.type and self.value == other.value: return True else: return set(self) == set(other) def __ne__(self, other): return not self == other def __ge__(self, other): ''' Returns True if and only if set(self) contains set(other). ''' if not isinstance(other, PrecisionDeg2): raise NotImplementedError elif self.type == other.type and self.type == "diag_max": return self.value >= other.value elif other.type == "tuples": return set(self).issuperset(set(other.value)) else: return set(self).issuperset(set(other)) def __le__(self, other): ''' Returns True if and only if set(self) is a subset of set(other). ''' if not isinstance(other, PrecisionDeg2): return NotImplementedError elif self.type == other.type and self.type == "diag_max": return self.value <= other.value elif self.type == "tuples": return set(self.value).issubset(set(other)) else: return set(self).issubset(set(other)) def __gt__(self, other): return self >= other and self != other def __lt__(self, other): return self <= other and self != other def _phi_operator_prec(self): ''' Used for calculating phi_operator. ''' if self.type == "diag_max": for t in range(self.value + 1): yield t elif self.type == "tuples": mx = max([t[0] for t in self.value]) for t in range(mx + 1): if (t, 0, 0) in self: yield t else: raise NotImplementedError class WithNumOfProcs(object): def __init__(self, n): self.n = n self.save = current_num_of_procs.num_of_procs def __enter__(self): current_num_of_procs.set_num_of_procs(self.n) def __exit__(self, err_type, value, traceback): current_num_of_procs.set_num_of_procs(self.save) def number_of_procs(n): return WithNumOfProcs(n) class CurrentNumOfProcs(object): def __init__(self): self._procs = multiprocessing.cpu_count() @property def num_of_procs(self): return self._procs def set_num_of_procs(self, num): self._procs = num current_num_of_procs = CurrentNumOfProcs() def reduced_form_with_sign(tpl): ''' Assuming the 2-by-2 matrix correspoding to tpl is positive definite, returns ((n, r, m), sgn) where (n, r, m) is unmimodular equivalent to tpl s.t. n <= m and 0 <= r <= n. sgn is the determinant of an element GL2(ZZ) that gives the unimodular equivalence. 
''' n, r, m = [ZZ(x) for x in tpl] if 4 * n * m - r ** 2 == 0: raise RuntimeError("tpl must be definite.") sign = 1 while True: if n <= m and r >= 0 and r <= n: return ((n, r, m), sign) if n > m: sign *= -1 n, m = m, n rem = mod(r, 2 * n) if rem > n: u = r // (2 * n) + 1 else: u = r // (2 * n) m = n * u ** 2 - r * u + m r = r - 2 * n * u if r < 0: sign *= -1 r *= -1 def semi_pos_def_matarices(bd): ''' Generates tuples (n, r, m) such that 0 <= n, m, 4nm - r^2 and n <= bd and m <= bd. ''' for n in range(bd + 1): for m in range(bd + 1): a = 2 * bd yield (n, 0, m) for r in range(1, a + 1): if r ** 2 <= 4 * n * m: yield (n, r, m) yield (n, -r, m) def _spos_def_mats_lt(tpl): ''' Returns an iterator of tuples. ''' n, r, m = tpl for n1 in range(n + 1): for m1 in range(m + 1): a = 4 * (n - n1) * (m - m1) if r ** 2 <= a: yield (n1, 0, m1) sq = int(floor(2 * sqrt(n1 * m1))) for r1 in range(1, sq + 1): if (r - r1) ** 2 <= a: yield (n1, r1, m1) if (r + r1) ** 2 <= a: yield (n1, -r1, m1) def _key_of_tuples(prec, cuspidal=False, hol=False): if cuspidal and not hol: return list(PrecisionDeg2(prec).pos_defs()) elif hol and cuspidal: return prec.group_by_reduced_forms_with_sgn().keys() elif hol and not cuspidal: return prec.group_by_reduced_forms().keys() else: return list(PrecisionDeg2(prec)) @cached_function def _partition_add_fourier(prec, cuspidal=False, hol=False, num_of_procs=current_num_of_procs.num_of_procs): lst = _key_of_tuples(prec, cuspidal, hol) return partition_weighted(lst, num_of_procs) @cached_function def _partition_mul_fourier(prec, cuspidal=False, hol=False, num_of_procs=current_num_of_procs.num_of_procs): tpls = _key_of_tuples(prec, cuspidal, hol) def weight_fn(x): n, r, m = x return max(16.0 / 9.0 * (ZZ(n) * ZZ(m)) ** (1.5) - ZZ(n) * ZZ(m) * abs(r), 0) return partition_weighted(tpls, num_of_procs, weight_fn) def _dict_parallel(f, ls): if current_num_of_procs.num_of_procs == 1: return f(ls[0]) res = {} for d in pmap(f, ls): res.update(d) return res def _mul_fourier(mp1, mp2, prec, cuspidal=False, hol=False): ''' Returns the dictionary of the product of Fourier series correspoding to mp1 and mp2. ''' tupls_s = _partition_mul_fourier( prec, cuspidal=cuspidal, hol=hol, num_of_procs=current_num_of_procs.num_of_procs) def _mul_fourier1(tupls): return {(n, r, m): sum((mp1[(n0, r0, m0)] * mp2[(n - n0, r - r0, m - m0)] for n0, r0, m0 in _spos_def_mats_lt((n, r, m)))) for (n, r, m) in tupls} return _dict_parallel(_mul_fourier1, tupls_s) def _add_fourier(mp1, mp2, prec, cuspidal=False, hol=False): ts_s = _partition_add_fourier( prec, cuspidal=cuspidal, hol=hol, num_of_procs=current_num_of_procs.num_of_procs) def _add_fourier1(ts): return {t: mp1[t] + mp2[t] for t in ts} return _dict_parallel(_add_fourier1, ts_s) def _mul_fourier_by_num(fc_dct, a, prec, cuspidal=False, hol=False): tss = _partition_add_fourier( prec, cuspidal=cuspidal, hol=hol, num_of_procs=current_num_of_procs.num_of_procs) def _mul_fourier_by_num1(ts): return {t: a * fc_dct[t] for t in ts} return _dict_parallel(_mul_fourier_by_num1, tss)
12,176
28.555825
82
py
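A quick sketch of the reduction and precision helpers above; the expected values follow directly from the definitions:

print reduced_form_with_sign((2, 3, 2))   # ((1, 1, 2), 1)
prec = PrecisionDeg2(1)
print len(list(prec))                     # 8 semi positive definite tuples
print sorted(prec.pos_defs())             # [(1, -1, 1), (1, 0, 1), (1, 1, 1)]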
degree2
degree2-master/interpolate.py
# -*- coding: utf-8 -*- import multiprocessing from degree2.basic_operation import PrecisionDeg2 from degree2.elements import ModFormQexpLevel1, QexpLevel1 from degree2.utils import group, mul, pmap from sage.all import QQ, ZZ, PolynomialRing, floor, matrix, sqrt def _to_polynomial(f, val1): prec = f.prec.value R = PolynomialRing(QQ if f.base_ring == ZZ else f.base_ring, names="q1, q2") q1, q2 = R.gens() I = R.ideal([q1 ** (prec + 1), q2 ** (prec + 1)]) S = R.quotient_ring(I) res = sum([sum([f.fc_dct.get((n, r, m), 0) * QQ(val1) ** r for r in range(-int(floor(2 * sqrt(n * m))), int(floor(2 * sqrt(n * m))) + 1)]) * q1 ** n * q2 ** m for n in range(prec + 1) for m in range(prec + 1)]) return S(res) def det_deg2(mat, autom=True, wt=None, num_of_procs=multiprocessing.cpu_count()): ''' Returns det(mat) by interpolatation. Result is a Siegel modular form. ''' n = len(mat) bd = mat[0][0].prec.value forms_flatten = reduce(lambda x, y: x + y, mat) def func(l): return matrix(group(l, n)).det() if autom: return calc_forms(func, forms_flatten, bd, autom=True, wt=wt, num_of_procs=num_of_procs) else: return calc_forms(func, forms_flatten, bd, autom=False, num_of_procs=num_of_procs) def interpolate_deg2(dct, bd, autom=True, parity=None): '''parity is 0 if the parity of the weight and the character coincide else 1. ''' t_ring = PolynomialRing(QQ, names="t") t = t_ring.gens()[0] u_ring = PolynomialRing(QQ, names="u") u = u_ring.gens()[0] # lift the values of dct dct = {k: v.lift() for k, v in dct.items()} def interpolate_pol(x, d): prd = mul([x - a for a in d]) prd_dff = prd.derivative(x) return sum([v * prd_dff.subs({x: k}) ** (-1) * prd // (x - k) for k, v in d.items()]) def t_pol_dct(n, m): if not autom: dct_t = {a: v[(n, m)] * a ** (2 * bd) for a, v in dct.items()} return t_ring(interpolate_pol(t, dct_t)) # put u = t + t^(-1) elif parity == 0: dct_u = {a + a ** (-1): v[(n, m)] for a, v in dct.items()} u_pol = interpolate_pol(u, dct_u) return t_ring(t ** (2 * bd) * u_pol.subs({u: t + t ** (-1)})) else: dct_u = {a + a ** (-1): v[(n, m)] / (a - a ** (-1)) for a, v in dct.items()} u_pol = interpolate_pol(u, dct_u) return t_ring(t ** (2 * bd) * u_pol.subs({u: t + t ** (-1)}) * (t - t ** (-1))) fc_dct = {} for n in range(bd + 1): for m in range(bd + 1): pl = t_pol_dct(n, m) for r in range(-int(floor(2 * sqrt(n * m))), int(floor(2 * sqrt(n * m))) + 1): fc_dct[(n, r, m)] = pl[r + 2 * bd] return fc_dct def calc_forms(func, forms, prec, autom=True, wt=None, num_of_procs=multiprocessing.cpu_count()): ''' func is a function which takes forms as an argument. Calculate func(forms) by interpolation. ''' bd = prec.value if isinstance(prec, PrecisionDeg2) else prec parity = wt % 2 if autom else None if not autom: t_vals = [QQ(a) for a in range(-2 * bd, 0) + range(1, 2 * bd + 2)] elif parity == 0: t_vals = [QQ(a) for a in range(1, 2 * bd + 2)] else: t_vals = [QQ(a) for a in range(2, 2 * bd + 2)] def _f(r): return (r, func([_to_polynomial(f, r) for f in forms])) t_dct = dict(pmap(_f, t_vals, num_of_procs=num_of_procs)) fc_dct = interpolate_deg2(t_dct, bd, autom=autom, parity=parity) if not autom: return QexpLevel1(fc_dct, bd) else: return ModFormQexpLevel1(wt, fc_dct, bd)
3,922
34.026786
90
py
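A sketch of calc_forms: the given function is evaluated on polynomial specializations of the forms at finitely many t-values and the Fourier expansion is recovered by interpolation, so computing a product this way should agree with direct multiplication (this mirrors what det_deg2 does with a determinant; eisenstein_series_degree2 is imported as elsewhere in this repo):

from degree2.scalar_valued_smfs import eisenstein_series_degree2

es4 = eisenstein_series_degree2(4, 3)
es6 = eisenstein_series_degree2(6, 3)
f = calc_forms(lambda l: l[0] * l[1], [es4, es6], 3, autom=True, wt=10)
print f[(1, 1, 1)] == (es4 * es6)[(1, 1, 1)]   # True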
degree2
degree2-master/vector_valued_impl/utils.py
# -*- coding: utf-8; mode: sage -*- import os data_dir = os.path.join(os.getenv("HOME"), "data/vector_valued")
112
21.6
64
py
degree2
degree2-master/vector_valued_impl/__init__.py
0
0
0
py
degree2
degree2-master/vector_valued_impl/sym2/odd_structure.py
''' This module provides functions gen_consts and ignored_dct. cf. Ibukiyama, Vector valued Siegel modular forms of symmetric tensor weight of small degrees. ''' from degree2.const import ScalarModFormConst as SMFC from degree2.const import ConstVectValued def cvv(w1, w2, w3): return ConstVectValued(2, [SMFC([w]) for w in [w1, w2, w3]], inc=1, tp=None) def gen_consts(): return [cvv(*w) for w in [(4, 6, 10), (4, 6, 12), (4, 10, 12), (6, 10, 12)]] def ignored_dct(): return {cvv(6, 10, 12): [4]}
556
23.217391
76
py
degree2
degree2-master/vector_valued_impl/sym2/even_structure.py
'''This module provides functions gen_consts and ignored_dct. cf. Satoh, On vector valued Siegel modular forms of degree two. ''' from degree2.const import ScalarModFormConst as SMFC from degree2.const import ConstVectValued def cvv(w1, w2): return ConstVectValued(2, [SMFC([w1]), SMFC([w2])], inc=0, tp=None) def gen_consts(): return [cvv(w1, w2) for w1, w2 in [(4, 6), (4, 10), (4, 12), (6, 10), (6, 12), (10, 12)]] def ignored_dct(): return {cvv(6, 10): [4], cvv(6, 12): [4], cvv(10, 12): [4, 6]}
531
25.6
71
py
degree2
degree2-master/vector_valued_impl/sym2/__init__.py
0
0
0
py
degree2
degree2-master/vector_valued_impl/tests/test_eigenvalue.py
from sage.all import (ComplexField, NumberField, PolynomialRing, CuspForms,
                      QQ, CartesianProduct, fork)
import unittest
from degree2.vector_valued_smfs import vector_valued_siegel_modular_forms \
    as vvsmf
from degree2.basic_operation import number_of_procs


def _hecke_pol_klingen(k, j):
    '''k: even. F: Klingen-Eisenstein series of determinant weight k whose
    Hecke field is the rational field.
    Return the Hecke polynomial of F at 2.
    '''
    f = CuspForms(1, k + j).basis()[0]
    R = PolynomialRing(QQ, names="x")
    x = R.gens()[0]
    pl = QQ(1) - f[2] * x + QQ(2) ** (k + j - 1) * x ** 2
    return pl * pl.subs({x: x * QQ(2) ** (k - 2)})


def _hecke_pol_krs_lift():
    '''Return the Hecke polynomial of the KRS lift of weight det^{13}Sym(10)
    at 2.
    '''
    R = PolynomialRing(QQ, names="x")
    x = R.gens()[0]
    f = CuspForms(1, 12).basis()[0]
    a = f[2]
    b = QQ(2) ** 11
    return ((1 - (a ** 3 - 3 * a * b) * x + b ** 3 * x ** 2) *
            (1 - a * b * x + b ** 3 * x ** 2))


class RamanujanConjandKlingen(unittest.TestCase):

    def assert_hecke_eigen_values(self, f, complex_prec=300):
        '''f is a Hecke eigenform.
        Assert that, for all embeddings of the Hecke field into the
        ComplexField, the absolute values of the roots of the Hecke
        polynomial are close to 1 if f is a cusp form.
        If f is not a cusp form and the base_ring is the rational field,
        test the Hecke eigenvalues of the Klingen-Eisenstein series.
        '''
        charpoly = f.base_ring.polynomial()
        CC = ComplexField(prec=complex_prec)
        self.assertTrue(charpoly.is_irreducible(),
                        "charpoly is not irreducible.")
        K = f.base_ring
        pl = f.euler_factor_of_standard_l(2)
        if K == QQ:
            embeddings = [lambda x: x]
        else:
            embeddings = K.complex_embeddings(prec=complex_prec)
        if f.phi_operator() == {}:
            print "Test the Ramanujan conjecture when k = %s, j = %s" % (f.wt, f.sym_wt)
            for phi in embeddings:
                pl_cc = pl.map_coefficients(phi)
                R = PolynomialRing(CC, names=("x",))
                max_diff = max((a.abs() - CC(1)).abs()
                               for a, _ in R(pl_cc).roots())
                self.assertLess(max_diff, CC(2) ** (-complex_prec + 1))
        elif f.base_ring.degree() == 1:
            print "Test Klingen-Eisenstein series when k = %s, j = %s" % (f.wt, f.sym_wt)
            self.assertEqual(f.euler_factor_of_spinor_l(2),
                             _hecke_pol_klingen(f.wt, f.sym_wt))

    def test_ramanujan_conj_and_klingen(self):
        '''Test the Ramanujan conjecture for eigenforms of determinant
        weights less than or equal to 29 and the Hecke eigenvalues of
        Klingen-Eisenstein series.
        '''
        prec = 10

        @fork
        def _check(k, j):
            M = vvsmf(j, k, prec)
            if M.dimension() > 0:
                self.assertEqual(M.dimension(), len(M.basis()))
                _chply = M.hecke_charpoly(2)
                for cply, _ in _chply.factor():
                    K = NumberField(cply, names="a")
                    a = K.gens()[0]
                    f = M.eigenform_with_eigenvalue_t2(a)
                    self.assert_hecke_eigen_values(f)

        with number_of_procs(1):
            for k, j in CartesianProduct(range(4, 30), [2, 4, 10]):
                _check(k, j)


suite = unittest.TestLoader().loadTestsFromTestCase(RamanujanConjandKlingen)
unittest.TextTestRunner(verbosity=2).run(suite)
3,563
38.6
88
py
degree2
degree2-master/vector_valued_impl/tests/__init__.py
0
0
0
py
degree2
degree2-master/vector_valued_impl/sym4/odd_structure.py
''' This module provides functions gen_consts and ignored_dct. cf. Ibukiyama, Vector valued Siegel modular forms of symmetric tensor weight of small degrees. ''' from degree2.const import ScalarModFormConst as SMFC from degree2.const import ConstVectValued def cvv(w1, w2, w3): return ConstVectValued(4, [SMFC([w]) for w in [w1, w2, w3]], inc=1, tp=None) def gen_consts(): return [cvv(*w) for w in [(4, 4, 6), (4, 6, 6), (4, 4, 10), (4, 4, 12), (4, 6, 12)]] def ignored_dct(): return {}
545
22.73913
76
py
degree2
degree2-master/vector_valued_impl/sym4/even_structure.py
''' This module provides functions gen_consts and ignored_dct. cf. Ibukiyama, Vector valued Siegel modular forms of symmetric tensor weight of small degrees. ''' from degree2.const import ScalarModFormConst as SMFC from degree2.const import ConstVectValued def cvv(w1, w2, inc=0): return ConstVectValued(4, [SMFC([w]) for w in [w1, w2]], inc=inc, tp=None) def gen_consts(): return [cvv(4, 4), cvv(4, 6), cvv(4, 6, 2), cvv(4, 10), cvv(6, 10)] def ignored_dct(): return {}
517
22.545455
76
py
degree2
degree2-master/vector_valued_impl/sym4/__init__.py
0
0
0
py
degree2
degree2-master/vector_valued_impl/sym10/relation.py
'''
This module provides a function 'relation'
that returns a linear relation as a dictionary
among generators.
'''
from sage.all import QQ, gcd
from degree2.vector_valued_smfs \
    import vector_valued_siegel_modular_forms as vvsmf


def relation(wt, data_directory=None):
    '''For a given weight wt, this function returns a dict whose set of keys
    is equal to a set of instances of ConstMul with weight wt.
    Its value is a rational number.
    This dictionary represents a relation among keys.
    '''
    wts = (24, 26, 27, 29)
    if wt not in wts:
        raise ValueError("The weight must be in %s" % (wts,))
    prec = 6
    M = vvsmf(10, wt, prec, data_directory=data_directory)
    mul_consts = M._basis_const_base([])
    basis_consts = list(M._basis_const())
    another_const = [c for c in mul_consts if c not in basis_consts][0]
    f = another_const.calc_form_from_dependencies_depth_1(
        prec, M._calculator.forms_dict(prec))
    coeffs = list(M._to_vector(f)) + [QQ(-1)]
    _gcd = gcd(coeffs)
    coeffs = [a / _gcd for a in coeffs]
    return {c: a for a, c in zip(coeffs, basis_consts + [another_const])}
1,135
36.866667
82
py
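A sketch of how the returned dictionary is read: the coefficients rel[c] give a linear relation sum_c rel[c] * F_c = 0 among the weight-24 constructions. This needs the precomputed generator data that vvsmf loads; data_dir is the default location defined in vector_valued_impl/utils.py above:

from degree2.vector_valued_impl.utils import data_dir

rel = relation(24, data_directory=data_dir)
for const, coeff in rel.items():
    print const.weight(), coeff   # every key has weight 24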
degree2
degree2-master/vector_valued_impl/sym10/odd_structure.py
# -*- coding: utf-8 -*- '''This module provides functions gen_consts and ignored_dct.''' from degree2.const import ScalarModFormConst as SMFC from degree2.const import (CalculatorVectValued, ConstDivision, ConstMul, ConstVectValued) from sage.all import QQ, PolynomialRing from degree2.vector_valued_impl.utils import data_dir # from vector_valued_const.const import ConstVectValuedHeckeOp as CVH def sym10_odd_hilbert_series_low_prec(): R = PolynomialRing(QQ, names="t") t = R.gens()[0] return (t ** 9 + t ** 11 + 2 * t ** 13 + 5 * t ** 15 + 6 * t ** 17 + 9 * t ** 19 + 13 * t ** 21 + 16 * t ** 23 + 21 * t ** 25 + 28 * t ** 27) def sym10_odd_hilbert_series_num(): R = PolynomialRing(QQ, names="t") t = R.gens()[0] return (-t ** 29 - t ** 27 + t ** 23 + t ** 21 + 2 * t ** 19 + 3 * t ** 17 + 3 * t ** 15 + t ** 13 + t ** 11 + t ** 9) def cvv(cs, inc, tp=None): return ConstVectValued(10, cs, inc, tp=tp) # rank 5 sym10_wt17_consts = [cvv([SMFC([4]), SMFC([6]), SMFC([6])], 1), cvv([SMFC([4]), SMFC([4, 4]), SMFC([4])], 1), cvv([SMFC([5]), SMFC([6]), SMFC([5])], 1), cvv([SMFC([4]), SMFC([5]), SMFC([5])], 3), cvv([SMFC([4]), SMFC([6]), SMFC([4])], 3)] sym10_19_consts = [cvv([SMFC([4]), SMFC([5]), SMFC([4, 5])], 1), cvv([SMFC([4]), SMFC([6]), SMFC([4, 4])], 1), cvv([SMFC([4]), SMFC([4, 4]), SMFC([6])], 1), cvv([SMFC([4]), SMFC([4, 5]), SMFC([5])], 1), cvv([SMFC([4]), SMFC([4, 6]), SMFC([4])], 1), cvv([SMFC([5]), SMFC([4, 4]), SMFC([5])], 1), cvv([SMFC([5]), SMFC([4, 5]), SMFC([4])], 1), cvv([SMFC([6]), SMFC([4, 4]), SMFC([4])], 1)] def _sym10_wt9_const(): v = [-QQ(1018769) / QQ(702364979796480), QQ(19951781) / QQ(4182551037842817024000), -QQ(1562093) / QQ(1021566751446996615168), QQ(140639) / QQ(155067592942080), -QQ(12235063) / QQ(7946846971901352345600), -QQ(7200827) / QQ(6494946488997120), -QQ(6469999) / QQ(1053547469694720), -QQ(5521031) / QQ(327807437590930784256000)] return ConstDivision(sym10_19_consts, v, SMFC([10]), 1) def _sym10_wt19_mul_const(): c = _sym10_wt9_const() return ConstMul(c, SMFC([4, 6])) def _sym10_wt11_const(): v = [-QQ(2961689) / QQ(67743535860), QQ(29712421) / QQ(67234938236928000), -QQ(56816381) / QQ(246326859434557440), QQ(262364) / QQ(3739091265), -QQ(64486531) / QQ(383239147950489600), -QQ(151658893) / QQ(1252883196180), -QQ(54108317) / QQ(101615303790), -QQ(365201621) / QQ(11856461139718272000), -QQ(1)] consts = sym10_19_consts + [_sym10_wt19_mul_const()] return ConstDivision(consts, v, SMFC([4, 4]), 0) def _sym10_wt13_const(): '''Returns a construction for the Kim-Ramakrishnan-Shahidi lift of the Ramanujan's delta.''' v = [-QQ(197629) / QQ(592509060), -QQ(5285311) / QQ(5292546158592000), QQ(1020103) / QQ(5027078763970560), -QQ(50701) / QQ(228923955), QQ(494651) / QQ(2011167540264960), QQ(876421) / QQ(4512184380), QQ(2852711) / QQ(2666290770), -QQ(10129591) / QQ(622204957769472000), -QQ(1)] consts = sym10_19_consts + [_sym10_wt19_mul_const()] return ConstDivision(consts, v, SMFC([6]), 0) def _sym10_wt15_consts(): vs = [[QQ(3567) / QQ(182), QQ(0), QQ(0), QQ(0), -QQ(91) / QQ(94556160), QQ(195) / QQ(68), -QQ(131) / QQ(12), -QQ(169) / QQ(59570380800), QQ(0)], [QQ(0), QQ(1) / QQ(19508428800), QQ(0), QQ(0), QQ(0), QQ(0), QQ(0), QQ(0), QQ(0)], [QQ(0), QQ(0), QQ(0), QQ(7134) / QQ(1105), -QQ(23749) / QQ(2316625920), QQ(1079) / QQ(1020), -QQ(50573) / QQ(1092), QQ(2489467) / QQ(416992665600), QQ(0)]] consts = sym10_19_consts + [_sym10_wt19_mul_const()] return [ConstDivision(consts, v, SMFC([4]), 0) for v in vs] def _sym10_wt15_mul_const1(): c = _sym10_wt9_const() return ConstMul(c, 
SMFC([6])) def _sym10_wt15_mul_const2(): c = _sym10_wt11_const() return ConstMul(c, SMFC([4])) # def sym10wt9(prec): # const = _sym10_wt9_const() # return const.calc_form(prec) def _sym10_wt17_consts(): return sym10_wt17_consts[:3] def _sym10_wt19_consts(): return [sym10_19_consts[i] for i in [0, 2]] sym10_wt21_consts = [cvv([SMFC([4]), SMFC([4, 4]), SMFC([4, 4])], 1), cvv([SMFC([4]), SMFC([6]), SMFC([4, 6])], 1), cvv([SMFC([4]), SMFC([6]), SMFC([10])], 1)] def _sym10_wt21_const(): return sym10_wt21_consts[2] def _sym10_wt23_const(): return cvv([SMFC([4]), SMFC([6]), SMFC([12])], 1) def odd_consts(): '''A list of constructions needed for calculation of generators. ''' res = [] res.extend(sym10_19_consts) res1 = [_sym10_wt9_const(), _sym10_wt19_mul_const(), _sym10_wt11_const(), _sym10_wt13_const()] res.extend(res1) res.extend(_sym10_wt15_consts()) res.extend(_sym10_wt17_consts()) res.append(_sym10_wt21_const()) res.append(_sym10_wt23_const()) return res def gen_consts(): '''A list of constructions of generators of M_{Sym(10)}^{odd}. ''' res = [_sym10_wt9_const(), _sym10_wt11_const(), _sym10_wt13_const()] res.extend(_sym10_wt15_consts()) res.extend(_sym10_wt17_consts()) res.extend(_sym10_wt19_consts()) res.append(_sym10_wt21_const()) res.append(_sym10_wt23_const()) return res def ignored_dct(): consts = gen_consts() consts = [c for c in consts if c.weight() in [21, 23]] return {c: [6] for c in consts} calculator = CalculatorVectValued(gen_consts(), data_dir) # calculator.calc_forms_and_save(5, verbose=True) # calculator23 = CalculatorVectValued(sym10_wt23_consts, data_dir)
6,057
31.223404
86
py
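A sketch of how these constructions are consumed (the commented-out calls above do the same at higher precision), assuming the cached forms expose wt and sym_wt like the other vector-valued forms in this repo: CalculatorVectValued computes each generator up to the given precision and caches it under data_dir, and forms_dict loads the cache keyed by construction:

calculator.calc_forms_and_save(3, verbose=True)   # writes cache files in data_dir
d = calculator.forms_dict(3)
f9 = d[_sym10_wt9_const()]                        # the det^9 Sym(10) generator
print f9.wt, f9.sym_wt                            # 9 10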
degree2
degree2-master/vector_valued_impl/sym10/even_structure.py
# -*- coding: utf-8; mode: sage -*-
'''This module provides functions gen_consts and ignored_dct.
'''
from sage.all import Integer, matrix
import os
from degree2.const import ScalarModFormConst as SMFC
from degree2.const import (CalculatorVectValued, ConstDivision, ConstMul,
                           ConstVectValued)
from degree2.utils import find_linearly_indep_indices
from degree2.all import ModularFormsDegree2
from degree2.basic_operation import PrecisionDeg2
from degree2.interpolate import det_deg2
from degree2.scalar_valued_smfs import (eisenstein_series_degree2,
                                        ModFormQexpLevel1, x35_with_prec)
from degree2.vector_valued_impl.utils import data_dir
# from degree2.const import ConstVectValuedHeckeOp as CVH


def cvv(cs, inc=0, tp=None):
    return ConstVectValued(10, cs, inc, tp=tp)


def rank_of_forms(forms, prec=5):
    ts = [(t, i) for t in PrecisionDeg2(prec) for i in range(11)]
    m = matrix([[f[t] for t in ts] for f in forms])
    r = m.rank()
    return (r, find_linearly_indep_indices(list(m), r))


_wt12_consts = [cvv([SMFC([4]), SMFC([4, 4])]),
                cvv([SMFC([4]), SMFC([6])], inc=2)]

# wt14_consts = _wt14_consts + [_wt14_const_mul()]

# _wt16_consts = [
#     cvv([SMFC([4]), SMFC([4, 4, 4])]),
#     cvv([SMFC([4]), SMFC([6, 6])]),
#     cvv([SMFC([4]), SMFC([12])]),
#     cvv([SMFC([5]), SMFC([5, 6])]),
#     cvv([SMFC([6]), SMFC([10])]),
#     cvv([SMFC([6]), SMFC([4, 6])]),
#     cvv([SMFC([4]), SMFC([4, 6])], inc=2),
#     cvv([SMFC([4]), SMFC([10])], inc=2),
#     cvv([SMFC([5]), SMFC([4, 5])], inc=2),
#     cvv([SMFC([6]), SMFC([4, 4])], inc=2)]

# def _wt16_mul_consts():
#     consts4 = [ConstMul(c, es4_with_prec) for c in wt12_consts]
#     consts6 = [ConstMul(c, es6_with_prec) for c in wt10_consts]
#     return consts4 + consts6

# wt16_consts = _wt16_consts + _wt16_mul_consts()

_wt18_consts = [cvv([SMFC([4]), SMFC([4, 10])]),
                cvv([SMFC([4]), SMFC([4, 4, 6])]),
                cvv([SMFC([5]), SMFC([5, 4, 4])]),
                cvv([SMFC([6]), SMFC([4, 4, 4])]),
                cvv([SMFC([6]), SMFC([6, 6])]),
                cvv([SMFC([6]), SMFC([12])]),
                cvv([SMFC([4, 4]), SMFC([10])]),
                cvv([SMFC([4, 4]), SMFC([4, 6])]),
                cvv([SMFC([4]), SMFC([4, 4, 4])], inc=2),
                cvv([SMFC([4]), SMFC([6, 6])], inc=2),
                cvv([SMFC([4]), SMFC([12])], inc=2),
                cvv([SMFC([5]), SMFC([5, 6])], inc=2)]


def _wt18_mul_basis(prec):
    d = calculator.forms_dict(prec)
    res = []
    alst = [([_wt6_const()], 12),
            ([_wt8_const()], 10),
            (_wt10_consts, 8),
            (_wt12_consts, 6),
            (_wt14_consts, 4)]
    for consts, wt in alst:
        res.extend([d[c] * f for c in consts
                    for f in ModularFormsDegree2(wt, prec).basis()])
    res.extend([d[c] for c in _wt18_consts])
    return res


def _wt6_const():
    coeffs = [Integer(756104669) / Integer(1684996983384000),
              Integer(60317) / Integer(22505698838937600),
              -Integer(77703239) / Integer(19230943832100),
              Integer(1117936087) / Integer(165889506141809049600),
              -Integer(11163571) / Integer(2111320987259387904),
              Integer(1522464953) / Integer(800007263415360),
              Integer(17042897) / Integer(36630369204000),
              Integer(3559) / Integer(16879274129203200),
              Integer(121558417) / Integer(567776583521072640000),
              -Integer(185407) / Integer(806407321522682880),
              Integer(270817) / Integer(12777893790662),
              -Integer(159424) / Integer(192309438321)]
    return ConstDivision(_wt18_consts, coeffs, SMFC([12]), 1)


def _wt8_const():
    coeffs = [-Integer(4730030099) / Integer(2808328305640000),
              -Integer(20243401) / Integer(1519134671628288000),
              Integer(14794567) / Integer(4578796150500),
              -Integer(3131451079) / Integer(207361882677261312000),
              Integer(7172507) / Integer(6333962961778163712),
              -Integer(108119239) / Integer(38095583972160),
              Integer(9381479) / Integer(11446990376250),
              -Integer(8257981) / Integer(1519134671628288000),
              -Integer(5603832011) / Integer(9936090211618771200000),
              Integer(251287) / Integer(4121637421115934720),
              -Integer(79559141) / Integer(255557875813240),
              -Integer(886304) / Integer(1236274960635)]
    return ConstDivision(_wt18_consts, coeffs, SMFC([10]), 1)


def _wt18_mul_const():
    return ConstMul(_wt8_const(), SMFC([4, 6]))

# wt18_consts = _wt18_consts + [CVH(_wt18_consts[0], 2)]
wt18_consts = _wt18_consts + [_wt18_mul_const()]


def _wt10_klingen_const():
    '''Return a construction for Klingen Eisenstein series of weight 10.
    We normalize it so that the gcd of Fourier coefficients is equal to 1.
    '''
    coeffs = [-Integer(83651648095008) / Integer(72529140125),
              -Integer(25513201561) / Integer(2664026851200),
              -Integer(18582719702112) / Integer(6937569925),
              -Integer(232943887417) / Integer(51948523598400),
              Integer(1315408685) / Integer(3966978165696),
              -Integer(5395358805732) / Integer(3607536361),
              Integer(14273508725532) / Integer(381566345875),
              -Integer(25069455163) / Integer(14652147681600),
              -Integer(516775179623) / Integer(2489200089090000),
              Integer(72724077) / Integer(4646506832968),
              -Integer(16790605258464) / Integer(82973336303),
              -Integer(183949336576) / Integer(277502797),
              -Integer(5675) / Integer(2)]
    return ConstDivision(wt18_consts, coeffs, SMFC([4, 4]), 0)


_wt10_consts_diff = [cvv([SMFC([4]), SMFC([6])])]

_wt10_consts = _wt10_consts_diff + [_wt10_klingen_const()]


def _wt10_mul_const():
    return ConstMul(_wt6_const(), SMFC([4]))


def _wt12_mul_const_f6():
    return ConstMul(_wt6_const(), SMFC([6]))


def _wt12_mul_const_f8():
    return ConstMul(_wt8_const(), SMFC([4]))


_wt20_consts = [
    cvv([SMFC([4]), SMFC([4, 12])]),
    # cvv([SMFC([4]), SMFC([4, 4, 4, 4])]),
    # cvv([SMFC([4]), SMFC([4, 6, 6])]),
    # cvv([SMFC([4]), SMFC([6, 10])])
]


def _wt20_mul_basis(prec):
    d = calculator.forms_dict(prec)
    res = []
    alst = [([_wt6_const()], 14),
            ([_wt8_const()], 12),
            (_wt10_consts, 10),
            (_wt12_consts, 8),
            (_wt14_consts, 6),
            (_wt16_consts, 4)]
    for consts, wt in alst:
        res.extend([d[c] * f for c in consts
                    for f in ModularFormsDegree2(wt, prec).basis()])
    res.extend([d[c] for c in _wt20_consts])
    return res


_wt16_consts = [
    cvv([SMFC([4]), SMFC([6, 6])]),
    cvv([SMFC([4]), SMFC([12])]),
    # cvv([SMFC([4]), SMFC([4, 4, 4])]),
    # cvv([SMFC([5]), SMFC([5, 6])]),
    # cvv([SMFC([6]), SMFC([10])]),
    # cvv([SMFC([6]), SMFC([4, 6])]),
    # cvv([SMFC([4]), SMFC([4, 6])], inc=2),
    # cvv([SMFC([4]), SMFC([10])], inc=2),
    # cvv([SMFC([5]), SMFC([4, 5])], inc=2),
    # cvv([SMFC([6]), SMFC([4, 4])], inc=2)
]


def _wt16_basis(prec):
    d = calculator.forms_dict(prec)
    res = []
    alst = [([_wt6_const()], 10),
            ([_wt8_const()], 8),
            (_wt10_consts, 6),
            (_wt12_consts, 4)]
    for consts, wt in alst:
        res.extend([d[c] * f for c in consts
                    for f in ModularFormsDegree2(wt, prec).basis()])
    res.extend([d[c] for c in _wt16_consts])
    return res


_wt14_consts = [cvv([SMFC([4]), SMFC([4, 6])]),
                cvv([SMFC([4]), SMFC([10])]),
                cvv([SMFC([5]), SMFC([4, 5])])]


def _wt14_consts_mul():
    res = [ConstMul(_wt6_const(), SMFC([4, 4])),
           ConstMul(_wt8_const(), SMFC([6]))]
    res.extend([ConstMul(c, SMFC([4])) for c in _wt10_consts])
    return res


wt10_consts = _wt10_consts + [_wt10_mul_const()]
wt12_consts = _wt12_consts + [_wt12_mul_const_f6(), _wt12_mul_const_f8()]
wt14_consts = _wt14_consts + _wt14_consts_mul()


def gen_consts():
    return ([_wt6_const(), _wt8_const()] +
            _wt10_consts + _wt12_consts + _wt14_consts + _wt16_consts +
            [_wt18_consts[0]] + [_wt20_consts[0]])


def ignored_dct():
    '''Return value will be passed to GivenWtBase._basis_const_base.
    '''
    consts = gen_consts()
    consts = [c for c in consts if c.weight() in [18, 20]]
    return {c: [6] for c in consts}


def even_consts():
    '''A list of constructions needed for calculation of generators.
    '''
    res = []
    res.extend(_wt10_consts)
    res.extend(_wt12_consts)
    res.extend(_wt14_consts)
    res.extend(_wt16_consts)
    res.extend(wt18_consts)
    res.extend(_wt20_consts)
    res.append(_wt6_const())
    res.append(_wt8_const())
    return res


calculator = CalculatorVectValued(gen_consts(), data_dir)


def det_of_gens(prec):
    d = calculator.forms_dict(prec)
    cs = gen_consts()[:11]
    wt = sum([c.weight() for c in cs]) + (10 * 11) // 2
    mat = [d[c].forms for c in cs]
    f = det_deg2(mat, wt=wt)
    f.save_as_binary(os.path.join(data_dir, "gens_even_det.sobj"))

# print time.ctime()
# det_of_gens(18)
# print time.ctime()


def test_det_is_divisible_x35_fifth():
    prec = 18
    x35 = x35_with_prec(prec)
    f187 = ModFormQexpLevel1.load_from(
        os.path.join(data_dir, "gens_even_det.sobj"))
    es4 = eisenstein_series_degree2(4, prec)
    es6 = eisenstein_series_degree2(6, prec)
    g = (x35 ** 5) * (es4 ** 3 - es6 ** 2)
    assert f187 * g[(16, 5, 10)] == g * f187[(16, 5, 10)]

# test_det_is_divisible_x35_fifth()
# No Error!
9,857
33.711268
74
py
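The module above packages each generator of the even part of the Sym(10) module as a construction object and registers them all in a CalculatorVectValued. As a minimal, hypothetical usage sketch (the precision value is illustrative, and the cached forms under data_dir are assumed to have been computed already, e.g. with calc_forms_and_save as the test modules below do):

# Usage sketch for even_structure.py; assumes the cached forms exist.
from degree2.vector_valued_impl.sym10.even_structure import (
    calculator, gen_consts)

consts = gen_consts()
print sorted(c.weight() for c in consts)  # determinant weights of the generators
forms = calculator.forms_dict(4)          # maps each construction to a Sym(10) form
f = forms[consts[0]]                      # the weight 6 generator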
degree2
degree2-master/vector_valued_impl/sym10/__init__.py
0
0
0
py
degree2
degree2-master/vector_valued_impl/sym10/tests/test_relation.py
'''Test relations among generators.
'''
import unittest
from degree2.vector_valued_impl.sym10.module_of_given_wt import relation, sym10_space
import os
from degree2.const import CalculatorVectValued
from degree2.vector_valued_impl.sym10.even_structure import gen_consts as even_gen_consts
from degree2.vector_valued_impl.sym10.odd_structure import gen_consts as odd_gen_consts

data_dir = os.path.expanduser("~/data/vector_valued_sym10/test/")
calculator = CalculatorVectValued(even_gen_consts() + odd_gen_consts(),
                                  data_dir)


class TestRelation(unittest.TestCase):

    def test_relation(self):
        '''Test relations of weight 24, 26, 27 and 29.
        '''
        prec = 6
        forms_dict = calculator.forms_dict(prec)
        for wt in [24, 26, 27, 29]:
            print "Checking when k = %s" % (wt,)
            M = sym10_space(wt, prec, data_directory=data_dir)
            rel = relation(wt, data_directory=data_dir)
            self.assertEqual(len(rel), M.dimension() + 1)
            self.assertTrue(all(c.weight() == wt for c in rel))
            self.assertEqual(sum(c.calc_form_from_dependencies_depth_1(
                prec, forms_dict) * a for c, a in rel.items()), 0)


suite = unittest.TestLoader().loadTestsFromTestCase(TestRelation)
unittest.TextTestRunner(verbosity=2).run(suite)
1,342
40.96875
89
py
degree2
degree2-master/vector_valued_impl/sym10/tests/test_division.py
'''
A module for testing generators of smaller weights.
'''
import unittest
from degree2.const import CalculatorVectValued
from degree2.vector_valued_impl.sym10.even_structure import _wt18_consts
from degree2.const import ConstVectValuedHeckeOp as CVH
from degree2.const import ConstDivision, ScalarModFormConst
from degree2.vector_valued_smfs import VectorValuedSiegelModularForms
from degree2.tsushima_dimension_formula import hilbert_series_maybe
from degree2.vector_valued_impl.sym10.odd_structure import sym10_19_consts
from sage.all import (cached_function, PolynomialRing, QQ, cached_method,
                      CuspForms, matrix)
from degree2.vector_valued_impl.sym10.even_structure import gen_consts as even_gen_consts
from degree2.vector_valued_impl.sym10.odd_structure import gen_consts as odd_gen_consts
from degree2.basic_operation import PrecisionDeg2
from degree2.scalar_valued_smfs import tuples_even_wt_modular_forms
import os

data_dir = os.path.expanduser("~/data/vector_valued_sym10/test/")

gens_consts = even_gen_consts() + odd_gen_consts()


class Sym10GivenWtHeckeConstBase(VectorValuedSiegelModularForms):
    '''A parent class of Sym10Wt18HeckeConst and Sym10Wt19HeckeConst.
    '''

    def __init__(self, prec, wt, basis_consts):
        self._basis_consts = basis_consts
        self._calculator = CalculatorVectValued(self._basis_consts, data_dir)
        super(Sym10GivenWtHeckeConstBase, self).__init__(wt, 10, prec)

    @cached_method
    def basis(self):
        d = self._calculator.forms_dict(self.prec)
        return [d[c] for c in self._basis_consts]

    def dimension(self):
        return hilbert_series_maybe(10)[self.wt]


class Sym10GivenWtBase(VectorValuedSiegelModularForms):
    '''A parent class for a subspace of M_{det^k Sym(10)} with given basis.
    '''

    def __init__(self, prec, wt, bss):
        self._bss = bss
        super(Sym10GivenWtBase, self).__init__(wt, 10, prec)

    def basis(self):
        return self._bss

    def dimension(self):
        return len(self._bss)


class Sym10Wt18HeckeConst(Sym10GivenWtHeckeConstBase):
    '''A class for the module of weight det^18 Sym(10).
    CVH is used for constructing a basis.'''

    def __init__(self, prec):
        super(Sym10Wt18HeckeConst, self).__init__(
            prec, 18, _wt18_consts + [CVH(_wt18_consts[0], 2)])


class Sym10Wt19HeckeConst(Sym10GivenWtHeckeConstBase):
    '''A class for the module of weight det^19 Sym(10).
    CVH is used for constructing a basis.'''

    def __init__(self, prec):
        super(Sym10Wt19HeckeConst, self).__init__(
            prec, 19, sym10_19_consts + [CVH(sym10_19_consts[0], 2)])


@cached_function
def _hecke_const_sp(prec, wt):
    '''Returns an instance of Sym10Wt18HeckeConst or Sym10Wt19HeckeConst.
    The result is cached.
    '''
    consts = {18: Sym10Wt18HeckeConst, 19: Sym10Wt19HeckeConst}
    if wt in consts:
        return consts[wt](prec)
    else:
        raise ValueError


class Sym10DivBase(VectorValuedSiegelModularForms):
    '''
    A class for f^(-1) M, where f is a scalar valued modular form.
    An instance of this class may have less prec than the argument.
    '''

    def __init__(self, scalar_const, M, prec):
        self._scalar_const = scalar_const
        self._M = M
        prec = PrecisionDeg2(prec)
        f = self._scalar_const.calc_form(2)  # small prec
        if f._is_cuspidal and f[(1, 1, 1)] != 0:
            prec = prec._max_value() - 1
        elif f[(0, 0, 0)] != 0:
            prec = prec._max_value()
        else:
            raise RuntimeError
        super(Sym10DivBase, self).__init__(
            M.wt - scalar_const.weight(), 10, prec)

    def basis(self):
        f = self._scalar_const.calc_form(self.prec._max_value() + 1)
        return [b.divide(f, self.prec, parallel=True)
                for b in self._M.basis()]

    def dimension(self):
        return self._M.dimension()


class Sym10EvenDiv(Sym10DivBase):
    '''A class for f^(-1) M_{det^18 Sym(10)}, where f is a scalar valued
    modular form.
    '''

    def __init__(self, scalar_const, prec):
        M = _hecke_const_sp(prec, 18)
        super(Sym10EvenDiv, self).__init__(scalar_const, M, prec)


class Sym10OddDiv(Sym10DivBase):
    '''A class for f^(-1) M_{det^19 Sym(10)}, where f is a scalar valued
    modular form.
    '''

    def __init__(self, scalar_const, prec):
        M = _hecke_const_sp(prec, 19)
        super(Sym10OddDiv, self).__init__(scalar_const, M, prec)


def _anihilate_pol(k, M):
    '''
    k: The weight of an element c, where c is a construction for
    generators of M_{det^* sym(10)} and an instance of ConstDivision.
    M: an instance of Sym10EvenDiv or Sym10OddDiv.
    Return a polynomial pl such that the subspace of M annihilated by
    pl(T(2)) is equal to the subspace of holomorphic modular forms.
    '''
    R = PolynomialRing(QQ, names="x")
    x = R.gens()[0]
    if k % 2 == 0:
        # Klingen-Eisenstein series
        f = CuspForms(1, k + 10).basis()[0]
        return x - f[2] * (1 + QQ(2) ** (k - 2))
    elif k == 13:
        # Kim-Ramakrishnan-Shahidi lift
        f = CuspForms(1, 12).basis()[0]
        a = f[2]
        return x - f[2] ** 3 + QQ(2) ** 12 * f[2]
    else:
        chrply = M.hecke_charpoly(2)
        dim = hilbert_series_maybe(10)[k]
        l = [(a, b) for a, b in chrply.factor() if a.degree() == dim]
        if len(l) > 1 or l[0][1] != 1:
            raise RuntimeError
        else:
            return l[0][0]


def _find_const_of_e4_e6_of_same_wt(k):
    '''Returns an instance of ScalarModFormConst so that this corresponds
    to a polynomial of es4 and es6 of weight k.
    '''
    a, b = [(a, b) for a, b, c, d in tuples_even_wt_modular_forms(k)
            if c == d == 0][0]
    return ScalarModFormConst([4] * a + [6] * b)


class TestDivision(unittest.TestCase):
    '''A class for testing whether generators constructed by dividing
    forms are given correctly.
    '''

    def test_division_generators(self):
        prec = 6
        div_consts = [c for c in gens_consts
                      if isinstance(c, ConstDivision)]
        consts = (even_gen_consts() + odd_gen_consts() +
                  [CVH(_wt18_consts[0], 2), CVH(sym10_19_consts[0], 2)])
        calculator = CalculatorVectValued(consts, data_dir)
        calculator.calc_forms_and_save(prec, verbose=True, force=True)
        gens_dct = calculator.forms_dict(prec)
        for c in div_consts:
            k = c.weight()
            print "checking when k = %s" % (str(k), )
            if k % 2 == 0:
                sccst = _find_const_of_e4_e6_of_same_wt(18 - k)
                M = Sym10EvenDiv(sccst, prec)
            else:
                sccst = _find_const_of_e4_e6_of_same_wt(19 - k)
                M = Sym10OddDiv(sccst, prec)
            pl = _anihilate_pol(k, M)
            hol_basis = M.basis_of_subsp_annihilated_by(pl)
            N = Sym10GivenWtBase(prec, k, hol_basis)
            # Check this prec is sufficient.
            mt = matrix(QQ, [[b[t] for b in N.basis()]
                             for t in N.linearly_indep_tuples()])
            self.assertTrue(
                mt.is_invertible(), "False when k = %s" % (str(k),))
            # Check our construction gives a holomorphic modular form
            self.assertTrue(N.contains(gens_dct[c]),
                            "False when k = %s" % (str(k),))


suite = unittest.TestLoader().loadTestsFromTestCase(TestDivision)
unittest.TextTestRunner(verbosity=2).run(suite)
7,508
32.977376
89
py
degree2
degree2-master/vector_valued_impl/sym10/tests/test_ramanujan_conj.py
'''This module tests Ramanujan conjecture for eigenforms in
M_{det^k Sym(10)} for k <= 29 and tests Hecke polynomials of degree 4
for known lifts and Eisenstein series.
'''
import os
from degree2.vector_valued_impl.sym10.module_of_given_wt import sym10_space
from sage.all import ComplexField, NumberField, PolynomialRing, CuspForms, QQ
import unittest
from degree2.tsushima_dimension_formula import hilbert_series_maybe
from degree2.vector_valued_impl.sym10.even_structure import gen_consts as even_gen_consts
from degree2.vector_valued_impl.sym10.even_structure import _wt10_klingen_const
from degree2.vector_valued_impl.sym10.odd_structure import gen_consts as odd_gen_consts
from degree2.const import CalculatorVectValued

data_dir = os.path.expanduser("~/data/vector_valued_sym10/test/")


def _hecke_pol_klingen(k):
    '''k: even.
    F: Klingen-Eisenstein series of determinant weight k whose Hecke field
    is the rational field.
    Return the Hecke polynomial of F at 2.
    '''
    f = CuspForms(1, k + 10).basis()[0]
    R = PolynomialRing(QQ, names="x")
    x = R.gens()[0]
    pl = QQ(1) - f[2] * x + QQ(2) ** (k + 9) * x ** 2
    return pl * pl.subs({x: x * QQ(2) ** (k - 2)})


def _hecke_pol_krs_lift():
    '''Return the Hecke polynomial of KRS lift of determinant weight 13
    at 2.
    '''
    R = PolynomialRing(QQ, names="x")
    x = R.gens()[0]
    f = CuspForms(1, 12).basis()[0]
    a = f[2]
    b = QQ(2) ** 11
    return ((1 - (a ** 3 - 3 * a * b) * x + b ** 3 * x ** 2) *
            (1 - a * b * x + b ** 3 * x ** 2))


class RamanujanConj(unittest.TestCase):

    def assert_ramanujan_conj_eigenform(self, f, complex_prec=300):
        '''f is a Hecke eigenform.
        Assert for all cuspidal embeddings from the Hecke field to the
        ComplexField, abs value of roots of Hecke polynomial is near to 1.
        '''
        charpoly = f.base_ring.polynomial()
        CC = ComplexField(prec=complex_prec)
        self.assertTrue(charpoly.is_irreducible(),
                        "charpoly is not irreducible.")
        K = f.base_ring
        pl = f.euler_factor_of_standard_l(2)
        if K == QQ:
            embeddings = [lambda x: x]
        else:
            embeddings = K.complex_embeddings(prec=complex_prec)
        if f.phi_operator() == {}:
            print "Testing when k = %s" % (f.wt,)
            for phi in embeddings:
                pl_cc = pl.map_coefficients(phi)
                R = PolynomialRing(CC, names=("x",))
                max_diff = max((a.abs() - CC(1)).abs()
                               for a, _ in R(pl_cc).roots())
                self.assertLess(max_diff, CC(2) ** (-complex_prec + 1))

    def test_ramanujan_conj(self):
        '''Test Ramanujan conjectures for eigenforms of determinant weights
        less than or equal to 29.
        '''
        prec = 6
        hpl = hilbert_series_maybe(10)
        for k in range(22, 30):
            if hpl[k] != 0:
                N = sym10_space(k, prec, data_directory=data_dir)
                self.assertEqual(N.dimension(), len(N.basis()))
                _chply = N.hecke_charpoly(2)
                for cply, _ in _chply.factor():
                    K = NumberField(cply, names="a")
                    a = K.gens()[0]
                    f = N.eigenform_with_eigenvalue_t2(a)
                    self.assert_ramanujan_conj_eigenform(f)

    def test_known_eigenforms(self):
        '''Test Hecke polynomial of degree 4 for Klingen-Eisenstein series
        and a KRS-lift.
        '''
        klingen_consts = [c for c in even_gen_consts()
                          if c.weight() in (6, 8)]
        klingen_consts.append(_wt10_klingen_const())
        krs_const = [c for c in odd_gen_consts() if c.weight() == 13][0]
        clc = CalculatorVectValued(klingen_consts + [krs_const], data_dir)
        forms_dct = clc.forms_dict(6)
        for c in klingen_consts:
            self.assertEqual(forms_dct[c].euler_factor_of_spinor_l(2),
                             _hecke_pol_klingen(c.weight()))
        self.assertEqual(forms_dct[krs_const].euler_factor_of_spinor_l(2),
                         _hecke_pol_krs_lift())


suite = unittest.TestLoader().loadTestsFromTestCase(RamanujanConj)
unittest.TextTestRunner(verbosity=2).run(suite)
4,232
39.701923
89
py
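_hecke_pol_klingen above builds the degree-4 spinor Euler factor of a Klingen-Eisenstein series out of the degree-2 factor of an elliptic cusp form. A worked instance for k = 6, written out directly (the seed is the weight-16 level-1 cusp form; its coefficient a(2) = 216 is a classical value, stated here as an assumption):

# Worked instance of _hecke_pol_klingen with k = 6.
from sage.all import CuspForms, PolynomialRing, QQ

f = CuspForms(1, 16).basis()[0]                # normalized cusp form, a(2) = 216
R = PolynomialRing(QQ, names="x")
x = R.gens()[0]
pl = QQ(1) - f[2] * x + QQ(2) ** 15 * x ** 2   # degree-2 factor, exponent k + 9 = 15
print pl * pl.subs({x: x * QQ(2) ** 4})        # degree-4 factor, twist by 2^(k-2) = 2^4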
degree2
degree2-master/vector_valued_impl/sym10/tests/__init__.py
0
0
0
py
degree2
degree2-master/tests/test_interpolate.py
# -*- coding: utf-8 -*-
import unittest
from degree2.all import eisenstein_series_degree2, x35_with_prec
from degree2.interpolate import calc_forms, det_deg2


class TestInterpolate(unittest.TestCase):

    def test_interpolate(self):
        prec = 15
        es4 = eisenstein_series_degree2(4, prec)
        x35 = x35_with_prec(prec)
        f = es4.differentiate_wrt_tau()
        self.assertEqual(calc_forms(lambda fs: fs[0] ** 2, [es4], prec, wt=8),
                         es4 ** 2)
        self.assertEqual(calc_forms(lambda fs: fs[0] * fs[1], [es4, x35],
                                    prec, wt=39),
                         es4 * x35)
        self.assertEqual(calc_forms(lambda fs: fs[0] ** 2, [f], prec,
                                    autom=False),
                         f ** 2)

    def test_det(self):
        prec = 10
        l = [eisenstein_series_degree2(k, prec) for k in [4, 6, 10, 12]]
        m = [[a.wt * a for a in l],
             [a.differentiate_wrt_tau() for a in l],
             [a.differentiate_wrt_w() for a in l],
             [a.differentiate_wrt_z() for a in l]]
        d = det_deg2(m, wt=35)
        d = d * d[(2, -1, 3)] ** (-1)
        self.assertEqual(d, x35_with_prec(prec))


suite = unittest.TestLoader().loadTestsFromTestCase(TestInterpolate)
unittest.TextTestRunner(verbosity=2).run(suite)
1,297
36.085714
78
py
degree2
degree2-master/tests/test_fc_mul_add.py
# -*- coding: utf-8 -*-
from degree2.scalar_valued_smfs import eisenstein_series_degree2, QexpLevel1,\
    x10_with_prec, x12_with_prec, x35_with_prec, ModFormQexpLevel1
from degree2.basic_operation import PrecisionDeg2
import unittest
from sage.all import FiniteField, ZZ, QQ, PolynomialRing
import operator

global_prec = 8
# global_prec = [(10, 5, 10), (9, 0, 8)]

es4 = eisenstein_series_degree2(4, global_prec)
qsres4 = QexpLevel1(es4.fc_dct, global_prec, base_ring=ZZ)
ffld = FiniteField(5)
ff_es4 = es4.change_ring(ffld)
ff_qsres4 = qsres4.change_ring(ffld)

es6 = eisenstein_series_degree2(6, global_prec)
qsres6 = QexpLevel1(es6.fc_dct, global_prec, base_ring=ZZ)
ff_es6 = es6.change_ring(ffld)
ff_qsres6 = qsres6.change_ring(ffld)

x10 = x10_with_prec(global_prec)
qsrx10 = QexpLevel1(x10.fc_dct, global_prec, base_ring=ZZ, is_cuspidal=True)
ff_x10 = x10.change_ring(ffld)
ff_qsrx10 = qsrx10.change_ring(ffld)
dzx10 = x10.differentiate_wrt_z()
ff_dzx10 = dzx10.change_ring(ffld)

x12 = x12_with_prec(global_prec)
qsrx12 = QexpLevel1(x12.fc_dct, global_prec, is_cuspidal=True, base_ring=ZZ)
dzx12 = x12.differentiate_wrt_z()
ff_x12 = x12.change_ring(ffld)
ff_qsrx12 = qsrx12.change_ring(ffld)
ff_dzx12 = dzx12.change_ring(ffld)

x35 = x35_with_prec(global_prec)
ff_x35 = x35.change_ring(ffld)

dct_of_forms = {"es4": es4,
                "qsres4": qsres4,
                "es6": es6,
                "qsres6": qsres6,
                "x10": x10,
                "qsrx10": qsrx10,
                "x12": x12,
                "qsrx12": qsrx12,
                "dzx10": dzx10,
                "dzx12": dzx12,
                "x35": x35,
                "2": 2,
                "0": 0,
                "F5_3": FiniteField(5)(3),
                "ff_es4": ff_es4,
                "ff_es6": ff_es6,
                "ff_qsres4": ff_qsres4,
                "ff_qsrx10": ff_qsrx10,
                "ff_qsres6": ff_qsres6,
                "ff_x10": ff_x10,
                "ff_x12": ff_x12,
                "ff_x35": ff_x35,
                "ff_dzx10": ff_dzx10,
                "ff_dzx12": ff_dzx12}


class TestDeg2fcMulAddFunctions(unittest.TestCase):

    # @skip("OK")
    # def test_dict_to_pol_to_dict(self):
    #     seq = range(10)
    #     bd = 10
    #     l = semi_pos_def_matarices(bd)
    #     dct = {t: random.choice(seq) for t in l}
    #     self.assertTrue(pol_to_dict(dict_to_pol(dct, bd), bd) == dct)

    def mul_is_correct(self, f1_name, f2_name, base_ring=QQ):
        f1 = dct_of_forms[f1_name]
        f2 = dct_of_forms[f2_name]
        pf1 = dict_to_pol(f1, base_ring=base_ring)
        pf2 = dict_to_pol(f2, base_ring=base_ring)
        f = f1 * f2
        self.assertTrue(f.fc_dct == pol_to_dict(pf1 * pf2,
                                                base_ring=base_ring))
        return f

    def add_is_correct(self, f1_name, f2_name, base_ring=QQ):
        f1 = dct_of_forms[f1_name]
        f2 = dct_of_forms[f2_name]
        pf1 = dict_to_pol(f1, base_ring=base_ring)
        pf2 = dict_to_pol(f2, base_ring=base_ring)
        f = f1 + f2
        self.assertTrue(f.fc_dct == pol_to_dict(pf1 + pf2,
                                                base_ring=base_ring))
        return f

    def pow_is_correct(self, f1_name, n):
        f1 = dct_of_forms[f1_name]
        f = f1 ** n
        self.assertTrue(f == power(f1, n))
        return f

    # @skip("Ok")
    def test_hol_add(self):
        f1 = self.add_is_correct("es4", "0")
        self.assertFalse(f1._is_cuspidal)
        self.assertTrue(isinstance(f1, ModFormQexpLevel1))
        f2 = self.add_is_correct("es4", "2")
        self.assertFalse(f2._is_cuspidal)
        self.assertFalse(isinstance(f2, ModFormQexpLevel1))
        f3 = self.add_is_correct("x10", "0")
        self.assertTrue(f3._is_cuspidal)
        self.assertTrue(isinstance(f3, ModFormQexpLevel1))
        f4 = self.add_is_correct("x10", "x10")
        self.assertTrue(f4._is_cuspidal)
        self.assertTrue(isinstance(f4, ModFormQexpLevel1))
        f5 = self.add_is_correct("x12", "x10")
        self.assertTrue(f5._is_cuspidal)
        self.assertFalse(isinstance(f5, ModFormQexpLevel1))

    # @skip("OK")
    def test_hol_mul(self):
        f1 = self.mul_is_correct("es4", "es4")
        self.assertFalse(f1._is_cuspidal)
        f2 = self.mul_is_correct("es4", "2")
        self.assertFalse(f2._is_cuspidal)
        f3 = self.mul_is_correct("es4", "x10")
        self.assertTrue(f3._is_cuspidal)
        f4 = self.mul_is_correct("x10", "x12")
        self.assertTrue(f4._is_cuspidal)
        f5 = self.mul_is_correct("x10", "dzx10")
        self.assertTrue(f5._is_cuspidal)
        self.assertFalse(isinstance(f5, ModFormQexpLevel1))
        f6 = self.mul_is_correct("es4", "F5_3", base_ring=FiniteField(5))
        self.assertFalse(f6._is_cuspidal)

    # @skip("OK")
    def test_odd_mul(self):
        f1 = self.mul_is_correct("es4", "x35")
        self.assertTrue(f1._is_cuspidal)
        f2 = self.mul_is_correct("x35", "x35")
        self.assertTrue(f2._is_cuspidal)

    # @skip("OK")
    def test_qsr_add(self):
        f1 = self.add_is_correct("qsres4", "0")
        self.assertFalse(f1._is_cuspidal)
        f2 = self.add_is_correct("qsres4", "2")
        self.assertFalse(f2._is_cuspidal)
        f3 = self.add_is_correct("qsrx10", "0")
        self.assertTrue(f3._is_cuspidal)
        f4 = self.add_is_correct("qsrx10", "2")
        self.assertFalse(f4._is_cuspidal)
        f5 = self.add_is_correct("qsres4", "qsres6")
        self.assertFalse(f5._is_cuspidal)
        f6 = self.add_is_correct("qsres4", "qsrx10")
        self.assertFalse(f6._is_cuspidal)
        f7 = self.add_is_correct("qsrx12", "qsrx10")
        self.assertTrue(f7._is_cuspidal)

    # @skip("OK")
    def test_qsr_mul_not_cusp(self):
        f = self.mul_is_correct("qsres4", "qsres4")
        self.assertFalse(f._is_cuspidal)
        g = self.mul_is_correct("qsres6", "qsres4")
        self.assertFalse(g._is_cuspidal)

    # @skip("OK")
    def test_qsr_mul_num(self):
        f = self.mul_is_correct("qsres4", "2")
        self.assertFalse(f._is_cuspidal)
        g = self.mul_is_correct("qsrx10", "2")
        self.assertTrue(g._is_cuspidal)
        h = self.mul_is_correct("qsrx10", "F5_3", base_ring=FiniteField(5))
        self.assertTrue(h._is_cuspidal)

    # @skip("OK")
    def test_qsr_mul_cusp(self):
        f = self.mul_is_correct("qsres4", "qsrx10")
        g = self.mul_is_correct("qsrx10", "qsrx12")
        h = self.mul_is_correct("qsres4", "dzx10")
        self.assertTrue(f._is_cuspidal)
        self.assertTrue(g._is_cuspidal)
        self.assertTrue(h._is_cuspidal)

    # @skip("OK")
    def test_pow_hol(self):
        '''
        Assumes multiplication is correct.
        '''
        self.pow_is_correct("es4", 2)
        self.pow_is_correct("es4", 5)
        self.pow_is_correct("x35", 6)
        self.pow_is_correct("es4", 9)
        es4._is_gen = False
        x35._is_gen = False
        self.pow_is_correct("es4", 2)
        self.pow_is_correct("es4", 5)
        self.pow_is_correct("x35", 6)
        self.pow_is_correct("es4", 9)

    def test_pos_characteristic_mul(self):
        self.mul_is_correct("ff_es4", "ff_es4", base_ring=ffld)
        self.mul_is_correct("ff_x35", "es4", base_ring=ffld)
        self.mul_is_correct("es4", "ff_x12", base_ring=ffld)
        self.mul_is_correct("ff_dzx10", "ff_es6", base_ring=ffld)
        self.mul_is_correct("dzx10", "ff_x35", base_ring=ffld)


def power(f, n):
    return reduce(operator.mul, [f for i in range(n)])


def pol_to_dict(pl, bd=global_prec, base_ring=QQ):
    R = PolynomialRing(base_ring, "u1,u2,q1,q2")
    (u1, u2, q1, q2) = R.gens()
    S = R.quotient(u1 * u2 - 1)
    (uu1, uu2, qq1, qq2) = S.gens()
    l = PrecisionDeg2(bd)
    pl_lft = pl.lift()
    dct = dict()
    for n, r, m in l:
        if r >= 0:
            cfs = pl_lft.coefficient({u1: r, u2: 0, q1: n, q2: m})
        else:
            cfs = pl_lft.coefficient({u1: 0, u2: -r, q1: n, q2: m})
        dct[(n, r, m)] = cfs
    for t in l:
        if not t in dct.keys():
            dct[t] = 0
    return dct


def dict_to_pol(dct, bd=global_prec, base_ring=QQ):
    R = PolynomialRing(base_ring, "u1, u2, q1, q2")
    (u1, u2, q1, q2) = R.gens()
    S = R.quotient(u1 * u2 - 1)
    (uu1, uu2, qq1, qq2) = S.gens()
    l = PrecisionDeg2(bd)
    if not hasattr(dct, "__getitem__"):
        return dct
    return sum([dct[(n, r, m)] * uu1 ** r * qq1 ** n * qq2 ** m
                if r > 0
                else dct[(n, r, m)] * uu2 ** (-r) * qq1 ** n * qq2 ** m
                for n, r, m in l])


suite = unittest.TestLoader().loadTestsFromTestCase(TestDeg2fcMulAddFunctions)
unittest.TextTestRunner(verbosity=2).run(suite)
8,797
30.992727
78
py
degree2
degree2-master/tests/test_eigenforms.py
# -*- coding: utf-8 -*-
import unittest
import operator
from sage.all import NumberField, QQ, var, Integer, ZZ, PolynomialRing
from degree2.scalar_valued_smfs import (
    eisenstein_series_degree2, x10_with_prec, x12_with_prec,
    x35_with_prec, KlingenEisensteinAndCuspForms, CuspFormsDegree2)
from .data_dir import load_from_data_dir


def load_cache(fl):
    return load_from_data_dir(fl, "eigen_forms")


def _alpha20_3():
    x = var("x")
    K = NumberField(x ** 2 - ZZ(1378464) * x + ZZ(328189501440),
                    "alpha20_3")
    return K.gens()[0]

alpha20_1 = 119538120
alpha20_2 = -840960
alpha20_3 = _alpha20_3()


def _a47():
    x = var("x")
    K = NumberField(
        x ** 3 - x ** 2 - ZZ(524706) * x + ZZ(103406706), names="a47")
    return K.gens()[0]

a47 = _a47()

RDeg2 = PolynomialRing(QQ, "es4, es6, x10, x12, x35")

ple4, ple6, plx10, plx12, plx35 = RDeg2.gens()


def polynomial_to_form(f, prec):
    es4 = eisenstein_series_degree2(4, prec)
    es6 = eisenstein_series_degree2(6, prec)
    x10 = x10_with_prec(prec)
    x12 = x12_with_prec(prec)
    x35 = x35_with_prec(prec)
    gens = [es4, es6, x10, x12, x35]

    def monom(t):
        return reduce(operator.mul, [f ** a for f, a in zip(gens, t)])

    return sum([a * monom(t) for t, a in f.dict().iteritems()])


x47_fc_dct = load_cache("x47_fc_dct.sobj")
f20_1_dct = load_cache("f20_1_dct.sobj")
f20_2_dct = load_cache("f20_2_dct.sobj")
f20_3_dct = load_cache("f20_3_dct.sobj")

cons20 = load_cache("cons20.sobj")
cons47 = load_cache("cons47.sobj")


def coerce_dict(d, fld):
    if fld == QQ or fld == ZZ:
        return d
    return {k: fld(v.list()) if len(v.list()) > 1 else fld(v)
            for k, v in d.items()}


def coerce_pol(pl, fld):
    if fld == QQ or fld == ZZ:
        return pl
    return pl.map_coefficients(
        lambda x: fld(x.list()) if len(x.list()) > 1 else fld(x), fld)


class TestEigenforms(unittest.TestCase):

    def test_wt_20_eigen(self):
        N20 = KlingenEisensteinAndCuspForms(20)
        pl = N20.hecke_charpoly(2)
        x = pl.parent().gens()[0]
        pl1 = ((x + Integer(840960)) *
               (x ** Integer(2) - Integer(1378464) * x +
                Integer(328189501440)))
        self.assertTrue(pl == (x - Integer(119538120)) * pl1)

        x = var("x")

        f20_1 = N20.eigenform_with_eigenvalue_t2(alpha20_1)
        f20_2 = N20.eigenform_with_eigenvalue_t2(alpha20_2)
        f20_3 = N20.eigenform_with_eigenvalue_t2(alpha20_3)

        l = [f20_1, f20_2, f20_3]
        l = [f.normalize(f[(1, 1, 1)]) for f in l]

        cons20[-1] = coerce_pol(cons20[-1], alpha20_3.parent())

        self.assertTrue(cons20 == [f._construction for f in l])
        self.assertTrue(all(polynomial_to_form(c, 4) == f
                            for c, f in zip(cons20, l)))

        dcts = [f20_1_dct, f20_2_dct, f20_3_dct]

        self.assertTrue(all(coerce_dict(d, f.base_ring) == f.fc_dct
                            for d, f in zip(dcts, l)))

    def test_wt_20_cusp_eigen(self):
        S20 = CuspFormsDegree2(20)
        pl = S20.hecke_charpoly(2)
        x = pl.parent().gens()[0]
        pl1 = (x ** Integer(2) - Integer(1378464) * x +
               Integer(328189501440))
        self.assertTrue(pl == (x + Integer(840960)) * pl1)
        self.assertTrue(pl == S20.hecke_matrix(2).charpoly("x"))

        f20_2 = S20.eigenform_with_eigenvalue_t2(alpha20_2)
        f20_3 = S20.eigenform_with_eigenvalue_t2(alpha20_3)

        l = [f20_2, f20_3]
        l = [f.normalize(f[(1, 1, 1)]) for f in l]

        # self.assertTrue(cons20[1:] == [f._construction for f in l])

        dcts = [f20_2_dct, f20_3_dct]

        self.assertTrue(all(coerce_dict(d, f.base_ring) == f.fc_dct
                            for d, f in zip(dcts, l)))

    def test_wt_47_eigen(self):
        KS47 = KlingenEisensteinAndCuspForms(47)
        lambda2 = (-ZZ(957874176) / ZZ(13) * a47 ** 2 -
                   ZZ(818321817600) / ZZ(13) * a47 - ZZ(34324755775488))
        x47 = KS47.eigenform_with_eigenvalue_t2(lambda2)
        x47 = x47.normalize(x47[(2, -1, 3)])
        _const47 = coerce_pol(cons47[0], a47.parent())
        self.assertTrue(_const47 == x47._construction)
        self.assertTrue(polynomial_to_form(_const47, x47.prec) == x47)

        _x47_fc_dct = coerce_dict(x47_fc_dct, a47.parent())
        self.assertTrue(x47.fc_dct == _x47_fc_dct)

        S47 = CuspFormsDegree2(47)
        x47 = S47.eigenform_with_eigenvalue_t2(lambda2)
        x47 = x47.normalize(x47[(2, -1, 3)])
        self.assertTrue(_const47 == x47._construction)
        self.assertTrue(x47.fc_dct == _x47_fc_dct)

    def test_wt_35_eigenvalues(self):
        x35 = x35_with_prec([(12, 33, 27), (8, 35, 39), (34, -17, 51)])
        d = {2: -25073418240,
             3: -11824551571578840,
             4: 138590166352717152256,
             5: 9470081642319930937500,
             7: -10370198954152041951342796400,
             9: -96268467952179923650803475996239,
             11: -8015071689632034858364818146947656,
             13: -20232136256107650938383898249808243380,
             17: 118646313906984767985086867381297558266980}
        d1 = {m: x35.hecke_eigenvalue(m) for m in d.keys()}
        self.assertTrue(d == d1)

    def test_es4_eigenvalues(self):
        es4 = eisenstein_series_degree2(4, 25)
        d = {2: 45, 3: 280, 4: 1549, 5: 3276, 9: 69049, 25: 10256401}
        for k, v in d.iteritems():
            self.assertTrue(es4.hecke_eigenvalue(k) == v)

    def test_cusp_sp_wt28_hecke_charpoly(self):
        R = PolynomialRing(QQ, names="x")
        x = R.gens()[0]
        pl = (x ** Integer(7) - Integer(599148384) * x ** Integer(6) +
              Integer(85597740037545984) * x ** Integer(5) +
              Integer(4052196666582552432082944) * x ** Integer(4) -
              Integer(992490558368877866775830593536000) * x ** Integer(3) -
              Integer(7786461340613962559507216233894458163200) *
              x ** Integer(2) +
              Integer(2554655965904300151500968857660777576875950080000) * x +
              Integer(2246305351725266922462270484154998253269432286576640000))
        S = CuspFormsDegree2(28)
        self.assertTrue(R(S.hecke_charpoly(2)) == pl)

    def test_cusp_sp_even_wts_hecke_charpoly_decomp_deg(self):
        for k in range(28, 50, 2):
            S = CuspFormsDegree2(k)
            dims = S.klingeneisensteinAndCuspForms().dimensions()
            dgs = set([a.degree() for a, _ in S.hecke_charpoly(2).factor()])
            dgs1 = set([dims["lift"], dims["non-lift"]])
            self.assertTrue(dgs == dgs1)


suite = unittest.TestLoader().loadTestsFromTestCase(TestEigenforms)
unittest.TextTestRunner(verbosity=2).run(suite)
6,618
33.473958
94
py
degree2
degree2-master/tests/misc_test.py
# -*- coding: utf-8 -*-
import unittest
from unittest import skip

from degree2.basic_operation import (
    PrecisionDeg2, semi_pos_def_matarices, _spos_def_mats_lt)
from degree2.scalar_valued_smfs import (KlingenEisensteinAndCuspForms,
                                        eisenstein_series_degree2,
                                        x10_with_prec,
                                        x12_with_prec,
                                        x5__with_prec,
                                        degree2_modular_forms_ring_level1_gens,
                                        SpaceOfModForms)
from degree2.rankin_cohen_diff import rankin_cohen_pair_sym
from sage.all import matrix, mod, QQ
from degree2.hecke_module import HalfIntegralMatrices2
from degree2.utils import linearly_indep_rows_index_list, pmap
from .data_dir import load_from_data_dir


class TestDeg2fcFunctions(unittest.TestCase):

    def test_reduced_form(self):
        def reduced_form_with_sign_test(tpl):
            mat = matrix([[1, 0], [0, 1]])
            sign = 1
            (n, r, m) = tpl
            if n > m:
                sign *= -1
                mat = mat * matrix([[0, 1], [1, 0]])
                (n, m) = m, n
            rem = mod(r, 2 * n)
            if rem > n:
                u = r // (2 * n) + 1
            else:
                u = r // (2 * n)
            m = n * u ** 2 - r * u + m
            r = r - 2 * n * u
            mat = mat * matrix([[1, -u], [0, 1]])
            if r < 0:
                sign *= -1
                mat = mat * matrix([[1, 0], [0, -1]])
                r *= -1
            return ((n, r, m), sign, mat)

        bls = []
        for t in PrecisionDeg2(15).pos_defs():
            tpl, sign, mat = reduced_form_with_sign_test(t)
            a = HalfIntegralMatrices2(t)
            b = HalfIntegralMatrices2(tpl)
            bl = mat.det() == sign
            bl = a[mat] == b and bl
            bls.append(bl)
        self.assertTrue(all(bls))

    # @skip("OK")
    def save_load_basis(self, wt):
        KS = KlingenEisensteinAndCuspForms(wt, 10)
        basis = KS.basis()
        KS.save_basis_as_binary("/tmp/basis_test.sobj")
        KS = KlingenEisensteinAndCuspForms(wt, 10)
        KS.load_basis_from("/tmp/basis_test.sobj")
        lbasis = KS.basis()
        dim = KS.dimension()
        self.assertTrue(all([lbasis[i].fc_dct == basis[i].fc_dct
                             for i in range(dim)]))

    def test_x5(self):
        d = load_from_data_dir("x5_fc_dct.sobj", "eigen_forms")
        self.assertEqual(d, x5__with_prec(10).fc_dct)

    @skip("OK")
    def test_wt_34_47_save_load_basis(self):
        self.save_load_basis(34)
        self.save_load_basis(47)

    # @skip("OK")
    def test_eisenstein(self):
        prec = 10
        es4, es6, es10, es12 = [eisenstein_series_degree2(k, prec)
                                for k in [4, 6, 10, 12]]
        f10 = es4 * es6 - es10
        f12 = 3 ** 2 * 7 ** 2 * es4 ** 3 + 2 * 5 ** 3 * es6 ** 2 - 691 * es12
        f10 = f10 * (f10[(1, 1, 1)]) ** (-1)
        f12 = f12 * (f12[(1, 1, 1)]) ** (-1)
        self.assertTrue(f10 == x10_with_prec(prec))
        self.assertTrue(f12 == x12_with_prec(prec))

    def test_semi_pos_mats(self):
        self.assertEqual(len(list(semi_pos_def_matarices(10))), 2029)
        self.assertEqual(len(list(semi_pos_def_matarices(14))), 5357)
        self.assertEqual(len(list(_spos_def_mats_lt((20, 3, 10)))), 2832)
        self.assertEqual(len(list(_spos_def_mats_lt((10, 0, 10)))), 1021)

    def test_lin_indep(self):
        A = [[1, 0, 0], [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 1, 0],
             [0, 0, 1]]
        self.assertEqual(linearly_indep_rows_index_list(A, 3), [0, 3, 5])
        A = [[1, 0], [0, 0], [1, 0], [0, 1]]
        self.assertEqual(linearly_indep_rows_index_list(A, 2), [0, 3])

    # @skip("OK")
    def test_hecke_operator(self):
        es4, es6, _, _, _ = degree2_modular_forms_ring_level1_gens(10)
        self.assertEqual(es4.hecke_operator_acted(2, 5),
                         45 * es4._down_prec(5))
        f10 = rankin_cohen_pair_sym(2, es4, es6)
        self.assertEqual(f10.hecke_operator_acted(2, 5),
                         -6168 * f10._down_prec(5))

    # @skip("OK")
    def test_pmap(self):
        self.assertEqual([x ** 2 for x in range(100)],
                         pmap(lambda x: x ** 2, range(100), num_of_procs=4))

    def test_x5_mul(self):
        x5 = x5__with_prec(5)
        x10 = x10_with_prec(4)
        es4 = eisenstein_series_degree2(4, 5)
        self.assertEqual(x10, x5 ** 2)
        self.assertEqual(x10 * x5, x5 ** 3)
        self.assertEqual(x5 * es4, es4 * x5)
        self.assertEqual((x5 + x5 * es4) * x5, x10 + x10 * es4)

    def test_basis_of_scalar_valued(self):
        for k in [12, 16, 35, 37, 47]:
            M = SpaceOfModForms(k, prec=k // 10)
            dm = M.dimension()
            self.assertEqual(dm, len(M.basis()))
            tpls = M.linearly_indep_tuples()
            self.assertTrue(all(f.wt == k for f in M.basis()))
            self.assertTrue(
                matrix([[f[t] for f in M.basis()]
                        for t in tpls]).change_ring(QQ).is_invertible())


suite = unittest.TestLoader().loadTestsFromTestCase(TestDeg2fcFunctions)
unittest.TextTestRunner(verbosity=2).run(suite)
5,310
36.666667
98
py
degree2
degree2-master/tests/data_dir.py
import os.path as opath

from sage.all import load


def data_dir(name):
    return opath.join(opath.dirname(opath.abspath(__file__)), "data", name)


def load_from_data_dir(fname, dirname):
    return load(opath.join(data_dir(dirname), fname))
244
21.272727
75
py
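load_from_data_dir is the tests' shared loader for cached Sage objects. A one-line usage sketch, matching how test_gens.py below calls it:

# Load a cached Fourier-coefficient dictionary shipped with the tests.
from degree2.tests.data_dir import load_from_data_dir

fc_dct4 = load_from_data_dir("es4_fc_dct.sobj", "eigen_forms")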
degree2
degree2-master/tests/test_save_load.py
# -*- coding: utf-8 -*-
import unittest
from degree2.scalar_valued_smfs import eisenstein_series_degree2, \
    x12_with_prec, x35_with_prec, ModFormQexpLevel1, QexpLevel1
import tempfile
from sage.all import FiniteField, ZZ

global_prec = 8
es4 = eisenstein_series_degree2(4, global_prec)
qsres4 = QexpLevel1(es4.fc_dct, global_prec, base_ring=ZZ)
x12 = x12_with_prec(global_prec)
ff_x12 = x12.change_ring(FiniteField(23))
x35 = x35_with_prec(global_prec)
ff_x35 = x35.change_ring(FiniteField(23))


class TestLoadSave(unittest.TestCase):

    def load_save_scalar_valued_mf(self, orig_f, loaded_f, hol=False):
        if not hol:
            self.assertTrue(orig_f.fc_dct == loaded_f.fc_dct)
            self.assertTrue(orig_f._is_cuspidal == loaded_f._is_cuspidal)
            self.assertTrue(orig_f.base_ring == loaded_f.base_ring)
            self.assertTrue(orig_f.prec == loaded_f.prec)
        else:
            self.load_save_scalar_valued_mf(orig_f, loaded_f)
            self.assertTrue(orig_f.wt == loaded_f.wt)

    def loaded_form(self, form, fname):
        form.save_as_binary(fname)
        if isinstance(form, ModFormQexpLevel1):
            return ModFormQexpLevel1.load_from(fname)
        else:
            return QexpLevel1.load_from(fname)

    def test_save_load(self):
        with tempfile.NamedTemporaryFile() as temp:
            f = es4
            lf = self.loaded_form(f, temp.name)
            self.load_save_scalar_valued_mf(f, lf, hol=True)

            f = qsres4
            lf = self.loaded_form(f, temp.name)
            self.load_save_scalar_valued_mf(f, lf)

            f = x12
            lf = self.loaded_form(f, temp.name)
            self.load_save_scalar_valued_mf(f, lf, hol=True)

            f = ff_x12
            lf = self.loaded_form(f, temp.name)
            self.load_save_scalar_valued_mf(f, lf, hol=True)

            f = x35
            lf = self.loaded_form(f, temp.name)
            self.load_save_scalar_valued_mf(f, lf, hol=True)

            f = ff_x35
            lf = self.loaded_form(f, temp.name)
            self.load_save_scalar_valued_mf(f, lf, hol=True)


suite = unittest.TestLoader().loadTestsFromTestCase(TestLoadSave)
unittest.TextTestRunner(verbosity=2).run(suite)
2,241
32.462687
96
py
degree2
degree2-master/tests/test_prec_class.py
# -*- coding: utf-8 -*-
import unittest
from degree2.basic_operation import PrecisionDeg2, reduced_form_with_sign


class TestPrecClass(unittest.TestCase):

    def test_eq(self):
        l = [(n, r, m) for n, r, m in PrecisionDeg2(10)
             if n == 10 and m == 10]
        self.assertTrue(PrecisionDeg2(10) == PrecisionDeg2(l))

    def test_ge(self):
        self.assertTrue(PrecisionDeg2(10) >= PrecisionDeg2(5))
        self.assertTrue(
            PrecisionDeg2(10) >= PrecisionDeg2([(10, 1, 2), (2, -1, 3)]))
        self.assertTrue(
            PrecisionDeg2([(10, 1, 3)]) >= PrecisionDeg2([(2, 1, 3)]))

    def test_le(self):
        self.assertTrue(PrecisionDeg2(5) <= PrecisionDeg2(10))
        l = [(n, r, m) for n, r, m in PrecisionDeg2(5)
             if n == 5 and m == 5]
        self.assertTrue(PrecisionDeg2(5) <= PrecisionDeg2(l))
        self.assertTrue(PrecisionDeg2(l) <= PrecisionDeg2(5))
        self.assertTrue(
            PrecisionDeg2([(2, 1, 3)]) <= PrecisionDeg2([(10, 1, 3)]))

    def test_group_by_reduced_forms_with_sgn(self):
        prec = 8
        bls = []
        for t, ls in PrecisionDeg2(prec).group_by_reduced_forms_with_sgn().iteritems():
            rdf_t, sgn_t = reduced_form_with_sign(t)
            for t1, sgn in ls:
                _, sgn_t1 = reduced_form_with_sign(t1)
                bls.append(sgn_t == sgn_t1 * sgn)
        self.assertTrue(all(bls))


suite = unittest.TestLoader().loadTestsFromTestCase(TestPrecClass)
unittest.TextTestRunner(verbosity=2).run(suite)
1,501
37.512821
87
py
degree2
degree2-master/tests/test_divide.py
# -*- coding: utf-8 -*-
import unittest
# from unittest import skip
from degree2.scalar_valued_smfs import eisenstein_series_degree2, x10_with_prec
from degree2.rankin_cohen_diff import rankin_cohen_pair_sym


class TestDivide(unittest.TestCase):

    def testdivide(self):
        prec = 10
        x10 = x10_with_prec(prec + 1)
        es4 = eisenstein_series_degree2(4, prec + 1)
        self.assertEqual((es4 * x10).divide(x10, prec), es4)
        self.assertEqual((x10 * x10).divide(x10, prec), x10)

    def test_divide_vector_valued(self):
        prec = 6
        x10 = x10_with_prec(prec + 1)
        es4 = eisenstein_series_degree2(4, prec + 1)
        es6 = eisenstein_series_degree2(6, prec + 1)
        f = rankin_cohen_pair_sym(2, es4, es6)
        g = f * x10
        self.assertEqual(f._down_prec(prec),
                         g.divide(x10, prec, parallel=True))


suite = unittest.TestLoader().loadTestsFromTestCase(TestDivide)
unittest.TextTestRunner(verbosity=2).run(suite)
996
31.16129
79
py
degree2
degree2-master/tests/test_const.py
'''A module for testing degree2.const.
'''
import unittest
from degree2.const import (ConstMul, ConstDivision, ConstVectValued,
                           dependencies, needed_precs,
                           ConstVectValuedHeckeOp)
from degree2.all import degree2_modular_forms_ring_level1_gens
from degree2.scalar_valued_smfs import x10_with_prec
from degree2.const import ScalarModFormConst as SMFC
from unittest import skip


class ConstsTest(unittest.TestCase):

    @skip("OK")
    def test_scalar_calc_form(self):
        '''Test SMFC.calc_form.
        '''
        prec = 5
        es4, es6, x10, x12, _ = degree2_modular_forms_ring_level1_gens(prec)
        c = SMFC([4, 6])
        self.assertEqual(c.calc_form(prec), es4 * es6)
        c = SMFC({(4, 6): 1})
        self.assertEqual(c.calc_form(prec), es4 * es6)
        c = SMFC([4, 4, 10, 12])
        self.assertEqual(c.calc_form(prec), es4 ** 2 * x10 * x12)
        c = SMFC({(4, 4, 6): 1, (4, 10): -1})
        self.assertEqual(c.calc_form(prec), es4 ** 2 * es6 - es4 * x10)

    @skip("OK")
    def test_division_multiplication(self):
        '''Test the method calc_form of ConstDivision and ConstMul.'''
        prec = 5
        es4, es6, _, _, _ = degree2_modular_forms_ring_level1_gens(prec)
        sccs = [SMFC([4, 6]), SMFC([10, 12])]
        cvc = ConstVectValued(2, sccs, 0, None)
        cd = ConstDivision([cvc], [1], SMFC([4, 6]), 0)
        cm = ConstMul(cvc, SMFC([4, 6]))
        F = cvc.calc_form(prec)
        G = cd.calc_form(prec)
        H = cm.calc_form(prec)
        self.assertNotEqual(F, 0)
        self.assertEqual(G.wt, 22)
        self.assertEqual(G * es4 * es6, F)
        self.assertEqual(H.wt, 42)
        self.assertEqual(H, F * es4 * es6)

    @skip("OK")
    def test_division_cusp(self):
        '''Test the method calc_form of ConstDivision in the case of
        division by a cusp form.
        '''
        prec = 5
        x10 = x10_with_prec(prec)
        sccs = [SMFC([4, 10]), SMFC([6, 10])]
        cvc = ConstVectValued(2, sccs, 0, None)
        cd = ConstDivision([cvc], [1], SMFC([10]), 1)
        F = cvc.calc_form(prec)
        G = cd.calc_form(prec)
        self.assertNotEqual(F, 0)
        self.assertEqual(G.prec.value, prec)
        self.assertEqual(G * x10, F)

    def test_dependencies(self):
        '''Test the function dependencies.
        '''
        j = 10
        c1 = ConstVectValued(j, [SMFC([4, 6])], 0, None)
        c2 = ConstDivision([c1], [1], SMFC([4]), 0)
        c3 = ConstMul(c2, SMFC([4]))
        c4 = ConstVectValued(j, [SMFC([10])], 0, None)
        c5 = ConstDivision([c4, c3], [1], SMFC([4]), 0)
        self.assertEqual(dependencies(c5), set([c1, c2, c3, c4]))

    def test_needed_precs(self):
        '''Test the function needed_precs.
        '''
        j = 10
        c1 = ConstVectValued(j, [SMFC([5, 5])], 0, None)
        c2 = ConstDivision([c1], [1], SMFC([10]), 1)
        c3 = ConstVectValuedHeckeOp(c2, 2)
        c4 = ConstDivision([c1], [1], SMFC([12]), 1)
        c5 = ConstDivision([c3, c4], [1, -1], SMFC([10]), 1)
        precs = needed_precs(c5, 5)
        self.assertEqual(set(precs.keys()), set([c1, c2, c3, c4, c5]))
        self.assertEqual(precs[c5], 6)
        self.assertEqual(precs[c4], 7)
        self.assertEqual(precs[c3], 12)
        self.assertEqual(precs[c2], 13)
        self.assertEqual(precs[c1], 14)

    def test_walk(self):
        '''Test the method walk of ConstVectBase.
        '''
        j = 10
        c1 = ConstVectValued(j, [SMFC([5, 5])], 0, None)
        c2 = ConstDivision([c1], [1], SMFC([10]), 1)
        c3 = ConstVectValuedHeckeOp(c2, 2)
        c4 = ConstDivision([c1], [1], SMFC([12]), 1)
        c5 = ConstDivision([c3, c4], [1, -1], SMFC([10]), 1)
        self.assertEqual(list(c1.walk()), [c1])
        self.assertEqual(list(c2.walk()), [c1, c2])
        self.assertEqual(list(c3.walk()), [c1, c2, c3])
        self.assertEqual(list(c4.walk()), [c1, c4])
        self.assertEqual(list(c5.walk()), [c1, c2, c3, c1, c4, c5])


suite = unittest.TestLoader().loadTestsFromTestCase(ConstsTest)
unittest.TextTestRunner(verbosity=2).run(suite)
4,115
36.761468
78
py
degree2
degree2-master/tests/test_vector_valued.py
# -*- coding: utf-8 -*-
import os.path as opath
import unittest
# from unittest import skip
from sage.all import CuspForms, PolynomialRing, QQ, matrix, identity_matrix
from degree2.scalar_valued_smfs import (
    eisenstein_series_degree2, degree2_modular_forms_ring_level1_gens,
    x5__with_prec)
from degree2.elements import SymWtModFmElt
from degree2.all import (rankin_cohen_pair_sym,
                         rankin_cohen_pair_det2_sym,
                         rankin_cohen_triple_det_sym2,
                         rankin_cohen_triple_det_sym4)
from degree2.basic_operation import PrecisionDeg2
from degree2.utils import det
from degree2.rankin_cohen_diff import vector_valued_rankin_cohen
from degree2.vector_valued_smfs \
    import vector_valued_siegel_modular_forms \
    as vvld_smfs
from .data_dir import data_dir


class TestVectorValued(unittest.TestCase):

    # @skip("OK")
    def test_vector_vald_klingen_hecke_pol(self):
        es4 = eisenstein_series_degree2(4, 5)
        es6 = eisenstein_series_degree2(6, 5)
        F10 = rankin_cohen_pair_sym(2, es4, es6)
        pl = F10.euler_factor_of_spinor_l(2)
        x = pl.parent().gens()[0]
        f = 1 + 24 * x + 2048 * x ** 2
        self.assertTrue(f * f.subs({x: 2 ** 8 * x}) == pl)

    # @skip("OK")
    def test_t_2_action(self):
        es4 = eisenstein_series_degree2(4, 10)
        es6 = eisenstein_series_degree2(6, 10)
        F10 = rankin_cohen_pair_sym(2, es4, es6)
        ev2 = -24 * (1 + 2 ** 8)
        prec5 = PrecisionDeg2(5)
        self.assertTrue(
            all([F10.hecke_operator(2, t) == ev2 * F10[t] for t in prec5]))

    # @skip("OK")
    def test_differential_operator(self):
        def load_cache(f):
            return SymWtModFmElt.load_from(opath.join(
                data_dir("vector_valued_differential"), f))

        p_e4_e4 = load_cache("pair_sym4_e4_e4_prec7.sobj")
        p_e4_x10 = load_cache("pair_sym4_e4_x10_prec7.sobj")
        p_det2_e4_e6 = load_cache("pair_sym4_det2_e4_e6_prec7.sobj")
        t_det_sym4_e4_e4_e6 = load_cache(
            "triple_det_sym4_e4_e4_e6_prec10.sobj")
        t_det_sym4_e4_e6_x12 = load_cache(
            "triple_det_sym4_e4_e6_x12_prec10.sobj")

        # prec 7
        es4, es6, x10, x12, _ = degree2_modular_forms_ring_level1_gens(7)
        self.assertEqual(rankin_cohen_pair_sym(4, es4, es4), p_e4_e4)
        self.assertEqual(rankin_cohen_pair_sym(4, es4, x10), p_e4_x10)
        self.assertEqual(rankin_cohen_pair_det2_sym(4, es4, es6),
                         p_det2_e4_e6)
        # prec 10
        es4, es6, x10, x12, _ = degree2_modular_forms_ring_level1_gens(10)
        self.assertEqual(rankin_cohen_triple_det_sym4(es4, es4, es6),
                         t_det_sym4_e4_e4_e6)
        self.assertEqual(rankin_cohen_triple_det_sym4(es4, es6, x12),
                         t_det_sym4_e4_e6_x12)

    # @skip("OK")
    def test_det_sym2_odd(self):
        prec = 7
        es4, es6, x10, x12, x35 = degree2_modular_forms_ring_level1_gens(prec)
        f21 = rankin_cohen_triple_det_sym2(es4, es6, x10)
        f23 = rankin_cohen_triple_det_sym2(es4, es6, x12)
        f27 = rankin_cohen_triple_det_sym2(es4, x10, x12)
        fs = [f21, f23, f27]
        det_form = det([f.forms for f in fs])
        det_form = det_form[(4, -2, 6)] ** (-1) * det_form
        self.assertEqual(det_form, es4 * x35 ** 2)

    # @skip("OK")
    # def test_module_of_wt_sym_2_4(self):
    #     for k, j in [(27, 2), (29, 2), (18, 2), (20, 2),
    #                  (12, 4), (14, 4), (19, 4), (21, 4)]:
    #         M = vvld_smfs(j, k, k//10 + 3)
    #         self.assertTrue(M.dimension() > 1)
    #         self.assertEqual(len(M.linearly_indep_tuples()),
    #                          M.dimension())

    # @skip("OK")
    def test_vecor_valued_klingen(self):
        lst = [(18, 2), (20, 2), (12, 4), (14, 4)]
        R = PolynomialRing(QQ, names="x")
        x = R.gens()[0]

        def euler_factor_at_2(f):
            wt = f.weight()
            return 1 - f[2] * x + 2 ** (wt - 1) * x ** 2

        for k, j in lst:
            M = vvld_smfs(j, k, 4)
            S = CuspForms(1, j + k)
            f = S.basis()[0]
            f = f * f[1] ** (-1)
            pl = euler_factor_at_2(f)
            lam = (1 + 2 ** (k - 2)) * f[2]
            F = M.eigenform_with_eigenvalue_t2(lam)
            self.assertEqual(R(F.euler_factor_of_spinor_l(2)),
                             pl * pl.subs({x: 2 ** (k - 2) * x}))

    # @skip("OK")
    def test_vector_valued_rankin_cohen(self):
        prec = 5
        M4_10 = vvld_smfs(4, 10, prec)
        f4_10 = M4_10.basis()[0]
        f4_15 = vvld_smfs(4, 15, prec).basis()[0]
        e4 = eisenstein_series_degree2(4, prec)
        g4_15 = vector_valued_rankin_cohen(e4, f4_10)
        t = ((1, 1, 1), 0)
        self.assertEqual(f4_15 * g4_15[t], g4_15 * f4_15[t])

        es4, es6, _, _, _ = degree2_modular_forms_ring_level1_gens(5)
        f = es6
        x5 = x5__with_prec(5)
        f_even_sym2 = rankin_cohen_pair_sym(2, f, x5)
        f_odd_sym2 = vector_valued_rankin_cohen(es4 * x5, f_even_sym2)
        a = f_odd_sym2[(1, 0, 2)].vec[1]
        g_sym2_21 = vvld_smfs(2, 21, 4).basis()[0]
        b = g_sym2_21[(1, 0, 2)].vec[1]
        self.assertEqual(f_odd_sym2 * b, g_sym2_21 * a)

    def test_vecor_valued_misc(self):
        prec = 5
        M = vvld_smfs(2, 20, prec)
        m = matrix([M._to_vector(f).list() for f in M.basis()])
        self.assertEqual(m, identity_matrix(QQ, M.dimension()))


suite = unittest.TestLoader().loadTestsFromTestCase(TestVectorValued)
unittest.TextTestRunner(verbosity=2).run(suite)
5,627
35.545455
78
py
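Several assertions above compare two forms only up to a scalar by cross-multiplying a single Fourier coefficient (e.g. f4_15 * g4_15[t] == g4_15 * f4_15[t]). A small helper naming the idiom (illustrative only, not part of the library; the forms compared here live in one-dimensional spaces, so one coefficient suffices):

def equal_up_to_scalar(f, g, t):
    # If f and g are proportional, then for any index t the cross products
    # of the single coefficients agree; t should index a Fourier
    # coefficient that is nonzero for at least one of the two forms.
    return f * g[t] == g * f[t]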
degree2
degree2-master/tests/__init__.py
0
0
0
py
degree2
degree2-master/tests/test_pullback_se_vector_valued.py
# -*- coding: utf-8 -*-
import unittest
from degree2.diff_operator_pullback_vector_valued import (
    bracket_power, ad_bracket, _Z_U_ring, _diff_z_exp,
    fc_of_pullback_of_diff_eisen, _U_ring, sqcap_mul, D_tilde_nu,
    algebraic_part_of_standard_l, _pullback_vector, _u3_u4_nonzero)
from sage.all import (random_matrix, QQ, binomial, identity_matrix, exp,
                      random_vector, symbolic_expression, ZZ, derivative,
                      block_matrix, matrix, cached_function)
from degree2.vector_valued_smfs import vector_valued_siegel_modular_forms as vvsmf
from degree2.scalar_valued_smfs import x12_with_prec, x35_with_prec
from unittest import skip
from ..siegel_series.tests.utils import random_even_symm_mat
from degree2.all import CuspFormsDegree2
from degree2.standard_l_scalar_valued import tpl_to_half_int_mat, G_poly


@cached_function
def _wt10_13_space_forms():
    M = vvsmf(10, 13, prec=4)
    f = M.eigenform_with_eigenvalue_t2(QQ(84480))
    g = M.eigenform_with_eigenvalue_t2(QQ(-52800))
    return (M, f, g)


class TestPullBackVectorValued(unittest.TestCase):

    @skip("OK")
    def test_bracket_power_ad_bracket(self):
        for n in range(2, 6):
            for _ in range(1000):
                m = random_matrix(QQ, n)
                self.assertEqual(bracket_power(m, n)[0, 0], m.det())
                for p in range(n + 1):
                    self.assertEqual(
                        ad_bracket(m, p) * bracket_power(m, p),
                        m.det() * identity_matrix(QQ, binomial(n, p)))

    @skip("OK")
    def test_diff_z_exp(self):
        z11 = symbolic_expression("z11")
        for _ in range(500):
            n = random_vector(QQ, 1)[0]
            m = random_vector(ZZ, 1)[0].abs()
            r_pol = sum(a * b for a, b in
                        zip(random_vector(QQ, 5),
                            (z11 ** a for a in range(5))))
            pol_exp = r_pol * exp(n * z11)
            r_pol_diff_se = (
                derivative(pol_exp, z11, m) /
                exp(n * z11)).canonicalize_radical()
            r_pol_diff_se = _Z_U_ring(r_pol_diff_se)
            r_pol_diff = _diff_z_exp(
                (m, 0, 0, 0), _Z_U_ring(r_pol), [n, 0, 0, 0],
                base_ring=_Z_U_ring)
            self.assertEqual(r_pol_diff, r_pol_diff_se)

    @skip("ok")
    def test_pullback_diff_eisen_sym2_wt14(self):
        M = vvsmf(2, 14, 5)
        u1, _ = _U_ring.gens()
        # cusp form
        f = M.eigenform_with_eigenvalue_t2(QQ(-19200))
        f = f * f[(1, 1, 1), 0] ** (-1)
        A = tpl_to_half_int_mat((1, 1, 1))
        D = A

        def fc_pb(a):
            return fc_of_pullback_of_diff_eisen(12, 14, 2, a, D, 1, 1)

        fc_111 = fc_pb(A)
        a0 = fc_111[u1 ** 2]

        def assert_eq(t):
            a = tpl_to_half_int_mat(t)
            self.assertEqual(f[t]._to_pol(), fc_pb(a) / a0)

        assert_eq((1, 1, 1))
        assert_eq((2, 1, 3))
        assert_eq((2, 0, 3))

    @skip("ok")
    def test_pullback_diff_eisen_scalar_wt12(self):
        x12 = x12_with_prec(5)
        self.assert_pullback_scalar_valued(x12, (1, 1, 1),
                                           [(2, 1, 3), (2, 0, 2),
                                            (2, 2, 3)], 10)

    @skip("ok")
    def test_pullback_diff_eisen_scalar_wt12_diff4(self):
        x12 = x12_with_prec(5)
        self.assert_pullback_scalar_valued(x12, (1, 1, 1),
                                           [(2, 1, 3), (2, 0, 2),
                                            (2, 2, 3)], 8, verbose=True)

    @skip("ok")
    def test_pullback_diff_eisen_scalar_wt35(self):
        x35 = x35_with_prec(10)
        self.assert_pullback_scalar_valued(x35, (2, 1, 3),
                                           [(2, -1, 4), (3, -1, 4),
                                            (3, -2, 4)], 34, verbose=True)

    def assert_pullback_scalar_valued(self, f, t0, ts, l, verbose=False):
        D = tpl_to_half_int_mat(t0)
        f = f * f[t0] ** (-1)
        if verbose:
            print "Compute for %s" % (t0,)
        a = fc_of_pullback_of_diff_eisen(
            l, f.wt, 0, D, D, 1, 1, verbose=verbose)
        self.assertNotEqual(a, 0)
        for t in ts:
            if verbose:
                print "Checking for %s" % (t, )
            A = tpl_to_half_int_mat(t)
            self.assertEqual(fc_of_pullback_of_diff_eisen(
                l, f.wt, 0, A, D, 1, 1, verbose=verbose), a * f[t])

    @skip("Not ok")
    def test_14_identity(self):
        '''Test identity (14) in [Bö].
        '''
        n = 2
        for _ in range(50):
            t1, t2, t3, t4 = random_t1234(n)
            z2 = random_matrix(QQ, n)
            for al in range(n + 1):
                for be in range(n + 1):
                    if al + be <= n:
                        p = n - al - be
                        lhs = sqcap_mul(
                            bracket_power(z2 * t4, al),
                            sqcap_mul(identity_matrix(QQ, binomial(n, p)),
                                      bracket_power(z2 * t3, be),
                                      n, p, be) *
                            ad_bracket(t1, p + be) *
                            bracket_power(t2, p + be),
                            n, al, p + be)
                        lhs = lhs[0, 0] * QQ(2) ** (-p - 2 * be)
                        rhs = delta_al_be(t1, t2, t4, z2, al, be)
                        self.assertEqual(lhs, rhs)

    @skip("OK")
    def test_delta_p_q_expand(self):
        n = 3
        for _ in range(50):
            t1, t2, t3, t4 = random_t1234(n)
            z2 = random_matrix(QQ, n)
            for q in range(n + 1):
                p = n - q
                lhs = sum(delta_al_be(t1, t2, t4, z2, q - be, be) *
                          (-1) ** be * binomial(q, be)
                          for be in range(q + 1))
                rhs = sqcap_mul(bracket_power(t1 * z2, q) *
                                bracket_power(
                                    t4 - QQ(1) / QQ(4) *
                                    t3 * t1 ** (-1) * t2, q),
                                bracket_power(t2, p), n, q, p)
                rhs = rhs[0, 0] * QQ(2) ** (-p)
                self.assertEqual(lhs, rhs)

    @skip("ok")
    def test_diff_pol_value(self):
        for _ in range(50):
            t1, t2, _, t4 = random_t1234(2)
            for k, nu in [(10, 2), (10, 4), (12, 6)]:
                self.assertEqual(scalar_valued_diff_pol_1(k, nu, t1, t4, t2),
                                 scalar_valued_diff_pol_2(k, nu, t1, t4, t2))

    @skip("OK")
    def test_pullback_lin_comb(self):
        M, f, g = _wt10_13_space_forms()
        t0 = f._none_zero_tpl()
        D = tpl_to_half_int_mat(t0)
        u3_val, u4_val, f_t0_pol_val = _u3_u4_nonzero(f, t0)
        vec = _pullback_vector(ZZ(6), D, u3_val, u4_val, M) / f_t0_pol_val
        a = algebraic_part_of_standard_l(f, ZZ(4), M)
        f_vec = M._to_vector(f)
        g_vec = M._to_vector(g)
        m = (matrix([f_vec, g_vec]).transpose()) ** (-1)
        self.assertEqual((m * vec)[0], a)

    @skip("ok")
    def test_pullback_lin_comb1(self):
        M, f, _ = _wt10_13_space_forms()
        self.assert_pullback_lin_comb(
            M, f, 6, [(1, 1, 2), (2, 1, 2)], verbose=True)

    def assert_pullback_lin_comb(self, M, f, l, ts, verbose=False):
        j = f.sym_wt
        k = f.wt
        t0 = f._none_zero_tpl()
        D = tpl_to_half_int_mat(t0)
        u3_val, u4_val, _ = _u3_u4_nonzero(f, t0)
        vec = _pullback_vector(l, D, u3_val, u4_val, M)
        f_vec = M._to_form(vec)
        for t in ts:
            A = tpl_to_half_int_mat(t)
            fc = fc_of_pullback_of_diff_eisen(l, k, j, A, D, u3_val, u4_val)
            if verbose:
                print "Checking for t = %s" % (t, )
            if j > 0:
                self.assertEqual(fc, f_vec[t]._to_pol())
            else:
                self.assertEqual(fc, f_vec[t])

    @skip("OK")
    def test_pullback_lin_comb_wt14_scalar(self):
        S14 = CuspFormsDegree2(14, prec=4)
        f = S14.basis()[0]
        self.assert_pullback_lin_comb(S14, f, 6, [(1, 1, 2), (2, 1, 2)])

    @skip("OK")
    def test_pullback_linear_comb2(self):
        M, f, g = _wt10_13_space_forms()
        a = algebraic_part_of_standard_l(f, 10, M)
        b = algebraic_part_of_standard_l(g, 10, M)
        t0 = f._none_zero_tpl()
        u3_val, u4_val, _ = _u3_u4_nonzero(f, t0)
        u3, u4 = f[t0]._to_pol().parent().gens()
        af = f[t0]._to_pol().subs({u3: u3_val, u4: u4_val})
        ag = g[t0]._to_pol().subs({u3: u3_val, u4: u4_val})
        A = tpl_to_half_int_mat(t0)
        self.assertEqual((f[t0] * af * a + g[t0] * ag * b)._to_pol(),
                         fc_of_pullback_of_diff_eisen(12, 13, 10, A, A,
                                                      u3_val, u4_val))


def delta_al_be(t1, t2, t4, z2, al, be):
    n = z2.ncols()
    p = n - al - be
    t3 = t2.transpose()
    res = sqcap_mul(bracket_power(t1 * z2, al + be) *
                    sqcap_mul(bracket_power(t4, al),
                              bracket_power(t3 * t1 ** (-1) * t2, be),
                              n, al, be),
                    bracket_power(t2, p), n, al + be, p)
    res = res[0, 0] * (QQ(2) ** (- p - 2 * be))
    return res


def random_t1234(n):
    t2 = random_matrix(QQ, n)
    t3 = t2.transpose()
    t1 = random_even_symm_mat(n).change_ring(QQ)
    t4 = random_even_symm_mat(n).change_ring(QQ)
    return [t1, t2, t3, t4]


def scalar_valued_diff_pol_1(k, nu, A, D, R):
    mat = block_matrix([[A, R / ZZ(2)], [R.transpose() / ZZ(2), D]])
    G = G_poly(k, nu)
    y1, y2, y3 = G.parent().gens()
    G_y1_y3 = G.subs({y2: A.det() * D.det()})
    return G_y1_y3.subs({y1: R.det() / ZZ(4),
                         y3: mat.det()}).constant_coefficient()


def scalar_valued_diff_pol_2(k, nu, A, D, R):
    A0 = D0 = tpl_to_half_int_mat((1, 1, 1))
    a0 = QQ(D_tilde_nu(k, nu, QQ(1), [ZZ(0)] * 4,
                       A=A0, D=D0).constant_coefficient())
    b0 = QQ(scalar_valued_diff_pol_1(
        k, nu, A0, D0, tpl_to_half_int_mat((0, 0, 0))))
    a = b0 / a0
    return QQ(D_tilde_nu(k, nu, QQ(1), R.list(),
                         A=A, D=D).constant_coefficient()) * a


suite = unittest.TestLoader().loadTestsFromTestCase(TestPullBackVectorValued)
unittest.TextTestRunner(verbosity=2).run(suite)
10,101
38.155039
99
py
degree2
degree2-master/tests/test_all.py
import os
import re
# import imp
from os.path import dirname, isfile, join

pat = re.compile(".*test.+py$")

this_dir = dirname(__file__)


def test_module_names():
    return [f for f in os.listdir(this_dir)
            if re.match(pat, f) and isfile(join(this_dir, f))
            and join(this_dir, f) != __file__]


from degree2.tests import (test_fc_mul_add, test_eigenforms, test_divide,
                           test_save_load, test_interpolate, test_gens,
                           misc_test, test_prec_class, test_vector_valued,
                           test_const)

# def import_tests():
#     for f in test_module_names():
#         mod_name = "degree2.tests." + f.split(".")[0]
#         pth = join(this_dir, f)
#         imp.load_source(mod_name, pth)

# import_tests()
774
28.807692
76
py
degree2
degree2-master/tests/test_gens.py
# -*- coding: utf-8 -*-
import unittest
from degree2.scalar_valued_smfs import eisenstein_series_degree2, \
    x10_with_prec, x12_with_prec, x35_with_prec
from degree2.basic_operation import PrecisionDeg2
from .data_dir import load_from_data_dir

fc_dct4 = load_from_data_dir("es4_fc_dct.sobj", "eigen_forms")
fc_dct6 = load_from_data_dir("es6_fc_dct.sobj", "eigen_forms")
fc_dct10 = load_from_data_dir("x10_fc_dct.sobj", "eigen_forms")
fc_dct12 = load_from_data_dir("x12_fc_dct.sobj", "eigen_forms")
fc_dct35 = load_from_data_dir("x35_fc_dct.sobj", "eigen_forms")

global_prec = PrecisionDeg2([(34, -17, 51), (8, 35, 39), (12, 33, 27)])


class TestDeg2Gens(unittest.TestCase):

    def sub_dct(self, form, prec=PrecisionDeg2(10)):
        return {k: form[k] for k in prec}

    def test_es4(self):
        es4 = eisenstein_series_degree2(4, global_prec)
        self.assertTrue(self.sub_dct(es4) == fc_dct4)

    def test_es6(self):
        es6 = eisenstein_series_degree2(6, global_prec)
        self.assertTrue(self.sub_dct(es6) == fc_dct6)

    def test_x10(self):
        x10 = x10_with_prec(global_prec)
        self.assertTrue(self.sub_dct(x10) == fc_dct10)

    def test_x12(self):
        x12 = x12_with_prec(global_prec)
        self.assertTrue(self.sub_dct(x12) == fc_dct12)

    def test_x35(self):
        x35 = x35_with_prec(global_prec)
        self.assertTrue(self.sub_dct(x35) == fc_dct35)


suite = unittest.TestLoader().loadTestsFromTestCase(TestDeg2Gens)
unittest.TextTestRunner(verbosity=2).run(suite)
1,528
30.204082
96
py
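All of the tests above follow one pattern: truncate a freshly computed Fourier-coefficient dictionary to the keys of a stored reference and compare. A self-contained, Sage-free sketch of that pattern (the coefficient values are made up for illustration):

def sub_dct(form, keys):
    # Restrict a coefficient dictionary to a reference set of keys,
    # as TestDeg2Gens.sub_dct does with PrecisionDeg2 above.
    return {k: form[k] for k in keys}

stored = {(1, 1, 1): 2, (2, 0, 1): 5}                   # reference coefficients
computed = {(1, 1, 1): 2, (2, 0, 1): 5, (3, 1, 2): 9}   # computed to higher precision
assert sub_dct(computed, stored.keys()) == stored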
degree2
degree2-master/tests/test_pullback_se_scalar_valued.py
import unittest from sage.all import random_prime, matrix, PolynomialRing, QQ, mul, vector from degree2.all import CuspFormsDegree2 from degree2.standard_l_scalar_valued import (epsilon_tilde_l_k_degree2, tpl_to_half_int_mat, first_elt_of_kern_of_vandermonde) from degree2.basic_operation import PrecisionDeg2 # from unittest import skip class TestPullBackScalarValued(unittest.TestCase): # @skip("OK") def test_pullback(self): S = CuspFormsDegree2(24, prec=5) l = 2 tpls = S.linearly_indep_tuples() A1 = tpl_to_half_int_mat((1, 1, 1)) pull_back_dct = {t: epsilon_tilde_l_k_degree2( l + 2, S.wt, A1, tpl_to_half_int_mat(t)) for t in tpls} pull_back_vec = S._to_vector(pull_back_dct) f = S._to_form(pull_back_vec) for t in PrecisionDeg2(3): self.assertEqual( f[t], epsilon_tilde_l_k_degree2(l + 2, S.wt, A1, tpl_to_half_int_mat(t))) def test_solution_of_vandermonde(self): def _assert(d): alphas = [] while len(set(alphas)) < d and all(a != 0 for a in alphas): alphas.append(random_prime(100000)) alphas = list(set(alphas)) A = matrix([[alpha ** i for alpha in alphas] for i in range(d)]) v = [random_prime(100000) for _ in range(d)] x = PolynomialRing(QQ, names="x").gens()[0] chpy = mul(x - alpha for alpha in alphas) self.assertEqual(first_elt_of_kern_of_vandermonde(chpy, alphas[0], v), (A ** (-1) * vector(v))[0]) for d in [1, 2, 3, 10, 20]: _assert(d) suite = unittest.TestLoader().loadTestsFromTestCase(TestPullBackScalarValued) unittest.TextTestRunner(verbosity=2).run(suite)
1,824
40.477273
93
py
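test_solution_of_vandermonde checks that first_elt_of_kern_of_vandermonde reproduces the first entry of A^(-1)v for a Vandermonde matrix A. A NumPy stand-in for that exact rational check (floating point instead of QQ; the values are illustrative):

import numpy as np

alphas = np.array([2.0, 3.0, 5.0])
# Row i is [alpha**i for alpha in alphas], matching the matrix built in the test.
A = np.vander(alphas, increasing=True).T
v = np.array([7.0, 11.0, 13.0])
print(np.linalg.solve(A, v)[0])  # first entry of A^-1 v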
SAFE
SAFE-master/safe.py
# SAFE TEAM # Copyright (C) 2019 Luca Massarelli, Giuseppe Antonio Di Luna, Fabio Petroni, Leonardo Querzoni, Roberto Baldoni from asm_embedding.FunctionAnalyzerRadare import RadareFunctionAnalyzer from argparse import ArgumentParser from asm_embedding.FunctionNormalizer import FunctionNormalizer from asm_embedding.InstructionsConverter import InstructionsConverter from neural_network.SAFEEmbedder import SAFEEmbedder from utils import utils class SAFE: def __init__(self, model): self.converter = InstructionsConverter("data/i2v/word2id.json") self.normalizer = FunctionNormalizer(max_instruction=150) self.embedder = SAFEEmbedder(model) self.embedder.loadmodel() self.embedder.get_tensor() def embedd_function(self, filename, address): analyzer = RadareFunctionAnalyzer(filename, use_symbol=False, depth=0) functions = analyzer.analyze() instructions_list = None for function in functions: if functions[function]['address'] == address: instructions_list = functions[function]['filtered_instructions'] break if instructions_list is None: print("Function not found") return None converted_instructions = self.converter.convert_to_ids(instructions_list) instructions, length = self.normalizer.normalize_functions([converted_instructions]) embedding = self.embedder.embedd(instructions, length) return embedding if __name__ == '__main__': utils.print_safe() parser = ArgumentParser(description="Safe Embedder") parser.add_argument("-m", "--model", help="Safe trained model to generate function embeddings") parser.add_argument("-i", "--input", help="Input executable that contains the function to embedd") parser.add_argument("-a", "--address", help="Hexadecimal address of the function to embedd") args = parser.parse_args() address = int(args.address, 16) safe = SAFE(args.model) embedding = safe.embedd_function(args.input, address) print(embedding[0])
2,105
35.947368
114
py
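For context, a hypothetical invocation of the SAFE wrapper defined above, mirroring its __main__ block; the model path, binary path and address are placeholders for local files:

# Assumes safe.py above is importable and the trained model exists locally.
safe = SAFE("data/safe.pb")
embedding = safe.embedd_function("/path/to/binary", 0x400500)
if embedding is not None:
    print(embedding[0])  # the 100-dimensional function embedding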
SAFE
SAFE-master/downloader.py
# SAFE TEAM # Copyright (C) 2019 Luca Massarelli, Giuseppe Antonio Di Luna, Fabio Petroni, Leonardo Querzoni, Roberto Baldoni import argparse import os import sys from subprocess import call class Downloader: def __init__(self): parser = argparse.ArgumentParser(description='SAFE downloader') parser.add_argument("-m", "--model", dest="model", help="Download the trained SAFE model for x86", action="store_true", required=False) parser.add_argument("-i2v", "--i2v", dest="i2v", help="Download the i2v dictionary and embedding matrix", action="store_true", required=False) parser.add_argument("-b", "--bundle", dest="bundle", help="Download all files necessary to run the model", action="store_true", required=False) parser.add_argument("-td", "--train_data", dest="train_data", help="Download the files necessary to train the model (It takes a lot of space!)", action="store_true", required=False) args = parser.parse_args() self.download_model = (args.model or args.bundle) self.download_i2v = (args.i2v or args.bundle) self.download_train = args.train_data if not (self.download_model or self.download_i2v or self.download_train): parser.print_help(sys.__stdout__) self.url_model = "https://drive.google.com/file/d/1Kwl8Jy-g9DXe1AUjUZDhJpjRlDkB4NBs/view?usp=sharing" self.url_i2v = "https://drive.google.com/file/d/1CqJVGYbLDEuJmJV6KH4Dzzhy-G12GjGP" self.url_train = ['https://drive.google.com/file/d/1sNahtLTfZY5cxPaYDUjqkPTK0naZ45SH/view?usp=sharing','https://drive.google.com/file/d/16D5AVDux_Q8pCVIyvaMuiL2cw2V6gtLc/view?usp=sharing','https://drive.google.com/file/d/1cBRda8fYdqHtzLwstViuwK6U5IVHad1N/view?usp=sharing'] self.train_name = ['AMD64ARMOpenSSL.tar.bz2','AMD64multipleCompilers.tar.bz2','AMD64PostgreSQL.tar.bz2'] self.base_path = "data" self.path_i2v = os.path.join(self.base_path, "") self.path_model = os.path.join(self.base_path, "") self.path_train_data = os.path.join(self.base_path, "") self.i2v_compress_name='i2v.tar.bz2' self.model_compress_name='model.tar.bz2' self.datasets_compress_name='safe.pb' @staticmethod def download_file(id,path): try: print("Downloading from "+ str(id) +" into "+str(path)) call(['./godown.pl',id,path]) except Exception as e: print("Error downloading file at url:" + str(id)) print(e) @staticmethod def decompress_file(file_src,file_path): try: call(['tar','-xvf',file_src,'-C',file_path]) except Exception as e: print("Error decompressing file:" + str(file_src)) print('you need tar command e b2zip support') print(e) def download(self): print('Making the godown.pl script executable, thanks:'+str('https://github.com/circulosmeos/gdown.pl')) call(['chmod', '+x','godown.pl']) print("SAFE --- downloading models") if self.download_i2v: print("Downloading i2v model.... in the folder data/i2v/") if not os.path.exists(self.path_i2v): os.makedirs(self.path_i2v) Downloader.download_file(self.url_i2v, os.path.join(self.path_i2v,self.i2v_compress_name)) print("Decompressing i2v model and placing in" + str(self.path_i2v)) Downloader.decompress_file(os.path.join(self.path_i2v,self.i2v_compress_name),self.path_i2v) if self.download_model: print("Downloading the SAFE model... 
in the folder data") if not os.path.exists(self.path_model): os.makedirs(self.path_model) Downloader.download_file(self.url_model, os.path.join(self.path_model,self.datasets_compress_name)) #print("Decompressing SAFE model and placing in" + str(self.path_model)) #Downloader.decompress_file(os.path.join(self.path_model,self.model_compress_name),self.path_model) if self.download_train: print("Downloading the train data.... in the folder data") if not os.path.exists(self.path_train_data): os.makedirs(self.path_train_data) for i,x in enumerate(self.url_train): print("Downloading dataset "+str(self.train_name[i])) Downloader.download_file(x, os.path.join(self.path_train_data,self.train_name[i])) #print("Decompressing the train data and placing in" + str(self.path_train_data)) #Downloader.decompress_file(os.path.join(self.path_train_data,self.datasets_compress_name),self.path_train_data) if __name__=='__main__': a=Downloader() a.download()
5,027
46.885714
281
py
SAFE
SAFE-master/__init__.py
0
0
0
py
SAFE
SAFE-master/dataset_creation/DatabaseFactory.py
# SAFE TEAM # Copyright (C) 2019 Luca Massarelli, Giuseppe Antonio Di Luna, Fabio Petroni, Leonardo Querzoni, Roberto Baldoni from asm_embedding.InstructionsConverter import InstructionsConverter from asm_embedding.FunctionAnalyzerRadare import RadareFunctionAnalyzer import json import multiprocessing from multiprocessing import Pool from multiprocessing.dummy import Pool as ThreadPool import os import random import signal import sqlite3 from tqdm import tqdm class DatabaseFactory: def __init__(self, db_name, root_path): self.db_name = db_name self.root_path = root_path @staticmethod def worker(item): DatabaseFactory.analyze_file(item) return 0 @staticmethod def extract_function(graph_analyzer): return graph_analyzer.extractAll() @staticmethod def insert_in_db(db_name, pool_sem, func, filename, function_name, instruction_converter): path = filename.split(os.sep) if len(path) < 4: return asm = func["asm"] instructions_list = func["filtered_instructions"] instruction_ids = json.dumps(instruction_converter.convert_to_ids(instructions_list)) pool_sem.acquire() conn = sqlite3.connect(db_name) cur = conn.cursor() cur.execute('''INSERT INTO functions VALUES (?,?,?,?,?,?,?,?)''', (None, # id path[-4], # project path[-3], # compiler path[-2], # optimization path[-1], # file_name function_name, # function_name asm, # asm len(instructions_list)) # num of instructions ) inserted_id = cur.lastrowid cur.execute('''INSERT INTO filtered_functions VALUES (?,?)''', (inserted_id, instruction_ids) ) conn.commit() conn.close() pool_sem.release() @staticmethod def analyze_file(item): global pool_sem os.setpgrp() filename = item[0] db = item[1] use_symbol = item[2] depth = item[3] instruction_converter = item[4] analyzer = RadareFunctionAnalyzer(filename, use_symbol, depth) p = ThreadPool(1) res = p.apply_async(analyzer.analyze) try: result = res.get(120) except multiprocessing.TimeoutError: print("Aborting due to timeout:" + str(filename)) print('Try to modify the timeout value in DatabaseFactory instruction result = res.get(TIMEOUT)') os.killpg(0, signal.SIGKILL) except Exception: print("Aborting due to error:" + str(filename)) os.killpg(0, signal.SIGKILL) for func in result: DatabaseFactory.insert_in_db(db, pool_sem, result[func], filename, func, instruction_converter) analyzer.close() return 0 # Create the db where data are stored def create_db(self): print('Database creation...') conn = sqlite3.connect(self.db_name) conn.execute(''' CREATE TABLE IF NOT EXISTS functions (id INTEGER PRIMARY KEY, project text, compiler text, optimization text, file_name text, function_name text, asm text, num_instructions INTEGER) ''') conn.execute('''CREATE TABLE IF NOT EXISTS filtered_functions (id INTEGER PRIMARY KEY, instructions_list text) ''') conn.commit() conn.close() # Scan the root directory to find all the file to analyze, # query also the db for already analyzed files. 
def scan_for_file(self, start): file_list = [] # Scan recursively all the subdirectories directories = os.listdir(start) for item in directories: item = os.path.join(start, item) if os.path.isdir(item): file_list.extend(self.scan_for_file(item + os.sep)) elif os.path.isfile(item) and item.endswith('.o'): file_list.append(item) return file_list # Looks for already existing files in the database # It returns a list of files that are not in the database def remove_override(self, file_list): conn = sqlite3.connect(self.db_name) cur = conn.cursor() q = cur.execute('''SELECT project, compiler, optimization, file_name FROM functions''') names = q.fetchall() names = [os.path.join(self.root_path, n[0], n[1], n[2], n[3]) for n in names] names = set(names) # If some files are already in the db, remove them from the file list if len(names) > 0: print(str(len(names)) + ' files already in the database') cleaned_file_list = [] for f in file_list: if f not in names: cleaned_file_list.append(f) return cleaned_file_list # Root function to create the db def build_db(self, use_symbol, depth): global pool_sem pool_sem = multiprocessing.BoundedSemaphore(value=1) instruction_converter = InstructionsConverter("data/i2v/word2id.json") self.create_db() file_list = self.scan_for_file(self.root_path) print('Found ' + str(len(file_list)) + ' files during the scan') file_list = self.remove_override(file_list) print('Found ' + str(len(file_list)) + ' files to analyze') random.shuffle(file_list) t_args = [(f, self.db_name, use_symbol, depth, instruction_converter) for f in file_list] # Start a parallel pool to analyze files p = Pool(processes=None, maxtasksperchild=20) for _ in tqdm(p.imap_unordered(DatabaseFactory.worker, t_args), total=len(file_list)): pass p.close() p.join()
6,785
38.684211
118
py
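build_db above leaves behind two tables, functions and filtered_functions. A minimal sketch of inspecting the result (the database path is a placeholder):

import sqlite3

conn = sqlite3.connect("dataset.db")
cur = conn.cursor()
# Columns as created by create_db above.
for project, fn, n in cur.execute(
        "SELECT project, function_name, num_instructions FROM functions LIMIT 5"):
    print(project, fn, n)
conn.close()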
SAFE
SAFE-master/dataset_creation/ExperimentUtil.py
# SAFE TEAM # Copyright (C) 2019 Luca Massarelli, Giuseppe Antonio Di Luna, Fabio Petroni, Leonardo Querzoni, Roberto Baldoni import argparse from dataset_creation import DatabaseFactory, DataSplitter, FunctionsEmbedder from utils.utils import print_safe def debug_msg(): msg = "SAFE DATABASE UTILITY" msg += "-------------------------------------------------\n" msg += "This program is an utility to save data into an sqlite database with SAFE \n\n" msg += "There are three main command: \n" msg += "BUILD: It create a db with two tables: functions, filtered_functions. \n" msg += " In the first table there are all the functions extracted from the executable with their hex code.\n" msg += " In the second table functions are converted to i2v representation. \n" msg += "SPLIT: Data are splitted into train validation and test set. " \ " Then it generate the pairs for the training of the network.\n" msg += "EMBEDD: Generate the embeddings of each function in the database using a trained SAFE model\n\n" msg += "If you want to train the network use build + split" msg += "If you want to create a knowledge base for the binary code search engine use build + embedd" msg += "This program has been written by the SAFE team.\n" msg += "-------------------------------------------------" return msg def build_configuration(db_name, root_dir, use_symbols, callee_depth): msg = "Database creation options: \n" msg += " - Database Name: {} \n".format(db_name) msg += " - Root dir: {} \n".format(root_dir) msg += " - Use symbols: {} \n".format(use_symbols) msg += " - Callee depth: {} \n".format(callee_depth) return msg def split_configuration(db_name, val_split, test_split, epochs): msg = "Splitting options: \n" msg += " - Database Name: {} \n".format(db_name) msg += " - Validation Size: {} \n".format(val_split) msg += " - Test Size: {} \n".format(test_split) msg += " - Epochs: {} \n".format(epochs) return msg def embedd_configuration(db_name, model, batch_size, max_instruction, embeddings_table): msg = "Embedding options: \n" msg += " - Database Name: {} \n".format(db_name) msg += " - Model: {} \n".format(model) msg += " - Batch Size: {} \n".format(batch_size) msg += " - Max Instruction per function: {} \n".format(max_instruction) msg += " - Table for saving embeddings: {}.".format(embeddings_table) return msg if __name__ == '__main__': print_safe() parser = argparse.ArgumentParser(description=debug_msg) parser.add_argument("-db", "--db", help="Name of the database to create", required=True) parser.add_argument("-b", "--build", help="Build db disassebling executables", action="store_true") parser.add_argument("-s", "--split", help="Perform data splitting for training", action="store_true") parser.add_argument("-e", "--embed", help="Compute functions embedding", action="store_true") parser.add_argument("-dir", "--dir", help="Root path of the directory to scan") parser.add_argument("-sym", "--symbols", help="Use it if you want to use symbols", action="store_true") parser.add_argument("-dep", "--depth", help="Recursive depth for analysis", default=0, type=int) parser.add_argument("-test", "--test_size", help="Test set size [0-1]", type=float, default=0.2) parser.add_argument("-val", "--val_size", help="Validation set size [0-1]", type=float, default=0.2) parser.add_argument("-epo", "--epochs", help="# Epochs to generate pairs for", type=int, default=25) parser.add_argument("-mod", "--model", help="Model for embedding generation") parser.add_argument("-bat", "--batch_size", help="Batch size for function embeddings", 
type=int, default=500) parser.add_argument("-max", "--max_instruction", help="Maximum instruction per function", type=int, default=150) parser.add_argument("-etb", "--embeddings_table", help="Name for the table that contains embeddings", default="safe_embeddings") try: args = parser.parse_args() except: parser.print_help() print(debug_msg()) exit(0) if args.build: print("Disassemblying files and creating dataset") print(build_configuration(args.db, args.dir, args.symbols, args.depth)) factory = DatabaseFactory.DatabaseFactory(args.db, args.dir) factory.build_db(args.symbols, args.depth) if args.split: print("Splitting data and generating epoch pairs") print(split_configuration(args.db, args.val_size, args.test_size, args.epochs)) splitter = DataSplitter.DataSplitter(args.db) splitter.split_data(args.val_size, args.test_size) splitter.create_pairs(args.epochs) if args.embed: print("Computing embeddings for function in db") print(embedd_configuration(args.db, args.model, args.batch_size, args.max_instruction, args.embeddings_table)) embedder = FunctionsEmbedder.FunctionsEmbedder(args.model, args.batch_size, args.max_instruction) embedder.compute_and_save_embeddings_from_db(args.db, args.embeddings_table) exit(0)
5,282
47.916667
120
py
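The three stages described in debug_msg are meant to be chained from the shell; a hypothetical sequence using the flags defined above (database path and directories are placeholders):

# 1. Disassemble executables and populate the db:
#      python ExperimentUtil.py -db dataset.db -b -dir compiled_binaries/
# 2. Split the data and generate training pairs:
#      python ExperimentUtil.py -db dataset.db -s -val 0.2 -test 0.2 -epo 25
# 3. Embed every function with a trained model:
#      python ExperimentUtil.py -db dataset.db -e -mod data/safe.pb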
SAFE
SAFE-master/dataset_creation/DataSplitter.py
# SAFE TEAM # Copyright (C) 2019 Luca Massarelli, Giuseppe Antonio Di Luna, Fabio Petroni, Leonardo Querzoni, Roberto Baldoni import json import random import sqlite3 from tqdm import tqdm class DataSplitter: def __init__(self, db_name): self.db_name = db_name def create_pair_table(self, table_name): conn = sqlite3.connect(self.db_name) c = conn.cursor() c.executescript("DROP TABLE IF EXISTS {} ".format(table_name)) c.execute("CREATE TABLE {} (id INTEGER PRIMARY KEY, true_pair TEXT, false_pair TEXT)".format(table_name)) conn.commit() conn.close() def get_ids(self, set_type): conn = sqlite3.connect(self.db_name) cur = conn.cursor() q = cur.execute("SELECT id FROM {}".format(set_type)) ids = q.fetchall() conn.close() return ids @staticmethod def select_similar_cfg(id, provenance, ids, cursor): q1 = cursor.execute('SELECT id FROM functions WHERE project=? AND file_name=? AND function_name=?', provenance) candidates = [i[0] for i in q1.fetchall() if (i[0] != id and i[0] in ids)] if len(candidates) == 0: return None id_similar = random.choice(candidates) return id_similar @staticmethod def select_dissimilar_cfg(ids, provenance, cursor): while True: id_dissimilar = random.choice(ids) q2 = cursor.execute('SELECT project, file_name, function_name FROM functions WHERE id=?', id_dissimilar) res = q2.fetchone() if res != provenance: break return id_dissimilar def create_epoch_pairs(self, epoch_number, pairs_table, id_table): random.seed(epoch_number) conn = sqlite3.connect(self.db_name) cur = conn.cursor() ids = cur.execute("SELECT id FROM " + id_table).fetchall() id_set = set(ids) true_pair = [] false_pair = [] for my_id in tqdm(ids): q = cur.execute('SELECT project, file_name, function_name FROM functions WHERE id=?', my_id) cfg_0_provenance = q.fetchone() id_sim = DataSplitter.select_similar_cfg(my_id, cfg_0_provenance, id_set, cur) id_dissim = DataSplitter.select_dissimilar_cfg(ids, cfg_0_provenance, cur) if id_sim is not None and id_dissim is not None: true_pair.append((my_id, id_sim)) false_pair.append((my_id, id_dissim)) true_pair = str(json.dumps(true_pair)) false_pair = str(json.dumps(false_pair)) cur.execute("INSERT INTO {} VALUES (?,?,?)".format(pairs_table), (epoch_number, true_pair, false_pair)) conn.commit() conn.close() def create_pairs(self, total_epochs): self.create_pair_table('train_pairs') self.create_pair_table('validation_pairs') self.create_pair_table('test_pairs') for i in range(0, total_epochs): print("Creating training pairs for epoch {} of {}".format(i, total_epochs)) self.create_epoch_pairs(i, 'train_pairs', 'train') print("Creating validation pairs") self.create_epoch_pairs(0, 'validation_pairs', 'validation') print("Creating test pairs") self.create_epoch_pairs(0, 'test_pairs', 'test') @staticmethod def prepare_set(data_to_include, table_name, file_list, cur): i = 0 while i < data_to_include and len(file_list) > 0: choice = random.choice(file_list) file_list.remove(choice) q = cur.execute("SELECT id FROM functions where project=?
AND file_name=?", choice) data = q.fetchall() cur.executemany("INSERT INTO {} VALUES (?)".format(table_name), data) i += len(data) return file_list, i def split_data(self, validation_dim, test_dim): random.seed(12345) conn = sqlite3.connect(self.db_name) c = conn.cursor() q = c.execute('''SELECT project, file_name FROM functions ''') data = q.fetchall() conn.commit() num_data = len(data) num_test = int(num_data * test_dim) num_validation = int(num_data * validation_dim) filename = list(set(data)) c.execute("DROP TABLE IF EXISTS train") c.execute("DROP TABLE IF EXISTS test") c.execute("DROP TABLE IF EXISTS validation") c.execute("CREATE TABLE IF NOT EXISTS train (id INTEGER PRIMARY KEY)") c.execute("CREATE TABLE IF NOT EXISTS validation (id INTEGER PRIMARY KEY)") c.execute("CREATE TABLE IF NOT EXISTS test (id INTEGER PRIMARY KEY)") c.execute('''CREATE INDEX IF NOT EXISTS my_index ON functions(project, file_name, function_name)''') c.execute('''CREATE INDEX IF NOT EXISTS my_index_2 ON functions(project, file_name)''') filename, test_num = DataSplitter.prepare_set(num_test, 'test', filename, conn.cursor()) conn.commit() assert len(filename) > 0 filename, val_num = self.prepare_set(num_validation, 'validation', filename, conn.cursor()) conn.commit() assert len(filename) > 0 _, train_num = self.prepare_set(num_data - num_test - num_validation, 'train', filename, conn.cursor()) conn.commit() print("Train Size: {}".format(train_num)) print("Validation Size: {}".format(val_num)) print("Test Size: {}".format(test_num))
5,467
37.237762
119
py
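create_pairs above stores each epoch's pair lists as JSON blobs; a short sketch of reading one epoch back (the database path is a placeholder):

import json
import sqlite3

conn = sqlite3.connect("dataset.db")
true_pair, false_pair = conn.execute(
    "SELECT true_pair, false_pair FROM train_pairs WHERE id=?", (0,)).fetchone()
print(len(json.loads(true_pair)), "similar and",
      len(json.loads(false_pair)), "dissimilar pairs for epoch 0")
conn.close()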
SAFE
SAFE-master/dataset_creation/__init__.py
0
0
0
py
SAFE
SAFE-master/dataset_creation/convertDB.py
# SAFE TEAM # Copyright (C) 2019 Luca Massarelli, Giuseppe Antonio Di Luna, Fabio Petroni, Leonardo Querzoni, Roberto Baldoni import sqlite3 import json from networkx.readwrite import json_graph import logging from tqdm import tqdm from asm_embedding.InstructionsConverter import InstructionsConverter # Create the db where data are stored def create_db(db_name): print('Database creation...') conn = sqlite3.connect(db_name) conn.execute(''' CREATE TABLE IF NOT EXISTS functions (id INTEGER PRIMARY KEY, project text, compiler text, optimization text, file_name text, function_name text, asm text, num_instructions INTEGER) ''') conn.execute('''CREATE TABLE IF NOT EXISTS filtered_functions (id INTEGER PRIMARY KEY, instructions_list text) ''') conn.commit() conn.close() def reverse_graph(cfg, lstm_cfg): instructions = [] asm = "" node_addr = list(cfg.nodes()) node_addr.sort() nodes = cfg.nodes(data=True) lstm_nodes = lstm_cfg.nodes(data=True) for addr in node_addr: a = nodes[addr]["asm"] if a is not None: asm += a instructions.extend(lstm_nodes[addr]['features']) return instructions, asm def copy_split(old_cur, new_cur, table): q = old_cur.execute("SELECT id FROM {}".format(table)) iii = q.fetchall() print("Copying table {}".format(table)) for ii in tqdm(iii): new_cur.execute("INSERT INTO {} VALUES (?)".format(table), ii) def copy_table(old_cur, new_cur, table_old, table_new): q = old_cur.execute("SELECT * FROM {}".format(table_old)) iii = q.fetchall() print("Copying table {} to {}".format(table_old, table_new)) for ii in tqdm(iii): new_cur.execute("INSERT INTO {} VALUES (?,?,?)".format(table_new), ii) logger = logging.getLogger() logger.setLevel(logging.DEBUG) db = "/home/lucamassarelli/binary_similarity_data/databases/big_dataset_X86.db" new_db = "/home/lucamassarelli/binary_similarity_data/new_databases/big_dataset_X86_new.db" create_db(new_db) conn_old = sqlite3.connect(db) conn_new = sqlite3.connect(new_db) cur_old = conn_old.cursor() cur_new = conn_new.cursor() q = cur_old.execute("SELECT id FROM functions") ids = q.fetchall() converter = InstructionsConverter() for my_id in tqdm(ids): q0 = cur_old.execute("SELECT id, project, compiler, optimization, file_name, function_name, cfg FROM functions WHERE id=?", my_id) meta = q.fetchone() q1 = cur_old.execute("SELECT lstm_cfg FROM lstm_cfg WHERE id=?", my_id) cfg = json_graph.adjacency_graph(json.loads(meta[6])) lstm_cfg = json_graph.adjacency_graph(json.loads(q1.fetchone()[0])) instructions, asm = reverse_graph(cfg, lstm_cfg) values = meta[0:6] + (asm, len(instructions)) q_n = cur_new.execute("INSERT INTO functions VALUES (?,?,?,?,?,?,?,?)", values) converted_instruction = json.dumps(converter.convert_to_ids(instructions)) q_n = cur_new.execute("INSERT INTO filtered_functions VALUES (?,?)", (my_id[0], converted_instruction)) conn_new.commit() cur_new.execute("CREATE TABLE train (id INTEGER PRIMARY KEY) ") cur_new.execute("CREATE TABLE validation (id INTEGER PRIMARY KEY) ") cur_new.execute("CREATE TABLE test (id INTEGER PRIMARY KEY) ") conn_new.commit() copy_split(cur_old, cur_new, "train") conn_new.commit() copy_split(cur_old, cur_new, "validation") conn_new.commit() copy_split(cur_old, cur_new, "test") conn_new.commit() cur_new.execute("CREATE TABLE train_pairs (id INTEGER PRIMARY KEY, true_pair TEXT, false_pair TEXT)") cur_new.execute("CREATE TABLE validation_pairs (id INTEGER PRIMARY KEY, true_pair TEXT, false_pair TEXT)") cur_new.execute("CREATE TABLE test_pairs (id INTEGER PRIMARY KEY, true_pair TEXT, false_pair TEXT)") conn_new.commit() 
copy_table(cur_old, cur_new, "train_couples", "train_pairs") conn_new.commit() copy_table(cur_old, cur_new, "validation_couples", "validation_pairs") conn_new.commit() copy_table(cur_old, cur_new, "test_couples", "test_pairs") conn_new.commit() conn_new.close()
4,558
36.368852
134
py
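The migration above rebuilds graphs with json_graph.adjacency_graph; its inverse, adjacency_data, is presumably what serialized the CFGs in the old schema. A self-contained round-trip check of that pair:

import json
import networkx as nx
from networkx.readwrite import json_graph

g = nx.path_graph(3)
blob = json.dumps(json_graph.adjacency_data(g))   # serialize as the old db likely did
g2 = json_graph.adjacency_graph(json.loads(blob)) # reconstruct as convertDB does
assert sorted(g2.edges()) == sorted(g.edges())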
SAFE
SAFE-master/dataset_creation/FunctionsEmbedder.py
# SAFE TEAM # Copyright (C) 2019 Luca Massarelli, Giuseppe Antonio Di Luna, Fabio Petroni, Leonardo Querzoni, Roberto Baldoni from asm_embedding.FunctionNormalizer import FunctionNormalizer import json from neural_network.SAFEEmbedder import SAFEEmbedder import numpy as np import sqlite3 from tqdm import tqdm class FunctionsEmbedder: def __init__(self, model, batch_size, max_instruction): self.batch_size = batch_size self.normalizer = FunctionNormalizer(max_instruction) self.safe = SAFEEmbedder(model) self.safe.loadmodel() self.safe.get_tensor() def compute_embeddings(self, functions): functions, lengths = self.normalizer.normalize_functions(functions) embeddings = self.safe.embedd(functions, lengths) return embeddings @staticmethod def create_table(db_name, table_name): conn = sqlite3.connect(db_name) c = conn.cursor() c.execute("CREATE TABLE IF NOT EXISTS {} (id INTEGER PRIMARY KEY, {} TEXT)".format(table_name, table_name)) conn.commit() conn.close() def compute_and_save_embeddings_from_db(self, db_name, table_name): FunctionsEmbedder.create_table(db_name, table_name) conn = sqlite3.connect(db_name) cur = conn.cursor() q = cur.execute("SELECT id FROM functions WHERE id not in (SELECT id from {})".format(table_name)) ids = q.fetchall() for i in tqdm(range(0, len(ids), self.batch_size)): functions = [] batch_ids = ids[i:i+self.batch_size] for my_id in batch_ids: q = cur.execute("SELECT instructions_list FROM filtered_functions where id=?", my_id) functions.append(json.loads(q.fetchone()[0])) embeddings = self.compute_embeddings(functions) for l, id in enumerate(batch_ids): cur.execute("INSERT INTO {} VALUES (?,?)".format(table_name), (id[0], np.array2string(embeddings[l]))) conn.commit()
2,021
37.884615
118
py
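Embeddings are stored above with np.array2string, so reading them back means parsing text, exactly as FunctionSearchEngine.embeddingToNp does later in this repo. A sketch (database and table names are placeholders; safe_embeddings is the default table name from ExperimentUtil):

import numpy as np
import sqlite3

conn = sqlite3.connect("dataset.db")
row = conn.execute("SELECT safe_embeddings FROM safe_embeddings LIMIT 1").fetchone()
# Strip the brackets and newlines that np.array2string added, then parse.
text = row[0].replace("\n", "").strip("[]")
vec = np.fromstring(text, dtype=float, sep=" ")
print(vec.shape)
conn.close()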
SAFE
SAFE-master/neural_network/SAFE_model.py
# SAFE TEAM # distributed under license: GPL 3 License http://www.gnu.org/licenses/ from SiameseSAFE import SiameseSelfAttentive from PairFactory import PairFactory import tensorflow as tf import random import sys, os import numpy as np from sklearn import metrics import matplotlib import tqdm matplotlib.use('Agg') import matplotlib.pyplot as plt class modelSAFE: def __init__(self, flags, embedding_matrix): self.embedding_size = flags.embedding_size self.num_epochs = flags.num_epochs self.learning_rate = flags.learning_rate self.l2_reg_lambda = flags.l2_reg_lambda self.num_checkpoints = flags.num_checkpoints self.logdir = flags.logdir self.logger = flags.logger self.seed = flags.seed self.batch_size = flags.batch_size self.max_instructions = flags.max_instructions self.embeddings_matrix = embedding_matrix self.session = None self.db_name = flags.db_name self.trainable_embeddings = flags.trainable_embeddings self.cross_val = flags.cross_val self.attention_hops = flags.attention_hops self.attention_depth = flags.attention_depth self.dense_layer_size = flags.dense_layer_size self.rnn_state_size = flags.rnn_state_size random.seed(self.seed) np.random.seed(self.seed) print(self.db_name) # loads an usable model # returns the network and a tensorflow session in which the network can be used. @staticmethod def load_model(path): session = tf.Session() checkpoint_dir = os.path.abspath(os.path.join(path, "checkpoints")) saver = tf.train.import_meta_graph(os.path.join(checkpoint_dir, "model.meta")) tf.global_variables_initializer().run(session=session) saver.restore(session, os.path.join(checkpoint_dir, "model")) network = SiameseSelfAttentive( rnn_state_size=1, learning_rate=1, l2_reg_lambda=1, batch_size=1, max_instructions=1, embedding_matrix=1, trainable_embeddings=1, attention_hops=1, attention_depth=1, dense_layer_size=1, embedding_size=1 ) network.restore_model(session) return session, network def create_network(self): self.network = SiameseSelfAttentive( rnn_state_size=self.rnn_state_size, learning_rate=self.learning_rate, l2_reg_lambda=self.l2_reg_lambda, batch_size=self.batch_size, max_instructions=self.max_instructions, embedding_matrix=self.embeddings_matrix, trainable_embeddings=self.trainable_embeddings, attention_hops=self.attention_hops, attention_depth=self.attention_depth, dense_layer_size=self.dense_layer_size, embedding_size=self.embedding_size ) def train(self): tf.reset_default_graph() with tf.Graph().as_default() as g: session_conf = tf.ConfigProto( allow_soft_placement=True, log_device_placement=False ) sess = tf.Session(config=session_conf) # Sets the graph-level random seed. 
tf.set_random_seed(self.seed) self.create_network() self.network.generate_new_safe() # Initialize all variables sess.run(tf.global_variables_initializer()) # TensorBoard # Summaries for loss and accuracy loss_summary = tf.summary.scalar("loss", self.network.loss) # Train Summaries train_summary_op = tf.summary.merge([loss_summary]) train_summary_dir = os.path.join(self.logdir, "summaries", "train") train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph) # Validation summaries val_summary_op = tf.summary.merge([loss_summary]) val_summary_dir = os.path.join(self.logdir, "summaries", "validation") val_summary_writer = tf.summary.FileWriter(val_summary_dir, sess.graph) # Test summaries test_summary_op = tf.summary.merge([loss_summary]) test_summary_dir = os.path.join(self.logdir, "summaries", "test") test_summary_writer = tf.summary.FileWriter(test_summary_dir, sess.graph) # Checkpoint directory. TensorFlow assumes this directory already exists so we need to create it checkpoint_dir = os.path.abspath(os.path.join(self.logdir, "checkpoints")) checkpoint_prefix = os.path.join(checkpoint_dir, "model") if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) saver = tf.train.Saver(tf.global_variables(), max_to_keep=self.num_checkpoints) best_val_auc = 0 stat_file = open(str(self.logdir) + "/epoch_stats.tsv", "w") stat_file.write("#epoch\ttrain_loss\tval_loss\tval_auc\ttest_loss\ttest_auc\n") p_train = PairFactory(self.db_name, 'train_pairs', self.batch_size, self.max_instructions) p_validation = PairFactory(self.db_name, 'validation_pairs', self.batch_size, self.max_instructions, False) p_test = PairFactory(self.db_name, 'test_pairs', self.batch_size, self.max_instructions, False) step = 0 for epoch in range(0, self.num_epochs): epoch_msg = "" epoch_msg += " epoch: {}\n".format(epoch) epoch_loss = 0 # ----------------------# # TRAIN # # ----------------------# n_batch = 0 for function1_batch, function2_batch, len1_batch, len2_batch, y_batch in tqdm.tqdm( p_train.async_chunker(epoch % 25), total=p_train.num_batches): feed_dict = { self.network.x_1: function1_batch, self.network.x_2: function2_batch, self.network.lengths_1: len1_batch, self.network.lengths_2: len2_batch, self.network.y: y_batch, } summaries, _, loss, norms, cs = sess.run( [train_summary_op, self.network.train_step, self.network.loss, self.network.norms, self.network.cos_similarity], feed_dict=feed_dict) train_summary_writer.add_summary(summaries, step) epoch_loss += loss * p_train.batch_dim # weight the batch loss by batch size; divided by num_pairs below for the per-pair mean
step += 1 # recap epoch epoch_loss /= p_train.num_pairs epoch_msg += "\ttrain_loss: {}\n".format(epoch_loss) # ----------------------# # VALIDATION # # ----------------------# val_loss = 0 epoch_msg += "\n" val_y = [] val_pred = [] for function1_batch, function2_batch, len1_batch, len2_batch, y_batch in tqdm.tqdm( p_validation.async_chunker(0), total=p_validation.num_batches): feed_dict = { self.network.x_1: function1_batch, self.network.x_2: function2_batch, self.network.lengths_1: len1_batch, self.network.lengths_2: len2_batch, self.network.y: y_batch, } summaries, loss, similarities = sess.run( [val_summary_op, self.network.loss, self.network.cos_similarity], feed_dict=feed_dict) val_loss += loss * p_validation.batch_dim val_summary_writer.add_summary(summaries, step) val_y.extend(y_batch) val_pred.extend(similarities.tolist()) val_loss /= p_validation.num_pairs if np.isnan(val_pred).any(): print("Validation: careful, there are NaN values in the output; replacing them with zeros, but be aware...") val_pred = np.nan_to_num(val_pred) val_fpr, val_tpr, val_thresholds = metrics.roc_curve(val_y, val_pred, pos_label=1) val_auc = metrics.auc(val_fpr, val_tpr) epoch_msg += "\tval_loss : {}\n\tval_auc : {}\n".format(val_loss, val_auc) sys.stdout.write( "\r\tepoch {} / {}, loss {:g}, val_auc {:g}, norms {}".format(epoch, self.num_epochs, epoch_loss, val_auc, norms)) sys.stdout.flush() # execute test only if validation auc increased test_loss = "-" test_auc = "-" # in case of cross validation we do not need to evaluate on a test split that is effectively missing if val_auc > best_val_auc and self.cross_val: best_val_auc = val_auc saver.save(sess, checkpoint_prefix) print("\nNEW BEST_VAL_AUC: {} !\n".format(best_val_auc)) # write ROC raw data with open(str(self.logdir) + "/best_val_roc.tsv", "w") as the_file: the_file.write("#thresholds\ttpr\tfpr\n") for t, tpr, fpr in zip(val_thresholds, val_tpr, val_fpr): the_file.write("{}\t{}\t{}\n".format(t, tpr, fpr)) # in case we are not cross validating we expect to have a test split.
if val_auc > best_val_auc and not self.cross_val: best_val_auc = val_auc epoch_msg += "\tNEW BEST_VAL_AUC: {} !\n".format(best_val_auc) # save best model saver.save(sess, checkpoint_prefix) # ----------------------# # TEST # # ----------------------# test_loss = 0 epoch_msg += "\n" test_y = [] test_pred = [] for function1_batch, function2_batch, len1_batch, len2_batch, y_batch in tqdm.tqdm( p_test.async_chunker(0), total=p_test.num_batches): feed_dict = { self.network.x_1: function1_batch, self.network.x_2: function2_batch, self.network.lengths_1: len1_batch, self.network.lengths_2: len2_batch, self.network.y: y_batch, } summaries, loss, similarities = sess.run( [test_summary_op, self.network.loss, self.network.cos_similarity], feed_dict=feed_dict) test_loss += loss * p_test.batch_dim test_summary_writer.add_summary(summaries, step) test_y.extend(y_batch) test_pred.extend(similarities.tolist()) test_loss /= p_test.num_pairs if np.isnan(test_pred).any(): print("Test: careful, there are NaN values in the output; replacing them with zeros, but be aware...") test_pred = np.nan_to_num(test_pred) test_fpr, test_tpr, test_thresholds = metrics.roc_curve(test_y, test_pred, pos_label=1) # write ROC raw data with open(str(self.logdir) + "/best_test_roc.tsv", "w") as the_file: the_file.write("#thresholds\ttpr\tfpr\n") for t, tpr, fpr in zip(test_thresholds, test_tpr, test_fpr): the_file.write("{}\t{}\t{}\n".format(t, tpr, fpr)) test_auc = metrics.auc(test_fpr, test_tpr) epoch_msg += "\ttest_loss : {}\n\ttest_auc : {}\n".format(test_loss, test_auc) fig = plt.figure() plt.title('Receiver Operating Characteristic') plt.plot(test_fpr, test_tpr, 'b', label='AUC = %0.2f' % test_auc) fig.savefig(str(self.logdir) + "/best_test_roc.png") print( "\nNEW BEST_VAL_AUC: {} !\n\ttest_loss : {}\n\ttest_auc : {}\n".format(best_val_auc, test_loss, test_auc)) plt.close(fig) stat_file.write( "{}\t{}\t{}\t{}\t{}\t{}\n".format(epoch, epoch_loss, val_loss, val_auc, test_loss, test_auc)) self.logger.info("\n{}\n".format(epoch_msg)) stat_file.close() sess.close() return best_val_auc
13,262
43.959322
119
py
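A hypothetical reuse of a checkpoint written by the trainer above, via the static load_model; the run directory and the four input arrays are placeholders, and the same TF1 environment is assumed:

# f1_ids/f2_ids are padded instruction-id matrices, len1/len2 their true lengths.
session, network = modelSAFE.load_model("runs/last_run")
similarities = session.run(network.cos_similarity,
                           feed_dict={network.x_1: f1_ids, network.x_2: f2_ids,
                                      network.len_1: len1, network.len_2: len2})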
SAFE
SAFE-master/neural_network/SiameseSAFE.py
import tensorflow as tf # SAFE TEAM # # # distributed under license: CC BY-NC-SA 4.0 (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode.txt) # # Siamese Self-Attentive Network for Binary Similarity: # # our paper, arXiv:1811.05296. # # based on the self-attentive network: arXiv:1703.03130, Z. Lin et al., "A structured self-attentive sentence embedding" # # Authors: SAFE team class SiameseSelfAttentive: def __init__(self, rnn_state_size, # Dimension of the RNN State learning_rate, # Learning rate l2_reg_lambda, batch_size, max_instructions, embedding_matrix, # Matrix containing the embeddings for each asm instruction trainable_embeddings, # if this value is True, the embeddings of the asm instructions are modified by the training. attention_hops, # attention hops parameter r of [1] attention_depth, # attention depth parameter d_a of [1] dense_layer_size, # parameter e of [1] embedding_size, # size of the final function embedding, in our tests this is twice the rnn_state_size ): self.rnn_depth = 1 # if this value is modified then the RNN becomes a multilayer network. In our tests we fix it to 1; feel free to be adventurous. self.learning_rate = learning_rate self.l2_reg_lambda = l2_reg_lambda self.rnn_state_size = rnn_state_size self.batch_size = batch_size self.max_instructions = max_instructions self.embedding_matrix = embedding_matrix self.trainable_embeddings = trainable_embeddings self.attention_hops = attention_hops self.attention_depth = attention_depth self.dense_layer_size = dense_layer_size self.embedding_size = embedding_size # self.generate_new_safe() def restore_model(self, old_session): graph = old_session.graph self.x_1 = graph.get_tensor_by_name("x_1:0") self.x_2 = graph.get_tensor_by_name("x_2:0") self.len_1 = graph.get_tensor_by_name("lengths_1:0") self.len_2 = graph.get_tensor_by_name("lengths_2:0") self.y = graph.get_tensor_by_name('y_:0') self.cos_similarity = graph.get_tensor_by_name("siamese_layer/cosSimilarity:0") self.loss = graph.get_tensor_by_name("Loss/loss:0") self.train_step = graph.get_operation_by_name("Train_Step/Adam") return def self_attentive_network(self, input_x, lengths): # each function is a list of embedding ids (an id is an index in the embedding matrix) # with this we transform it into a list of embedding vectors.
embedded_functions = tf.nn.embedding_lookup(self.instructions_embeddings_t, input_x) # We create the GRU RNN (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(self.cell_fw, self.cell_bw, embedded_functions, sequence_length=lengths, dtype=tf.float32, time_major=False) # We create the matrix H H = tf.concat([output_fw, output_bw], axis=2) # We do a tile to account for training batches ws1_tiled = tf.tile(tf.expand_dims(self.WS1, 0), [tf.shape(H)[0], 1, 1], name="WS1_tiled") ws2_tiled = tf.tile(tf.expand_dims(self.WS2, 0), [tf.shape(H)[0], 1, 1], name="WS2_tiled") # we compute the matrix A self.A = tf.nn.softmax(tf.matmul(ws2_tiled, tf.nn.tanh(tf.matmul(ws1_tiled, tf.transpose(H, perm=[0, 2, 1])))), name="Attention_Matrix") # embedding matrix M M = tf.identity(tf.matmul(self.A, H), name="Attention_Embedding") # we create the flattened version of M flattened_M = tf.reshape(M, [tf.shape(M)[0], self.attention_hops * self.rnn_state_size * 2]) return flattened_M def generate_new_safe(self): self.instructions_embeddings_t = tf.Variable(initial_value=tf.constant(self.embedding_matrix), trainable=self.trainable_embeddings, name="instructions_embeddings", dtype=tf.float32) self.x_1 = tf.placeholder(tf.int32, [None, self.max_instructions], name="x_1") # List of instructions for Function 1 self.lengths_1 = tf.placeholder(tf.int32, [None], name='lengths_1') # List of lengths for Function 1 # example x_1=[[mov,add,padding,padding],[mov,mov,mov,padding]] # lengths_1=[2,3] self.x_2 = tf.placeholder(tf.int32, [None, self.max_instructions], name="x_2") # List of instructions for Function 2 self.lengths_2 = tf.placeholder(tf.int32, [None], name='lengths_2') # List of lengths for Function 2 self.y = tf.placeholder(tf.float32, [None], name='y_') # Real label of the pairs, +1 similar, -1 dissimilar.
# Euclidean norms; p = 2 self.norms = [] # Keeping track of l2 regularization loss (optional) l2_loss = tf.constant(0.0) with tf.name_scope('parameters_Attention'): self.WS1 = tf.Variable(tf.truncated_normal([self.attention_depth, 2 * self.rnn_state_size], stddev=0.1), name="WS1") self.WS2 = tf.Variable(tf.truncated_normal([self.attention_hops, self.attention_depth], stddev=0.1), name="WS2") rnn_layers_fw = [tf.nn.rnn_cell.GRUCell(size) for size in ([self.rnn_state_size] * self.rnn_depth)] rnn_layers_bw = [tf.nn.rnn_cell.GRUCell(size) for size in ([self.rnn_state_size] * self.rnn_depth)] self.cell_fw = tf.nn.rnn_cell.MultiRNNCell(rnn_layers_fw) self.cell_bw = tf.nn.rnn_cell.MultiRNNCell(rnn_layers_bw) with tf.name_scope('Self-Attentive1'): self.function_1 = self.self_attentive_network(self.x_1, self.lengths_1) with tf.name_scope('Self-Attentive2'): self.function_2 = self.self_attentive_network(self.x_2, self.lengths_2) self.dense_1 = tf.nn.relu(tf.layers.dense(self.function_1, self.dense_layer_size)) self.dense_2 = tf.nn.relu(tf.layers.dense(self.function_2, self.dense_layer_size)) with tf.name_scope('Embedding1'): self.function_embedding_1 = tf.layers.dense(self.dense_1, self.embedding_size) with tf.name_scope('Embedding2'): self.function_embedding_2 = tf.layers.dense(self.dense_2, self.embedding_size) with tf.name_scope('siamese_layer'): self.cos_similarity = tf.reduce_sum(tf.multiply(self.function_embedding_1, self.function_embedding_2), axis=1, name="cosSimilarity") # CalculateMean cross-entropy loss with tf.name_scope("Loss"): A_square = tf.matmul(self.A, tf.transpose(self.A, perm=[0, 2, 1])) I = tf.eye(tf.shape(A_square)[1]) I_tiled = tf.tile(tf.expand_dims(I, 0), [tf.shape(A_square)[0], 1, 1], name="I_tiled") self.A_pen = tf.norm(A_square - I_tiled) self.loss = tf.reduce_sum(tf.squared_difference(self.cos_similarity, self.y), name="loss") self.regularized_loss = self.loss + self.l2_reg_lambda * l2_loss + self.A_pen # Train step with tf.name_scope("Train_Step"): self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.regularized_loss)
7,721
48.819355
155
py
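A NumPy sketch of the structured self-attention computed above (arXiv:1703.03130), using this repo's default sizes from parameters.py (r=10 hops, d_a=250, rnn_state_size=50 so 2u=100, 150 instructions); the weights are random, so this only illustrates the shapes and the penalty term:

import numpy as np

rng = np.random.default_rng(0)
H = rng.normal(size=(150, 100))     # RNN outputs, (timesteps, 2u)
WS1 = rng.normal(size=(250, 100))   # (d_a, 2u)
WS2 = rng.normal(size=(10, 250))    # (r, d_a)
scores = WS2 @ np.tanh(WS1 @ H.T)   # (r, timesteps)
scores -= scores.max(axis=1, keepdims=True)          # numerically stable softmax
A = np.exp(scores) / np.exp(scores).sum(axis=1, keepdims=True)
M = A @ H                                            # (r, 2u) attention embedding
penalty = np.linalg.norm(A @ A.T - np.eye(10))       # the ||AA^T - I|| term in the loss
print(M.shape, penalty)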
SAFE
SAFE-master/neural_network/SAFEEmbedder.py
import tensorflow as tf # SAFE TEAM # distributed under license: GPL 3 License http://www.gnu.org/licenses/ class SAFEEmbedder: def __init__(self, model_file): self.model_file = model_file self.session = None self.x_1 = None self.adj_1 = None self.len_1 = None self.emb = None def loadmodel(self): with tf.gfile.GFile(self.model_file, "rb") as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) with tf.Graph().as_default() as graph: tf.import_graph_def(graph_def) sess = tf.Session(graph=graph) self.session = sess return sess def get_tensor(self): self.x_1 = self.session.graph.get_tensor_by_name("import/x_1:0") self.len_1 = self.session.graph.get_tensor_by_name("import/lengths_1:0") self.emb = tf.nn.l2_normalize(self.session.graph.get_tensor_by_name('import/Embedding1/dense/BiasAdd:0'), axis=1) def embedd(self, nodi_input, lengths_input): out_embedding= self.session.run(self.emb, feed_dict = { self.x_1: nodi_input, self.len_1: lengths_input}) return out_embedding
1,282
31.075
121
py
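Typical use of SAFEEmbedder above, as done in safe.py and FunctionsEmbedder earlier in this repo; the model path and the two inputs are placeholders:

embedder = SAFEEmbedder("data/safe.pb")
embedder.loadmodel()
embedder.get_tensor()
# instruction_ids must already be truncated/padded to the trained length (150),
# with `lengths` holding the pre-padding sizes, as FunctionNormalizer produces.
vectors = embedder.embedd(instruction_ids, lengths)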
SAFE
SAFE-master/neural_network/parameters.py
# SAFE TEAM # distributed under license: GPL 3 License http://www.gnu.org/licenses/ import argparse import time import sys, os import logging # # Parameters File for the SAFE network. # # Authors: SAFE team def getLogger(logfile): logger = logging.getLogger(__name__) hdlr = logging.FileHandler(logfile) formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') hdlr.setFormatter(formatter) logger.addHandler(hdlr) logger.setLevel(logging.INFO) return logger, hdlr class Flags: def __init__(self): parser = argparse.ArgumentParser(description='SAFE') parser.add_argument("-o", "--output", dest="output_file", help="output directory for logging and models", required=False) parser.add_argument("-e", "--embedder", dest="embedder_folder", help="file with the embedding matrix and dictionary for asm instructions", required=False) parser.add_argument("-n", "--dbName", dest="db_name", help="Name of the database", required=False) parser.add_argument("-ld", "--load_dir", dest="load_dir", help="Load the model from directory load_dir", required=False) parser.add_argument("-r", "--random", help="if present the network use random embedder", default=False, action="store_true", dest="random_embedding", required=False) parser.add_argument("-te", "--trainable_embedding", help="if present the network consider the embedding as trainable", action="store_true", dest="trainable_embeddings", default=False) parser.add_argument("-cv", "--cross_val", help="if present the training is done with cross validiation", default=False, action="store_true", dest="cross_val") args = parser.parse_args() # mode = mean_field self.batch_size = 250 # minibatch size (-1 = whole dataset) self.num_epochs = 50 # number of epochs self.embedding_size = 100 # dimension of the function embedding self.learning_rate = 0.001 # init learning_rate self.l2_reg_lambda = 0 # 0.002 #0.002 # regularization coefficient self.num_checkpoints = 1 # max number of checkpoints self.out_dir = args.output_file # directory for logging self.rnn_state_size = 50 # dimesion of the rnn state self.db_name = args.db_name self.load_dir = str(args.load_dir) self.random_embedding = args.random_embedding self.trainable_embeddings = args.trainable_embeddings self.cross_val = args.cross_val self.cross_val_fold = 5 # ## ## RNN PARAMETERS, these parameters are only used for RNN model. 
# self.rnn_depth = 1 # depth of the rnn self.max_instructions = 150 # number of instructions ## ATTENTION PARAMETERS self.attention_hops = 10 self.attention_depth = 250 # RNN SINGLE PARAMETER self.dense_layer_size = 2000 self.seed = 2 # random seed # create logdir and logger self.reset_logdir() self.embedder_folder = args.embedder_folder def reset_logdir(self): # create logdir timestamp = str(int(time.time())) self.logdir = os.path.abspath(os.path.join(self.out_dir, "runs", timestamp)) os.makedirs(self.logdir, exist_ok=True) # create logger self.log_file = str(self.logdir) + '/console.log' self.logger, self.hdlr = getLogger(self.log_file) # create symlink for last_run sym_path_logdir = str(self.out_dir) + "/last_run" try: os.unlink(sym_path_logdir) except: pass try: os.symlink(self.logdir, sym_path_logdir) except: print("\nfailed to create symlink!\n") def close_log(self): self.hdlr.close() self.logger.removeHandler(self.hdlr) handlers = self.logger.handlers[:] for handler in handlers: handler.close() self.logger.removeHandler(handler) def __str__(self): msg = "" msg += "\nParameters:\n" msg += "\tRandom embedding: {}\n".format(self.random_embedding) msg += "\tTrainable embedding: {}\n".format(self.trainable_embeddings) msg += "\tlogdir: {}\n".format(self.logdir) msg += "\tbatch_size: {}\n".format(self.batch_size) msg += "\tnum_epochs: {}\n".format(self.num_epochs) msg += "\tembedding_size: {}\n".format(self.embedding_size) msg += "\trnn_state_size: {}\n".format(self.rnn_state_size) msg += "\tattention depth: {}\n".format(self.attention_depth) msg += "\tattention hops: {}\n".format(self.attention_hops) msg += "\tdense layer e: {}\n".format(self.dense_layer_size) msg += "\tlearning_rate: {}\n".format(self.learning_rate) msg += "\tl2_reg_lambda: {}\n".format(self.l2_reg_lambda) msg += "\tnum_checkpoints: {}\n".format(self.num_checkpoints) msg += "\tseed: {}\n".format(self.seed) msg += "\tMax Instructions per functions: {}\n".format(self.max_instructions) return msg
5,285
37.867647
118
py
SAFE
SAFE-master/neural_network/__init__.py
0
0
0
py
SAFE
SAFE-master/neural_network/PairFactory.py
# SAFE TEAM # distributed under license: GPL 3 License http://www.gnu.org/licenses/ import sqlite3 import json import numpy as np from multiprocessing import Queue from multiprocessing import Process from asm_embedding.FunctionNormalizer import FunctionNormalizer # # PairFactory class, used for training the SAFE network. # This class generates the pairs for training, test and validation # # # Authors: SAFE team class PairFactory: def __init__(self, db_name, dataset_type, batch_size, max_instructions, shuffle=True): self.db_name = db_name self.dataset_type = dataset_type self.max_instructions = max_instructions self.batch_dim = 0 self.num_pairs = 0 self.num_batches = 0 self.batch_size = batch_size conn = sqlite3.connect(self.db_name) cur = conn.cursor() q = cur.execute("SELECT true_pair from " + self.dataset_type + " WHERE id=?", (0,)) self.num_pairs = len(json.loads(q.fetchone()[0])) * 2 n_chunk = int(self.num_pairs / self.batch_size) - 1 conn.close() self.num_batches = n_chunk self.shuffle = shuffle @staticmethod def split(a, n): return [a[i::n] for i in range(n)] @staticmethod def truncate_and_compute_lengths(pairs, max_instructions): lengths = [] new_pairs = [] for x in pairs: f0 = np.asarray(x[0][0:max_instructions]) f1 = np.asarray(x[1][0:max_instructions]) lengths.append((f0.shape[0], f1.shape[0])) if f0.shape[0] < max_instructions: f0 = np.pad(f0, (0, max_instructions - f0.shape[0]), mode='constant') if f1.shape[0] < max_instructions: f1 = np.pad(f1, (0, max_instructions - f1.shape[0]), mode='constant') new_pairs.append((f0, f1)) return new_pairs, lengths def async_chunker(self, epoch): conn = sqlite3.connect(self.db_name) cur = conn.cursor() query_string = "SELECT true_pair,false_pair from {} where id=?".format(self.dataset_type) q = cur.execute(query_string, (int(epoch),)) true_pairs_id, false_pairs_id = q.fetchone() true_pairs_id = json.loads(true_pairs_id) false_pairs_id = json.loads(false_pairs_id) assert len(true_pairs_id) == len(false_pairs_id) data_len = len(true_pairs_id) # print("Data Len: " + str(data_len)) conn.close() n_chunk = int(data_len / (self.batch_size / 2)) - 1 lista_chunk = range(0, n_chunk) coda = Queue(maxsize=50) n_proc = 8 # modify this to increase the parallelism for the db loading; in our tests 8-10 is the sweet spot on a 16-core machine with a K80 listone = PairFactory.split(lista_chunk, n_proc) # this ugly workaround is somehow needed, Pool is working oddly when TF is loaded.
for i in range(0, n_proc): p = Process(target=self.async_create_couple, args=((epoch, listone[i], coda))) p.start() for i in range(0, n_chunk): yield self.async_get_dataset(coda) def get_pair_fromdb(self, id_1, id_2): conn = sqlite3.connect(self.db_name) cur = conn.cursor() q0 = cur.execute("SELECT instructions_list FROM filtered_functions WHERE id=?", (id_1,)) f0 = json.loads(q0.fetchone()[0]) q1 = cur.execute("SELECT instructions_list FROM filtered_functions WHERE id=?", (id_2,)) f1 = json.loads(q1.fetchone()[0]) conn.close() return f0, f1 def get_couple_from_db(self, epoch_number, chunk): conn = sqlite3.connect(self.db_name) cur = conn.cursor() pairs = [] labels = [] q = cur.execute("SELECT true_pair, false_pair from " + self.dataset_type + " WHERE id=?", (int(epoch_number),)) true_pairs_id, false_pairs_id = q.fetchone() true_pairs_id = json.loads(true_pairs_id) false_pairs_id = json.loads(false_pairs_id) conn.close() data_len = len(true_pairs_id) i = 0 normalizer = FunctionNormalizer(self.max_instructions) while i < self.batch_size: if chunk * int(self.batch_size / 2) + i > data_len: break p = true_pairs_id[chunk * int(self.batch_size / 2) + i] f0, f1 = self.get_pair_fromdb(p[0], p[1]) pairs.append((f0, f1)) labels.append(+1) p = false_pairs_id[chunk * int(self.batch_size / 2) + i] f0, f1 = self.get_pair_fromdb(p[0], p[1]) pairs.append((f0, f1)) labels.append(-1) i += 2 pairs, lengths = normalizer.normalize_function_pairs(pairs) function1, function2 = zip(*pairs) len1, len2 = zip(*lengths) n_samples = len(pairs) if self.shuffle: shuffle_indices = np.random.permutation(np.arange(n_samples)) function1 = np.array(function1)[shuffle_indices] function2 = np.array(function2)[shuffle_indices] len1 = np.array(len1)[shuffle_indices] len2 = np.array(len2)[shuffle_indices] labels = np.array(labels)[shuffle_indices] else: function1=np.array(function1) function2=np.array(function2) len1=np.array(len1) len2=np.array(len2) labels=np.array(labels) upper_bound = min(self.batch_size, n_samples) len1 = len1[0:upper_bound] len2 = len2[0:upper_bound] function1 = function1[0:upper_bound] function2 = function2[0:upper_bound] y_ = labels[0:upper_bound] return function1, function2, len1, len2, y_ def async_create_couple(self, epoch,n_chunk,q): for i in n_chunk: function1, function2, len1, len2, y_ = self.get_couple_from_db(epoch, i) q.put((function1, function2, len1, len2, y_), block=True) def async_get_dataset(self, q): item = q.get() function1 = item[0] function2 = item[1] len1 = item[2] len2 = item[3] y_ = item[4] assert (len(function1) == len(y_)) n_samples = len(function1) self.batch_dim = n_samples #self.num_pairs += n_samples return function1, function2, len1, len2, y_
6,350
32.962567
150
py
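The truncate-and-pad helper above is easy to check in isolation (assuming the class is importable); with a limit of 4 instructions:

pairs = [([1, 2, 3], [4, 5])]
padded, lengths = PairFactory.truncate_and_compute_lengths(pairs, 4)
print(lengths)       # [(3, 2)] - true lengths before padding
print(padded[0][0])  # [1 2 3 0] - zero-padded to max_instructions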
SAFE
SAFE-master/neural_network/train.py
from SAFE_model import modelSAFE from parameters import Flags import sys import os import numpy as np from utils import utils import traceback def load_embedding_matrix(embedder_folder): matrix_file = 'embedding_matrix.npy' matrix_path = os.path.join(embedder_folder, matrix_file) if os.path.isfile(matrix_path): try: print('Loading embedding matrix...') with open(matrix_path, 'rb') as f: return np.float32(np.load(f)) except Exception as e: print("Exception handling file: " + str(matrix_path)) print("Embedding matrix cannot be loaded") print(str(e)) sys.exit(-1) else: print('Embedding matrix not found at path: ' + str(matrix_path)) sys.exit(-1) def run_test(): flags = Flags() flags.logger.info("\n{}\n".format(flags)) print(str(flags)) embedding_matrix = load_embedding_matrix(flags.embedder_folder) if flags.random_embedding: embedding_matrix = np.random.rand(*np.shape(embedding_matrix)).astype(np.float32) embedding_matrix[0, :] = np.zeros(np.shape(embedding_matrix)[1]).astype(np.float32) if flags.cross_val: print("STARTING CROSS VALIDATION") res = [] mean = 0 for i in range(0, flags.cross_val_fold): print("CROSS VALIDATION STARTING FOLD: " + str(i)) if i > 0: flags.close_log() flags.reset_logdir() del flags flags = Flags() flags.logger.info("\n{}\n".format(flags)) flags.logger.info("Starting cross validation fold: {}".format(i)) flags.db_name = flags.db_name + "_val_" + str(i+1) + ".db" flags.logger.info("Cross validation db name: {}".format(flags.db_name)) trainer = modelSAFE(flags, embedding_matrix) best_val_auc = trainer.train() mean += best_val_auc res.append(best_val_auc) flags.logger.info("Cross validation fold {} finished, best auc: {}".format(i, best_val_auc)) print("FINISHED FOLD: " + str(i) + " BEST VAL AUC: " + str(best_val_auc)) print("CROSS VALIDATION ENDED") print("Result: " + str(res)) print("") flags.logger.info("Cross validation finished results: {}".format(res)) flags.logger.info(" mean: {}".format(mean / flags.cross_val_fold)) flags.close_log() else: trainer = modelSAFE(flags, embedding_matrix) trainer.train() flags.close_log() if __name__ == '__main__': utils.print_safe() print('-Trainer for SAFE-') run_test()
2,675
30.482353
103
py
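A typical way to launch the trainer above (flag names as defined in parameters.Flags; paths and database names are placeholders):

#   python train.py -o outputs/ -e data/i2v/ -n dataset.db          # single run
#   python train.py -o outputs/ -e data/i2v/ -n dataset_cross -cv   # 5-fold CV:
#   the fold loop then opens dataset_cross_val_1.db ... dataset_cross_val_5.db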
SAFE
SAFE-master/utils/utils.py
from pyfiglet import figlet_format def print_safe(): a = figlet_format('SAFE', font='starwars') print(a) print("By Massarelli L., Di Luna G. A., Petroni F., Querzoni L., Baldoni R.") print("Please cite: http://arxiv.org/abs/1811.05296 \n")
257
31.25
81
py
SAFE
SAFE-master/utils/__init__.py
0
0
0
py
SAFE
SAFE-master/function_search/FunctionSearchEngine.py
# SAFE TEAM # Copyright (C) 2019 Luca Massarelli, Giuseppe Antonio Di Luna, Fabio Petroni, Leonardo Querzoni, Roberto Baldoni import sys import numpy as np import sqlite3 import pandas as pd import tqdm import tensorflow as tf if sys.version_info >= (3, 0): from functools import reduce pd.set_option('display.max_column',None) pd.set_option('display.max_rows',None) pd.set_option('display.max_seq_items',None) pd.set_option('display.max_colwidth', 500) pd.set_option('expand_frame_repr', True) class TopK: # # This class computes the similarities between the targets and the list of functions on which we are searching. # This is done by using matrices multiplication and top_k of tensorflow def __init__(self): self.graph=tf.Graph() nop=0 def loads_embeddings_SE(self, lista_embeddings): with self.graph.as_default(): tf.set_random_seed(1234) dim = lista_embeddings[0].shape[0] ll = np.asarray(lista_embeddings) self.matrix = tf.constant(ll, name='matrix_embeddings', dtype=tf.float32) self.target = tf.placeholder("float", [None, dim], name='target_embedding') self.sim = tf.matmul(self.target, self.matrix, transpose_b=True, name="embeddings_similarities") self.k = tf.placeholder(tf.int32, shape=(), name='k') self.top_k = tf.nn.top_k(self.sim, self.k, sorted=True) self.session = tf.Session() def topK(self, k, target): with self.graph.as_default(): tf.set_random_seed(1234) return self.session.run(self.top_k, {self.target: target, self.k: int(k)}) class FunctionSearchEngine: def __init__(self, db_name, table_name, limit=None): self.s2v = TopK() self.db_name = db_name self.table_name = table_name self.labels = [] self.trunc_labels = [] self.lista_embedding = [] self.ids = [] self.n_similar=[] self.ret = {} self.precision = None print("Query for ids") conn = sqlite3.connect(db_name) cur = conn.cursor() if limit is None: q = cur.execute("SELECT id, project, compiler, optimization, file_name, function_name FROM functions") res = q.fetchall() else: q = cur.execute("SELECT id, project, compiler, optimization, file_name, function_name FROM functions LIMIT {}".format(limit)) res = q.fetchall() for item in tqdm.tqdm(res, total=len(res)): q = cur.execute("SELECT " + self.table_name + " FROM " + self.table_name + " WHERE id=?", (item[0],)) e = q.fetchone() if e is None: continue self.lista_embedding.append(self.embeddingToNp(e[0])) element = "{}/{}/{}".format(item[1], item[4], item[5]) self.trunc_labels.append(element) element = "{}@{}/{}/{}/{}".format(item[5], item[1], item[2], item[3], item[4]) self.labels.append(element) self.ids.append(item[0]) conn.close() self.s2v.loads_embeddings_SE(self.lista_embedding) self.num_funcs = len(self.lista_embedding) def load_target(self, target_db_name, target_fcn_ids, calc_mean=False): conn = sqlite3.connect(target_db_name) cur = conn.cursor() mean = None for id in target_fcn_ids: if target_db_name == self.db_name and id in self.ids: idx = self.ids.index(id) e = self.lista_embedding[idx] else: q = cur.execute("SELECT " + self.table_name + " FROM " + self.table_name + " WHERE id=?", (id,)) e = q.fetchone() e = self.embeddingToNp(e[0]) if mean is None: mean = e.reshape([e.shape[0], 1]) else: mean = np.hstack((mean, e.reshape(e.shape[0], 1))) if calc_mean: target = [np.mean(mean, axis=1)] else: target = mean.T return target def embeddingToNp(self, e): e = e.replace('\n', '') e = e.replace('[', '') e = e.replace(']', '') emb = np.fromstring(e, dtype=float, sep=' ') return emb def top_k(self, target, k=None): if k is not None: top_k = self.s2v.topK(k, target) else: top_k = 
self.s2v.topK(len(self.lista_embedding), target) return top_k def pp_search(self, k, target): result = pd.DataFrame(columns=['Id', 'Name', 'Score']) top_k = self.s2v.topK(k, target) for i, e in enumerate(top_k.indices[0]): result = result.append({'Id': self.ids[e], 'Name': self.labels[e], 'Score': top_k.values[0][i]}, ignore_index=True) print(result) def search(self, k, target): result = [] top_k = self.s2v.topK(k, target) for i, e in enumerate(top_k.indices[0]): result.append({'Id': self.ids[e], 'Name': self.labels[e], 'Score': top_k.values[0][i]}) return result
5,075
34.25
137
py
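The TopK class above reduces nearest-neighbour search to one matrix multiplication followed by tf.nn.top_k. Below is a minimal NumPy sketch of the same idea on toy data; the names embeddings, query and k are illustrative and not part of the SAFE code base.

import numpy as np

# toy database of 5 embeddings of dimension 4 (rows play the role of self.matrix)
embeddings = np.random.RandomState(0).randn(5, 4).astype(np.float32)
query = embeddings[2] + 0.01                     # a target close to row 2

# dot-product similarities, the NumPy twin of tf.matmul(target, matrix, transpose_b=True)
sims = embeddings @ query

# indices of the k best matches, highest score first (what tf.nn.top_k returns)
k = 3
top = np.argsort(-sims)[:k]
print(top, sims[top])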
SAFE
SAFE-master/function_search/fromJsonSearchToPlot.py
# SAFE TEAM # Copyright (C) 2019 Luca Massarelli, Giuseppe Antonio Di Luna, Fabio Petroni, Leonardo Querzoni, Roberto Baldoni import matplotlib.pyplot as plt import json import math import numpy as np from multiprocessing import Pool def find_dcg(element_list): dcg_score = 0.0 for j, sim in enumerate(element_list): dcg_score += float(sim) / math.log(j + 2) return dcg_score def count_ones(element_list): return len([x for x in element_list if x == 1]) def extract_info(file_1): with open(file_1, 'r') as f: data1 = json.load(f) performance1 = [] average_recall_k1 = [] precision_at_k1 = [] for f_index in range(0, len(data1)): f1 = data1[f_index][0] pf1 = data1[f_index][1] tp1 = [] recall_p1 = [] precision_p1 = [] # we start from 1 to remove ourselves for k in range(1, 200): cut1 = f1[0:k] dcg1 = find_dcg(cut1) ideal1 = find_dcg(([1] * (pf1) + [0] * (k - pf1))[0:k]) p1k = float(count_ones(cut1)) tp1.append(dcg1 / ideal1) recall_p1.append(p1k / pf1) precision_p1.append(p1k / k) performance1.append(tp1) average_recall_k1.append(recall_p1) precision_at_k1.append(precision_p1) avg_p1 = np.average(performance1, axis=0) avg_p10 = np.average(average_recall_k1, axis=0) average_precision = np.average(precision_at_k1, axis=0) return avg_p1, avg_p10, average_precision def print_graph(info1, file_name, label_y, title_1, p): fig, ax = plt.subplots() ax.plot(range(0, len(info1)), info1, color='b', label=title_1) ax.legend(loc=p, shadow=True, fontsize='x-large') plt.xlabel("Number of Nearest Results") plt.ylabel(label_y) fname = file_name plt.savefig(fname) plt.close(fig) def compare_and_print(file): filename = file.split('_')[0] + '_' + file.split('_')[1] t_short = filename label_1 = t_short + '_' + file.split('_')[3] avg_p1, recall_p1, precision1 = extract_info(file) fname = filename + '_nDCG.pdf' print_graph(avg_p1, fname, 'nDCG', label_1, 'upper right') fname = filename + '_recall.pdf' print_graph(recall_p1, fname, 'Recall', label_1, 'lower right') fname = filename + '_precision.pdf' print_graph(precision1, fname, 'Precision', label_1, 'upper right') return avg_p1, recall_p1, precision1 e1 = 'embeddings_safe' opt = ['O0', 'O1', 'O2', 'O3'] compilers = ['gcc-7', 'gcc-4.8', 'clang-6.0', 'clang-4.0'] values = [] for o in opt: for c in compilers: f0 = '' + c + '_' + o + '_' + e1 + '_top200.json' values.append(f0) p = Pool(4) result = p.map(compare_and_print, values) avg_p1 = [] recal_p1 = [] pre_p1 = [] avg_p2 = [] recal_p2 = [] pre_p2 = [] for t in result: avg_p1.append(t[0]) recal_p1.append(t[1]) pre_p1.append(t[2]) avg_p1 = np.average(avg_p1, axis=0) recal_p1 = np.average(recal_p1, axis=0) pre_p1 = np.average(pre_p1, axis=0) print_graph(avg_p1[0:20], 'nDCG.pdf', 'normalized DCG', 'SAFE', 'upper right') print_graph(recal_p1, 'recall.pdf', 'recall', 'SAFE', 'lower right') print_graph(pre_p1[0:20], 'precision.pdf', 'precision', 'SAFE', 'upper right')
3,260
25.088
114
py
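find_dcg above implements DCG with a log(j + 2) discount over 0-based ranks, and the script divides it by the DCG of an ideal ranking to obtain nDCG. A hedged worked example on made-up relevance lists:

import math

def find_dcg(element_list):
    return sum(float(sim) / math.log(j + 2) for j, sim in enumerate(element_list))

ranked = [1, 0, 1, 0]                       # relevance of the first k = 4 results
ideal = [1, 1, 0, 0]                        # the same hits ranked perfectly
print(find_dcg(ranked) / find_dcg(ideal))   # nDCG@4, roughly 0.92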
SAFE
SAFE-master/function_search/EvaluateSearchEngine.py
# SAFE TEAM # Copyright (C) 2019 Luca Massarelli, Giuseppe Antonio Di Luna, Fabio Petroni, Leonardo Querzoni, Roberto Baldoni from FunctionSearchEngine import FunctionSearchEngine from sklearn import metrics import sqlite3 from multiprocessing import Process import math import warnings import random import json class SearchEngineEvaluator: def __init__(self, db_name, table, limit=None,k=None): self.tables = table self.db_name = db_name self.SE = FunctionSearchEngine(db_name, table, limit=limit) self.k=k self.number_similar={} def do_search(self, target_db_name, target_fcn_ids): target = self.SE.load_target(target_db_name, target_fcn_ids) self.SE.pp_search(50, target) def calc_auc(self, target_db_name, target_fcn_ids): self.SE.load_target(target_db_name, target_fcn_ids) result = self.SE.auc() print(result) # # This method searches for all target functions in the DB; in our test we take num functions compiled with compiler and opt. # Moreover, it populates the self.number_similar dictionary, which contains the number of similar functions for each target. # def find_target_fcn(self, compiler, opt, num): conn = sqlite3.connect(self.db_name) cur = conn.cursor() q = cur.execute("SELECT id, project, file_name, function_name FROM functions WHERE compiler=? AND optimization=?", (compiler, opt)) res = q.fetchall() ids = [i[0] for i in res] true_labels = [l[1]+"/"+l[2]+"/"+l[3] for l in res] n_ids = [] n_true_labels = [] num = min(num, len(ids)) for i in range(0, num): index = random.randrange(len(ids)) n_ids.append(ids[index]) n_true_labels.append(true_labels[index]) f_name=true_labels[index].split('/')[2] fi_name=true_labels[index].split('/')[1] q = cur.execute("SELECT num FROM count_func WHERE file_name='{}' and function_name='{}'".format(fi_name,f_name)) f = q.fetchone() if f is not None: sim_count = int(f[0]) else: sim_count = 0 self.number_similar[true_labels[index]] = sim_count return n_ids, n_true_labels @staticmethod def functions_ground_truth(labels, indices, values, true_label): y_true = [] y_score = [] for i, e in enumerate(indices): y_score.append(float(values[i])) l = labels[e] if l == true_label: y_true.append(1) else: y_true.append(0) return y_true, y_score # this method executes the test # it selects the target functions and looks them up in the entire db # the outcome is a json file containing the top 200 similar functions for each target function. # the json file is an array that contains an entry for each target function # each entry is a triple (t0, t1, t2) # t0: an array that contains 1 at entry j if entry j is similar to the target, 0 otherwise # t1: the number of similar functions to the target in the whole db # t2: an array that at entry j contains the similarity score of the j-th most similar function to the target.
# # def evaluate_precision_on_all_functions(self, compiler, opt): target_fcn_ids, true_labels = self.find_target_fcn(compiler, opt, 10000) batch = 1000 labels = self.SE.trunc_labels info=[] for i in range(0, len(target_fcn_ids), batch): if i + batch > len(target_fcn_ids): batch = len(target_fcn_ids) - i target = self.SE.load_target(self.db_name, target_fcn_ids[i:i+batch]) top_k = self.SE.top_k(target, self.k) for j in range(0, batch): a, b = SearchEngineEvaluator.functions_ground_truth(labels, top_k.indices[j, :], top_k.values[j, :], true_labels[i+j]) info.append((a,self.number_similar[true_labels[i + j]],b)) with open(compiler+'_'+opt+'_'+self.tables+'_top200.json', 'w') as outfile: json.dump(info, outfile) def test(dbName, table, opt,x,k): print("k:{} - Table: {} - Opt: {}".format(k,table, opt)) SEV = SearchEngineEvaluator(dbName, table, limit=2000000,k=k) SEV.evaluate_precision_on_all_functions(x, opt) print("-------------------------------------") if __name__ == '__main__': random.seed(12345) dbName = '../data/AMD64PostgreSQL.db' table = ['safe_embeddings'] opt = ["O0", "O1", "O2", "O3"] for x in ['gcc-4.8',"clang-4.0",'gcc-7','clang-6.0']: for t in table: for o in opt: p = Process(target=test, args=(dbName, t, o,x,200)) p.start() p.join()
4,821
35.255639
139
py
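functions_ground_truth above turns a top-k answer into parallel relevance and score vectors for scoring. A compact restatement on toy data; the labels, indices and scores below are invented for illustration:

labels = ['p/f.c/foo', 'p/f.c/bar', 'p/f.c/foo', 'p/g.c/baz']   # trunc_labels of the whole DB
indices = [2, 1, 0]                                             # a top-3 result
values = [0.99, 0.75, 0.60]
true_label = 'p/f.c/foo'

y_true = [1 if labels[e] == true_label else 0 for e in indices]
y_score = [float(v) for v in values]
print(y_true, y_score)   # [1, 0, 1] [0.99, 0.75, 0.6]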
SAFE
SAFE-master/function_search/__init__.py
0
0
0
py
SAFE
SAFE-master/asm_embedding/FunctionNormalizer.py
# SAFE TEAM # Copyright (C) 2019 Luca Massarelli, Giuseppe Antonio Di Luna, Fabio Petroni, Leonardo Querzoni, Roberto Baldoni import numpy as np class FunctionNormalizer: def __init__(self, max_instruction): self.max_instructions = max_instruction def normalize(self, f): f = np.asarray(f[0:self.max_instructions]) length = f.shape[0] if f.shape[0] < self.max_instructions: f = np.pad(f, (0, self.max_instructions - f.shape[0]), mode='constant') return f, length def normalize_function_pairs(self, pairs): lengths = [] new_pairs = [] for x in pairs: f0, len0 = self.normalize(x[0]) f1, len1 = self.normalize(x[1]) lengths.append((len0, len1)) new_pairs.append((f0, f1)) return new_pairs, lengths def normalize_functions(self, functions): lengths = [] new_functions = [] for f in functions: f, length = self.normalize(f) lengths.append(length) new_functions.append(f) return new_functions, lengths
1,121
29.324324
114
py
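FunctionNormalizer.normalize truncates a function to max_instructions, zero-pads shorter ones, and also returns the original length. A toy run of the same logic, with made-up instruction ids:

import numpy as np

max_instructions = 5
f = np.asarray([7, 3, 9][0:max_instructions])    # a function shorter than the cap
length = f.shape[0]
if f.shape[0] < max_instructions:
    f = np.pad(f, (0, max_instructions - f.shape[0]), mode='constant')
print(f, length)                                 # [7 3 9 0 0] 3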
SAFE
SAFE-master/asm_embedding/DocumentManipulation.py
import json import re import os def list_to_str(li): i='' for x in li: i=i+' '+x i=i+' endfun'*5 return i def document_append(strin): with open('/Users/giuseppe/docuent_X86','a') as f: f.write(strin) ciro=set() cantina=[] num_total=0 num_filtered=0 with open('/Users/giuseppe/dump.x86.linux.json') as f: l=f.readline() print('loaded') r = re.split('(\[.*?\])(?= *\[)', l) del l for x in r: if '[' in x: gennaro=json.loads(x) for materdomini in gennaro: num_total=num_total+1 if materdomini[0] not in ciro: ciro.add(materdomini[0]) num_filtered=num_filtered+1 a=list_to_str(materdomini[1]) document_append(a) del x print(num_total) print(num_filtered)
869
22.513514
54
py
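The script above relies on re.split with a capturing group and a lookahead to cut one huge line of concatenated JSON arrays back into parseable pieces. A self-contained demonstration of that splitting trick on a toy string:

import json
import re

l = '[1, 2] [3, 4] [5, 6]'                       # several JSON arrays glued on one line
r = re.split(r'(\[.*?\])(?= *\[)', l)
arrays = [json.loads(x) for x in r if '[' in x]
print(arrays)                                    # [[1, 2], [3, 4], [5, 6]]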
SAFE
SAFE-master/asm_embedding/__init__.py
0
0
0
py
SAFE
SAFE-master/asm_embedding/FunctionAnalyzerRadare.py
# SAFE TEAM # Copyright (C) 2019 Luca Massarelli, Giuseppe Antonio Di Luna, Fabio Petroni, Leonardo Querzoni, Roberto Baldoni import json import r2pipe class RadareFunctionAnalyzer: def __init__(self, filename, use_symbol, depth): self.r2 = r2pipe.open(filename, flags=['-2']) self.filename = filename self.arch, _ = self.get_arch() self.top_depth = depth self.use_symbol = use_symbol def __enter__(self): return self @staticmethod def filter_reg(op): return op["value"] @staticmethod def filter_imm(op): imm = int(op["value"]) if -5000 <= imm <= 5000: ret = hex(imm) else: ret = 'HIMM' return ret @staticmethod def filter_mem(op): if "base" not in op: op["base"] = 0 if op["base"] == 0: r = "[" + "MEM" + "]" else: reg_base = str(op["base"]) disp = str(op["disp"]) scale = str(op["scale"]) r = '[' + reg_base + "*" + scale + "+" + disp + ']' return r @staticmethod def filter_memory_references(i): inst = "" + i["mnemonic"] for op in i["operands"]: if op["type"] == 'reg': inst += " " + RadareFunctionAnalyzer.filter_reg(op) elif op["type"] == 'imm': inst += " " + RadareFunctionAnalyzer.filter_imm(op) elif op["type"] == 'mem': inst += " " + RadareFunctionAnalyzer.filter_mem(op) if len(i["operands"]) > 1: inst = inst + "," if "," in inst: inst = inst[:-1] inst = inst.replace(" ", "_") return str(inst) @staticmethod def get_callref(my_function, depth): calls = {} if 'callrefs' in my_function and depth > 0: for cc in my_function['callrefs']: if cc["type"] == "C": calls[cc['at']] = cc['addr'] return calls def get_instruction(self): instruction = json.loads(self.r2.cmd("aoj 1")) if len(instruction) > 0: instruction = instruction[0] else: return None operands = [] if 'opex' not in instruction: return None for op in instruction['opex']['operands']: operands.append(op) instruction['operands'] = operands return instruction def function_to_inst(self, functions_dict, my_function, depth): instructions = [] asm = "" if self.use_symbol: s = my_function['vaddr'] else: s = my_function['offset'] calls = RadareFunctionAnalyzer.get_callref(my_function, depth) self.r2.cmd('s ' + str(s)) if self.use_symbol: end_address = s + my_function["size"] else: end_address = s + my_function["realsz"] while s < end_address: instruction = self.get_instruction() asm += instruction["bytes"] if self.arch == 'x86': filtered_instruction = "X_" + RadareFunctionAnalyzer.filter_memory_references(instruction) elif self.arch == 'arm': filtered_instruction = "A_" + RadareFunctionAnalyzer.filter_memory_references(instruction) instructions.append(filtered_instruction) if s in calls and depth > 0: if calls[s] in functions_dict: ii, aa = self.function_to_inst(functions_dict, functions_dict[calls[s]], depth-1) instructions.extend(ii) asm += aa self.r2.cmd("s " + str(s)) self.r2.cmd("so 1") s = int(self.r2.cmd("s"), 16) return instructions, asm def get_arch(self): arch = None bits = None try: info = json.loads(self.r2.cmd('ij')) if 'bin' in info: arch = info['bin']['arch'] bits = info['bin']['bits'] except: print("Error loading file") return arch, bits def find_functions(self): self.r2.cmd('aaa') try: function_list = json.loads(self.r2.cmd('aflj')) except: function_list = [] return function_list def find_functions_by_symbols(self): self.r2.cmd('aa') try: symbols = json.loads(self.r2.cmd('isj')) fcn_symb = [s for s in symbols if s['type'] == 'FUNC'] except: fcn_symb = [] return fcn_symb def analyze(self): if self.use_symbol: function_list = self.find_functions_by_symbols() else: function_list = self.find_functions()
functions_dict = {} if self.top_depth > 0: for my_function in function_list: if self.use_symbol: functions_dict[my_function['vaddr']] = my_function else: functions_dict[my_function['offset']] = my_function result = {} for my_function in function_list: if self.use_symbol: address = my_function['vaddr'] else: address = my_function['offset'] try: instructions, asm = self.function_to_inst(functions_dict, my_function, self.top_depth) result[my_function['name']] = {'filtered_instructions': instructions, "asm": asm, "address": address} except: print("Error in functions: {} from {}".format(my_function['name'], self.filename)) pass return result def close(self): self.r2.quit() def __exit__(self, exc_type, exc_value, traceback): self.r2.quit()
5,885
29.030612
117
py
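filter_imm above keeps small immediates as hex tokens and collapses everything else to the placeholder HIMM, so large addresses do not blow up the instruction vocabulary. A standalone sketch of the same rule, re-implemented here so it runs without r2pipe:

def filter_imm(value, cap=5000):
    # small immediates survive as hex constants, everything larger becomes HIMM
    return hex(value) if -cap <= value <= cap else 'HIMM'

print(filter_imm(72))        # 0x48
print(filter_imm(4199888))   # HIMM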
SAFE
SAFE-master/asm_embedding/InstructionsConverter.py
# SAFE TEAM # Copyright (C) 2019 Luca Massarelli, Giuseppe Antonio Di Luna, Fabio Petroni, Leonardo Querzoni, Roberto Baldoni import json class InstructionsConverter: def __init__(self, json_i2id): f = open(json_i2id, 'r') self.i2id = json.load(f) f.close() def convert_to_ids(self, instructions_list): ret_array = [] # For each instruction we add +1 to its ID because the first # element of the embedding matrix is zero for x in instructions_list: if x in self.i2id: ret_array.append(self.i2id[x] + 1) elif 'X_' in x: # print(str(x) + " is not a known x86 instruction") ret_array.append(self.i2id['X_UNK'] + 1) elif 'A_' in x: # print(str(x) + " is not a known arm instruction") ret_array.append(self.i2id['A_UNK'] + 1) else: # print("There is a problem " + str(x) + " does not appear to be an asm or arm instruction") ret_array.append(self.i2id['X_UNK'] + 1) return ret_array
1,118
32.909091
114
py
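convert_to_ids shifts every instruction ID by +1 because row 0 of the embedding matrix is reserved, and falls back to the X_UNK / A_UNK entries for out-of-vocabulary instructions. A compact, self-contained restatement with a toy dictionary (the real one is loaded from json_i2id; startswith is used here in place of the original substring test):

i2id = {'X_mov_eax,0x1': 0, 'X_UNK': 1, 'A_UNK': 2}     # toy vocabulary

def convert(instructions):
    ids = []
    for x in instructions:
        if x in i2id:
            ids.append(i2id[x] + 1)                     # +1: row 0 of the embeddings is reserved
        elif x.startswith('A_'):
            ids.append(i2id['A_UNK'] + 1)
        else:
            ids.append(i2id['X_UNK'] + 1)
    return ids

print(convert(['X_mov_eax,0x1', 'X_xor_ebx,ebx', 'A_ldr_r0,[MEM]']))   # [1, 2, 3]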
CaBERT-SLU
CaBERT-SLU-main/baseline_midsf.py
"""For model training and inference Data input should be a single sentence. """ import random import torch import torch.nn as nn from torch.autograd import Variable from torch.optim import Adam, RMSprop from transformers import BertTokenizer, BertModel, BertConfig from keras.preprocessing.sequence import pad_sequences from sklearn.model_selection import train_test_split import pickle import copy import numpy as np import collections from tqdm import tqdm from collections import Counter, defaultdict from model import MULTI from all_data_slot import get_dataloader from config import opt from utils import * def train(**kwargs): # attributes for k, v in kwargs.items(): setattr(opt, k, v) np.random.seed(0) device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') torch.backends.cudnn.enabled = False print('Dataset to use: ', opt.train_path) print('Dictionary to use: ', opt.dic_path_with_tokens) print('Data Type: ', opt.datatype) print('Use pretrained weights: ', opt.retrain) # dataset with open(opt.dic_path_with_tokens, 'rb') as f: dic = pickle.load(f) with open(opt.slot_path, 'rb') as f: slot_dic = pickle.load(f) with open(opt.train_path, 'rb') as f: train_data = pickle.load(f) if opt.datatype == "mixatis" or opt.datatype == "mixsnips": # ATIS Dataset X_train, y_train, _ = zip(*train_data) X_test, y_test, _ = zip(*test_data) elif opt.datatype == "semantic": # Semantic parsing Dataset X, y = zip(*train_data) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42) elif opt.datatype == "e2e" or opt.datatype == "sgd": # Microsoft Dialogue Dataset / SGD Dataset all_data = [] dialogue_id = {} dialogue_counter = 0 counter = 0 for data in train_data: for instance in data: all_data.append(instance) dialogue_id[counter] = dialogue_counter counter += 1 dialogue_counter += 1 indices = np.random.permutation(len(all_data)) train = np.array(all_data)[indices[:int(len(all_data)*0.7)]]#[:10000] test = np.array(all_data)[indices[int(len(all_data)*0.7):]]#[:100] train_loader = get_dataloader(train, len(dic), len(slot_dic), opt) val_loader = get_dataloader(test, len(dic), len(slot_dic), opt) # model config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = MULTI(opt, len(dic), len(slot_dic)) if opt.model_path: model.load_state_dict(torch.load(opt.model_path)) print("Pretrained model has been loaded.\n") else: print("Train from scratch...") model = model.to(device) # optimizer, criterion # param_optimizer = list(model.named_parameters()) # no_decay = ['bias', 'gamma', 'beta'] # optimizer_grouped_parameters = [ # {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], # 'weight_decay_rate': 0.01}, # {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], # 'weight_decay_rate': 0.0} # ] # optimizer = BertAdam(optimizer_grouped_parameters,lr=opt.learning_rate_bert, warmup=.1) optimizer = Adam(model.parameters(), weight_decay=0.01, lr=opt.learning_rate_classifier) if opt.data_mode == 'single': criterion = nn.CrossEntropyLoss().to(device) else: criterion = nn.BCEWithLogitsLoss(reduction='sum').to(device) criterion2 = nn.CrossEntropyLoss(reduction='sum').to(device) best_loss = 100 best_accuracy = 0 best_f1 = 0 # Start training for epoch in range(opt.epochs): print("====== epoch %d / %d: ======"% (epoch+1, opt.epochs)) # Training Phase total_train_loss = 0 total_P = 0 total_R = 0 total_F1 = 0 total_acc = 0 model.train() 
ccounter = 0 for (captions_t, masks, labels, slot_labels) in tqdm(train_loader): captions_t = captions_t.to(device) masks = masks.to(device) labels = labels.to(device) slot_labels = slot_labels.to(device) slot_labels = slot_labels.reshape(-1) optimizer.zero_grad() encoder_logits, decoder_logits, slot_logits = model(captions_t) train_loss = criterion(encoder_logits, labels) decoder_logits = decoder_logits.view(-1, len(dic)) slabels = labels.unsqueeze(1) slabels = slabels.repeat(1, opt.maxlen, 1) slabels = slabels.view(-1, len(dic)) train_loss += criterion(decoder_logits, slabels) train_loss += criterion2(slot_logits, slot_labels) train_loss.backward() optimizer.step() total_train_loss += train_loss P, R, F1, acc = f1_score_intents(encoder_logits, labels) total_P += P total_R += R total_F1 += F1 total_acc += acc ccounter += 1 print('Average train loss: {:.4f} '.format(total_train_loss / train_loader.dataset.num_data)) precision = total_P / ccounter recall = total_R / ccounter f1 = total_F1 / ccounter print(f'P = {precision:.4f}, R = {recall:.4f}, F1 = {f1:.4f}') print('Accuracy: ', total_acc/train_loader.dataset.num_data) # Validation Phase total_val_loss = 0 total_P = 0 total_R = 0 total_F1 = 0 total_acc = 0 model.eval() ccounter = 0 stats = defaultdict(Counter) for (captions_t, masks, labels, slot_labels) in val_loader: captions_t = captions_t.to(device) masks = masks.to(device) labels = labels.to(device) slot_labels = slot_labels.to(device) slot_labels = slot_labels.reshape(-1) with torch.no_grad(): encoder_logits, decoder_logits, slot_logits = model(captions_t) val_loss = criterion(encoder_logits, labels) decoder_logits = decoder_logits.view(-1, len(dic)) slabels = labels.unsqueeze(1) slabels = slabels.repeat(1, opt.maxlen, 1) slabels = slabels.view(-1, len(dic)) val_loss += criterion(decoder_logits, slabels) total_val_loss += val_loss P, R, F1, acc = f1_score_intents(encoder_logits, labels) total_P += P total_R += R total_F1 += F1 total_acc += acc ccounter += 1 _, index = torch.topk(slot_logits, k=1, dim=-1) evaluate_iob(index, slot_labels, slot_dic, stats) print('========= Validation =========') print('Average val loss: {:.4f} '.format(total_val_loss / val_loader.dataset.num_data)) precision = total_P / ccounter recall = total_R / ccounter f1 = total_F1 / ccounter print(f'P = {precision:.4f}, R = {recall:.4f}, F1 = {f1:.4f}') print('Accuracy: ', total_acc/val_loader.dataset.num_data) val_acc = total_acc/val_loader.dataset.num_data # print slot stats p_slot, r_slot, f1_slot = prf(stats['total']) print('========= Slot =========') print(f'Slot Score: P = {p_slot:.4f}, R = {r_slot:.4f}, F1 = {f1_slot:.4f}') # for label in stats: # if label != 'total': # p, r, f1 = prf(stats[label]) # print(f'{label:4s}: P = {p:.4f}, R = {r:.4f}, F1 = {f1:.4f}') if f1 > best_f1: print('saving with loss of {}'.format(total_val_loss), 'improved over previous {}'.format(best_loss)) best_loss = total_val_loss best_accuracy = val_acc best_f1 = f1 best_stats = copy.deepcopy(stats) torch.save(model.state_dict(), 'checkpoints/best_{}_{}_baseline.pth'.format(opt.datatype, opt.data_mode)) print() print('Best total val loss: {:.4f}'.format(total_val_loss)) print('Best Test Accuracy: {:.4f}'.format(best_accuracy)) print('Best F1 Score: {:.4f}'.format(best_f1)) p_slot, r_slot, f1_slot = prf(best_stats['total']) print('Final evaluation on slot filling of the validation set:') print(f'Overall: P = {p_slot:.4f}, R = {r_slot:.4f}, F1 = {f1_slot:.4f}') ##################################################################### def 
test(**kwargs): # attributes for k, v in kwargs.items(): setattr(opt, k, v) np.random.seed(0) device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') torch.backends.cudnn.enabled = False print('Dataset to use: ', opt.train_path) print('Dictionary to use: ', opt.dic_path) # dataset with open(opt.dic_path, 'rb') as f: dic = pickle.load(f) reverse_dic = {v: k for k,v in dic.items()} with open(opt.slot_path, 'rb') as f: slot_dic = pickle.load(f) with open(opt.train_path, 'rb') as f: train_data = pickle.load(f) if opt.test_path: with open(opt.test_path, 'rb') as f: test_data = pickle.load(f) if opt.datatype == "atis": # ATIS Dataset X_train, y_train, _ = zip(*train_data) X_test, y_test, _ = zip(*test_data) elif opt.datatype == "semantic": # Semantic parsing Dataset X, y = zip(*train_data) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42) elif opt.datatype == "e2e" or opt.datatype == "sgd": # Microsoft Dialogue Dataset / SGD Dataset all_data = [] dialogue_id = {} dialogue_counter = 0 counter = 0 for data in train_data: for instance in data: all_data.append(instance) dialogue_id[counter] = dialogue_counter counter += 1 dialogue_counter += 1 indices = np.random.permutation(len(all_data)) X_train = np.array(all_data)[indices[:int(len(all_data)*0.7)]]#[:10000] X_test = np.array(all_data)[indices[int(len(all_data)*0.7):]]#[:100] X_train, mask_train = load_data(X_train) X_test, mask_test = load_data(X_test) # model config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = MULTI(opt, len(dic), len(slot_dic)) if opt.model_path: model.load_state_dict(torch.load(opt.model_path)) print("Pretrained model has been loaded.\n") model = model.to(device) tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True) # Store embeddings if opt.test_mode == "embedding": train_loader = get_dataloader(X_train, y_train, mask_train, opt) results = collections.defaultdict(list) model.eval() for i, (captions_t, labels, masks) in enumerate(train_loader): captions_t = captions_t.to(device) labels = labels.to(device) masks = masks.to(device) with torch.no_grad(): hidden_states, pooled_output, outputs = model(captions_t, masks) print("Saving Data: %d" % i) for ii in range(len(labels)): key = labels[ii].data.cpu().item() embedding = pooled_output[ii].data.cpu().numpy().reshape(-1) word_embeddings = hidden_states[-1][ii].data.cpu().numpy() tokens = tokenizer.convert_ids_to_tokens(captions_t[ii].data.cpu().numpy()) tokens = [token for token in tokens if token != "[CLS]" and token != "[SEP]" and token != "[PAD]"] original_sentence = " ".join(tokens) results[key].append((original_sentence, embedding, word_embeddings)) torch.save(results, embedding_path) # Run test classification elif opt.test_mode == "data": # Single instance # index = np.random.randint(0, len(X_test), 1)[0] # input_ids = X_test[index] # attention_masks = mask_test[index] # print(" ".join(tokenizer.convert_ids_to_tokens(input_ids))) # captions_t = torch.LongTensor(input_ids).unsqueeze(0).to(device) # mask = torch.LongTensor(attention_masks).unsqueeze(0).to(device) # with torch.no_grad(): # pooled_output, outputs = model(captions_t, mask) # print("Predicted label: ", reverse_dic[torch.max(outputs, 1)[1].item()]) # print("Real label: ", reverse_dic[y_test[index]]) # Validation Phase test_loader = get_dataloader(X_test, y_test, mask_test, len(dic), opt) error_ids = [] pred_labels = [] 
real_labels = [] test_corrects = 0 totals = 0 model.eval() for i, (captions_t, labels, masks) in enumerate(test_loader): print('predict batches: ', i) captions_t = captions_t.to(device) labels = labels.to(device) masks = masks.to(device) with torch.no_grad(): _, pooled_output, outputs = model(captions_t, masks) co, to = calc_score(outputs, labels) test_corrects += co totals += to if opt.data_mode == 'single': idx = torch.max(outputs, 1)[1] != labels wrong_ids = [tokenizer.convert_ids_to_tokens(caption, skip_special_tokens=True) for caption in captions_t[idx]] error_ids += wrong_ids pred_labels += [reverse_dic[label.item()] for label in torch.max(outputs, 1)[1][idx]] real_labels += [reverse_dic[label.item()] for label in labels[idx]] else: for i, logits in enumerate(outputs): log = torch.sigmoid(logits) correct = (labels[i][torch.where(log>0.5)[0]]).sum() total = len(torch.where(labels[i]==1)[0]) if correct != total: wrong_caption = tokenizer.convert_ids_to_tokens(captions_t[i], skip_special_tokens=True) error_ids.append(wrong_caption) pred_ls = [reverse_dic[p] for p in torch.where(log>0.5)[0].detach().cpu().numpy()] real_ls = [reverse_dic[i] for i, r in enumerate(labels[i].detach().cpu().numpy()) if r == 1] pred_labels.append(pred_ls) real_labels.append(real_ls) with open('error_analysis/{}_{}.txt'.format(opt.datatype, opt.data_mode), 'w') as f: f.write('----------- Wrong Examples ------------\n') for i, (caption, pred, real) in enumerate(zip(error_ids, pred_labels, real_labels)): f.write(str(i)+'\n') f.write(' '.join(caption)+'\n') f.write('Predicted label: {}\n'.format(pred)) f.write('Real label: {}\n'.format(real)) f.write('------\n') test_acc = test_corrects.double() / test_loader.dataset.num_data if opt.data_mode == 'single' else test_corrects.double() / totals print('Test accuracy: {:.4f}'.format(test_acc)) # User defined elif opt.test_mode == "user": while True: print("Please input the sentence: ") text = input() print("\n======== Predicted Results ========") print(text) text = "[CLS] " + text + " [SEP]" tokenized_text = tokenizer.tokenize(text) tokenized_ids = np.array(tokenizer.convert_tokens_to_ids(tokenized_text))[np.newaxis,:] input_ids = pad_sequences(tokenized_ids, maxlen=opt.maxlen, dtype="long", truncating="post", padding="post").squeeze(0) attention_masks = [float(i>0) for i in input_ids] captions_t = torch.LongTensor(input_ids).unsqueeze(0).to(device) mask = torch.LongTensor(attention_masks).unsqueeze(0).to(device) with torch.no_grad(): pooled_output, outputs = model(captions_t, mask) print("Predicted label: ", reverse_dic[torch.max(outputs, 1)[1].item()]) print("=================================") if __name__ == '__main__': import fire fire.Fire()
16,880
36.182819
138
py
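One detail of the training loop above is how the decoder is supervised: the utterance-level multi-hot labels are repeated across all opt.maxlen decoding steps before the BCE loss. A short shape-only illustration of that unsqueeze/repeat/view chain, with toy sizes:

import torch

maxlen, num_labels = 4, 3
labels = torch.tensor([[0., 1., 0.], [1., 0., 1.]])     # one multi-hot vector per utterance

# every decoding step is supervised with the same utterance-level labels
slabels = labels.unsqueeze(1).repeat(1, maxlen, 1).view(-1, num_labels)
print(slabels.shape)                                    # torch.Size([8, 3])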
CaBERT-SLU
CaBERT-SLU-main/all_data_context.py
import torch as t from torch.utils.data import Dataset, DataLoader import pickle from config import opt from sklearn.model_selection import train_test_split import numpy as np from keras.preprocessing.sequence import pad_sequences class Turns: def __init__(self, token_ids, slot_ids): token_ids, mask = self.load_data(token_ids) slot_ids, _ = self.load_data(slot_ids) self.token_ids = np.stack(token_ids, axis=0) self.slot_ids = np.stack(slot_ids, axis=0) self.attention_masks = mask def load_data(self, X): input_ids = pad_sequences(X, maxlen=60, dtype="long", truncating="post", padding="post") attention_masks = [] for seq in input_ids: seq_mask = [float(i>0) for i in seq] attention_masks.append(seq_mask) return input_ids, attention_masks class CoreDataset(Dataset): def __init__(self, data, dic, slot_dic, opt): self.data = data self.dic = dic self.slot_dic = slot_dic self.opt = opt self.num_labels = len(dic) self.num_slot_labels = len(slot_dic) self.X_turns, self.Y_turns = self.postprocess() self.num_data = sum([len(turn.token_ids) for turn in self.X_turns]) def postprocess(self): dialogs = [] y_slots = [] y_labels = [] for dialog in self.data: utts, slots, labels = zip(*dialog) dialogs.append(utts) y_slots.append(slots) y_labels.append(labels) X_turns = np.array([Turns(turns, slots) for turns, slots in zip(dialogs, y_slots)]) Y_turns = np.array(y_labels) return X_turns, Y_turns def __getitem__(self, index): # onehot labels = self.Y_turns[index] new_labels = t.zeros((len(labels), self.num_labels)).long() for i, label in enumerate(labels): label = t.LongTensor(np.array(label)) label = t.zeros(self.num_labels).scatter_(0, label, 1) new_labels[i] = label return self.X_turns[index], new_labels def __len__(self): return len(self.X_turns) def collate_fn(batch): X_turns, Y_update = zip(*batch) num_labels = Y_update[0].shape[1] lengths = [i.token_ids.shape[0] for i in X_turns] lengths = t.LongTensor(lengths) max_len = max([i.token_ids.shape[0] for i in X_turns]) max_dim = max([i.token_ids.shape[1] for i in X_turns]) result_ids = t.zeros((len(X_turns), max_len, max_dim)).long() result_token_masks = t.zeros((len(X_turns), max_len, max_dim)).long() result_masks = t.zeros((len(X_turns), max_len)).long() result_slot_labels = t.zeros((len(X_turns), max_len, max_dim)).long() result_labels = t.ones((len(X_turns), max_len, num_labels))*-1 for i in range(len(X_turns)): len1 = X_turns[i].token_ids.shape[0] dim1 = X_turns[i].token_ids.shape[1] result_ids[i, :len1, :dim1] = t.Tensor(X_turns[i].token_ids) result_token_masks[i, :len1, :dim1] = t.Tensor(X_turns[i].attention_masks) for j in range(lengths[i]): result_masks[i][j] = 1 result_slot_labels[i, :len1, :dim1] = t.Tensor(X_turns[i].slot_ids) result_labels[i, :len1, :] = Y_update[i] return result_ids, result_token_masks, result_masks, lengths, result_slot_labels, result_labels def get_dataloader_context(data, dic, slot_dic, opt): dataset = CoreDataset(data, dic, slot_dic, opt) batch_size = opt.batch_size return DataLoader(dataset, batch_size=batch_size, shuffle=False, collate_fn= lambda x: collate_fn(x)) ###################################################################### if __name__ == '__main__': with open(opt.dic_path_with_tokens, 'rb') as f: dic = pickle.load(f) with open(opt.slot_path, 'rb') as f: slot_dic = pickle.load(f) with open(opt.train_path, 'rb') as f: train_data = pickle.load(f) np.random.seed(0) indices = np.arange(len(train_data)) #np.random.permutation(len(train_data)) train = np.array(train_data)[indices[:int(len(train_data)*0.7)]] test = 
np.array(train_data)[indices[int(len(train_data)*0.7):]] train_loader = get_dataloader_context(train, dic, slot_dic, opt) for result_ids, result_token_masks, result_masks, lengths, result_slot_labels, result_labels in train_loader: print(result_ids[0]) print(result_token_masks[0]) print(result_masks[0]) print(lengths[0]) print(result_slot_labels[0]) print(result_labels[0])
4,711
34.164179
113
py
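collate_fn above pads a batch of dialogues with different turn counts into fixed-size tensors. A stripped-down sketch of the same padding pattern, with two toy "dialogues" whose token dimension is already equal:

import torch as t

# two dialogues with 2 and 3 turns, token dimension already padded to 4
d1 = t.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
d2 = t.tensor([[1, 2, 3, 0], [4, 0, 0, 0], [8, 9, 0, 0]])

max_len = max(d.shape[0] for d in (d1, d2))
batch = t.zeros((2, max_len, 4)).long()
for i, d in enumerate((d1, d2)):
    batch[i, :d.shape[0], :] = d                 # shorter dialogues stay zero-padded
print(batch.shape)                               # torch.Size([2, 3, 4])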
CaBERT-SLU
CaBERT-SLU-main/all_data_slot.py
import torch as t from torch.utils.data import Dataset, DataLoader import pickle from config import opt from sklearn.model_selection import train_test_split from keras.preprocessing.sequence import pad_sequences import numpy as np class CoreDataset(Dataset): def __init__(self, data, num_labels, num_slot_labels, opt): self.data = data self.num_data = len(self.data) self.maxlen = opt.maxlen self.num_labels = num_labels self.num_slot_labels = num_slot_labels self.opt = opt caps, slots, labels = zip(*self.data) self.caps, self.masks = self.load_data(caps, self.maxlen) self.slot_labels, _ = self.load_data(slots, self.maxlen) self.labels = labels def load_data(self, X, maxlen): input_ids = pad_sequences(X, maxlen=maxlen, dtype="long", truncating="post", padding="post") attention_masks = [] for seq in input_ids: seq_mask = [float(i>0) for i in seq] attention_masks.append(seq_mask) return t.tensor(input_ids), t.tensor(attention_masks) def __getitem__(self, index): # caps caps = self.caps[index] slot_labels = self.slot_labels[index] masks = self.masks[index] # labels label = t.LongTensor(np.array(self.labels[index])) labels = t.zeros(self.num_labels).scatter_(0, label, 1) return caps, masks, labels, slot_labels def __len__(self): return len(self.data) def get_dataloader(data, num_labels, num_slot_labels, opt): dataset = CoreDataset(data, num_labels, num_slot_labels, opt) batch_size = opt.batch_size return DataLoader(dataset, batch_size=batch_size, shuffle=False)
1,819
29.847458
100
py
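__getitem__ builds the multi-hot intent target with scatter_ on a zero vector. Isolated, the trick looks like this (toy label ids):

import torch as t

num_labels = 6
label = t.LongTensor([1, 4])                             # active intent ids of one utterance
multi_hot = t.zeros(num_labels).scatter_(0, label, 1)
print(multi_hot)                                         # tensor([0., 1., 0., 0., 1., 0.])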
CaBERT-SLU
CaBERT-SLU-main/utils.py
import random import torch import torch.nn as nn from torch.autograd import Variable from torch.optim import Adam, RMSprop from transformers import BertTokenizer, BertModel, BertConfig, AdamW from keras.preprocessing.sequence import pad_sequences from sklearn.model_selection import train_test_split import pickle import copy import numpy as np import collections from tqdm import tqdm from more_itertools import collapse from collections import defaultdict from model import BertContextNLU from all_data_context import get_dataloader_context from config import opt def load_data(X, maxlen): input_ids = pad_sequences(X, maxlen=maxlen, dtype="long", truncating="post", padding="post") attention_masks = [] for seq in input_ids: seq_mask = [float(i>0) for i in seq] attention_masks.append(seq_mask) return (input_ids, attention_masks) def f1_score_intents(outputs, labels): P, R, F1, acc = 0, 0, 0, 0 outputs = torch.sigmoid(outputs) for i in range(outputs.shape[0]): TP, FP, FN = 0, 0, 0 for j in range(outputs.shape[1]): if outputs[i][j] > 0.5 and labels[i][j] == 1: TP += 1 elif outputs[i][j] <= 0.5 and labels[i][j] == 1: FN += 1 elif outputs[i][j] > 0.5 and labels[i][j] == 0: FP += 1 precision = TP / float(TP + FP) if (TP + FP) != 0 else 0 recall = TP / float(TP + FN) if (TP + FN) != 0 else 0 F1 += 2 * precision * recall / float(precision + recall) if (precision + recall) != 0 else 0 P += precision R += recall p = (torch.where(outputs[i]>0.5)[0]) r = (torch.where(labels[i]==1)[0]) if len(p) == len(r) and (p == r).all(): acc += 1 P /= outputs.shape[0] R /= outputs.shape[0] F1 /= outputs.shape[0] return P, R, F1, acc ############################################3 def to_spans(l_ids, voc): """Convert a list of BIO labels, coded as integers, into spans identified by a beginning, an end, and a label. To allow easy comparison later, we store them in a dictionary indexed by the start position. @param l_ids: a list of predicted label indices @param voc: label vocabulary dictionary: index to label ex. 0: B-C """ spans = {} current_lbl = None current_start = None for i, l_id in enumerate(l_ids): l = voc[l_id] if l[0] == 'B': # Beginning of a named entity: B-something. if current_lbl: # If we're working on an entity, close it. spans[current_start] = (current_lbl, i) # Create a new entity that starts here. current_lbl = l[2:] current_start = i elif l[0] == 'I': # Continuation of an entity: I-something. if current_lbl: # If we have an open entity, but its label does not # correspond to the predicted I-tag, then we close # the open entity and create a new one. if current_lbl != l[2:]: spans[current_start] = (current_lbl, i) current_lbl = l[2:] current_start = i else: # If we don't have an open entity but predict an I tag, # we create a new entity starting here even though we're # not following the format strictly. current_lbl = l[2:] current_start = i else: # Outside: O. if current_lbl: # If we have an open entity, we close it. spans[current_start] = (current_lbl, i) current_lbl = None current_start = None if current_lbl != None: spans[current_start] = (current_lbl, i+1) return spans def compare(gold, pred, stats, mode='strict'): """Compares two sets of spans and records the results for future aggregation. @param gold: ground truth @param pred: predictions @param stats: the final dictionary with keys of different counts including total and specific labels ex. 
{'total': {'gold': 5, 'pred': 5}, 'Cause': {'gold': 5, 'pred': 5}} """ for start, (lbl, end) in gold.items(): stats['total']['gold'] += 1 stats[lbl]['gold'] += 1 for start, (lbl, end) in pred.items(): stats['total']['pred'] += 1 stats[lbl]['pred'] += 1 if mode == 'strict': for start, (glbl, gend) in gold.items(): if start in pred: plbl, pend = pred[start] if glbl == plbl and gend == pend: stats['total']['corr'] += 1 stats[glbl]['corr'] += 1 elif mode == 'partial': for gstart, (glbl, gend) in gold.items(): for pstart, (plbl, pend) in pred.items(): if glbl == plbl: g = set(range(gstart, gend+1)) p = set(range(pstart, pend+1)) if len(g & p) / max(len(g), len(p)) >= opt.token_percent: stats['total']['corr'] += 1 stats[glbl]['corr'] += 1 break def evaluate_iob(predicted, gold, label_field, stats): """This function will evaluate the model from bert dataloader pipeline. """ gold_cpu = gold.cpu().numpy() pred_cpu = predicted.cpu().numpy() gold_cpu = list(gold_cpu.reshape(-1)) pred_cpu = list(pred_cpu.reshape(-1)) # pred_cpu = [l for sen in predicted for l in sen] id2label = {v:k for k,v in label_field.items()} # Compute spans for the gold standard and prediction. gold_spans = to_spans(gold_cpu, id2label) pred_spans = to_spans(pred_cpu, id2label) # Finally, update the counts for correct, predicted and gold-standard spans. compare(gold_spans, pred_spans, stats, 'strict') def prf(stats): """ Computes precision, recall and F-score, given a dictionary that contains the counts of correct, predicted and gold-standard items. @params stats: the final statistics """ if stats['pred'] == 0: return 0, 0, 0 p = stats['corr']/stats['pred'] r = stats['corr']/stats['gold'] if p > 0 and r > 0: f = 2*p*r/(p+r) else: f = 0 return p, r, f
6,421
34.877095
115
py
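to_spans above converts an integer-coded BIO sequence into a {start: (label, end)} dictionary. A compact restatement on a toy sequence; it is simplified but matches the original on well-formed input:

def to_spans_simple(l_ids, voc):
    # compact restatement of utils.to_spans for well-formed BIO sequences
    spans, lbl, start = {}, None, None
    for i, l_id in enumerate(l_ids):
        tag = voc[l_id]
        if tag.startswith('B'):
            if lbl: spans[start] = (lbl, i)
            lbl, start = tag[2:], i
        elif tag.startswith('I'):
            if lbl != tag[2:]:
                if lbl: spans[start] = (lbl, i)
                lbl, start = tag[2:], i
        else:
            if lbl: spans[start] = (lbl, i)
            lbl, start = None, None
    if lbl is not None:
        spans[start] = (lbl, len(l_ids))
    return spans

voc = {0: 'O', 1: 'B-LOC', 2: 'I-LOC', 3: 'B-PER'}
print(to_spans_simple([1, 2, 0, 3, 0], voc))   # {0: ('LOC', 2), 3: ('PER', 4)}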
CaBERT-SLU
CaBERT-SLU-main/config.py
class Config: #################### For BERT fine-tuning #################### # control datatype = "e2e" retrain = False # Reuse trained model weights test_mode = "data" # "validation", "data" data_mode = "multi" #"single" # single or multi intent in data ################################# if datatype == "e2e": # Microsoft e2e dialogue dataset train_path = "data/e2e_dialogue/dialogue_data_multi_with_slots.pkl" dic_path = "data/e2e_dialogue/intent2id.pkl" if data_mode == "single" else "data/e2e_dialogue/intent2id_multi.pkl" dic_path_with_tokens = "data/e2e_dialogue/intent2id_multi_with_tokens.pkl" slot_path = "data/e2e_dialogue/slot2id.pkl" elif datatype == "sgd": # dstc8-sgd dialogue dataset train_path = "data/sgd_dialogue/dialogue_data_multi_with_slots.pkl" dic_path = "data/sgd_dialogue/intent2id.pkl" if data_mode == "single" else "data/sgd_dialogue/intent2id_multi.pkl" dic_path_with_tokens = "data/sgd_dialogue/intent2id_multi_with_tokens.pkl" slot_path = "data/sgd_dialogue/slot2id.pkl" model_path = None if not retrain else "checkpoints/best_{}_{}.pth".format(datatype, data_mode) maxlen = 60 batch_size = 8 #CaBERT-SLU: e2e 16/8 sgd 4 # multi 128 eca 8 epochs = 20 learning_rate_bert = 2e-5 #1e-3 learning_rate_classifier = 5e-3 rnn_hidden = 256 opt = Config()
1,463
39.666667
122
py
CaBERT-SLU
CaBERT-SLU-main/bert_context.py
"""For model training and inference (multi dialogue act & slot detection) """ import random import torch import torch.nn as nn from torch.autograd import Variable from torch.optim import Adam, RMSprop from transformers import BertTokenizer, BertModel, BertConfig, AdamW from keras.preprocessing.sequence import pad_sequences from sklearn.model_selection import train_test_split import pickle import copy import numpy as np import collections from tqdm import tqdm from collections import defaultdict, Counter from model import BertContextNLU, ECA from all_data_context import get_dataloader_context from config import opt from utils import * def train(**kwargs): # attributes for k, v in kwargs.items(): setattr(opt, k, v) np.random.seed(0) device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') torch.backends.cudnn.enabled = False print('Dataset to use: ', opt.train_path) print('Dictionary to use: ', opt.dic_path_with_tokens) print('Data Type: ', opt.datatype) print('Use pretrained weights: ', opt.retrain) # dataset with open(opt.dic_path_with_tokens, 'rb') as f: dic = pickle.load(f) with open(opt.slot_path, 'rb') as f: slot_dic = pickle.load(f) with open(opt.train_path, 'rb') as f: train_data = pickle.load(f) # Microsoft Dialogue Dataset / SGD Dataset indices = np.random.permutation(len(train_data)) train = np.array(train_data)[indices[:int(len(train_data)*0.7)]]#[:1000] test = np.array(train_data)[indices[int(len(train_data)*0.7):]]#[:100] train_loader = get_dataloader_context(train, dic, slot_dic, opt) val_loader = get_dataloader_context(test, dic, slot_dic, opt) # label tokens intent_tokens = [intent for name, (tag, intent) in dic.items()] intent_tok, mask_tok = load_data(intent_tokens, 10) intent_tokens = torch.zeros(len(intent_tok), 10).long().to(device) mask_tokens = torch.zeros(len(mask_tok), 10).long().to(device) for i in range(len(intent_tok)): intent_tokens[i] = torch.tensor(intent_tok[i]) for i in range(len(mask_tok)): mask_tokens[i] = torch.tensor(mask_tok[i]) # model config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertContextNLU(config, opt, len(dic), len(slot_dic)) # model = ECA(opt, len(dic), len(slot_dic)) if opt.model_path: model.load_state_dict(torch.load(opt.model_path)) print("Pretrained model has been loaded.\n") else: print("Train from scratch...") model = model.to(device) optimizer = AdamW(model.parameters(), weight_decay=0.01, lr=opt.learning_rate_bert) criterion = nn.BCEWithLogitsLoss(reduction='sum').to(device) criterion2 = nn.CrossEntropyLoss(reduction='sum').to(device) best_loss = 100 best_accuracy = 0 best_f1 = 0 #################################### Start training #################################### for epoch in range(opt.epochs): print("====== epoch %d / %d: ======"% (epoch+1, opt.epochs)) # Training Phase total_train_loss = 0 total_P = 0 total_R = 0 total_F1 = 0 total_acc = 0 model.train() ccounter = 0 for (result_ids, result_token_masks, result_masks, lengths, result_slot_labels, result_labels) in tqdm(train_loader): result_ids = result_ids.to(device) result_token_masks = result_token_masks.to(device) result_masks = result_masks.to(device) lengths = lengths.to(device) result_slot_labels = result_slot_labels.to(device) result_slot_labels = result_slot_labels.reshape(-1) result_labels = result_labels.to(device) optimizer.zero_grad() outputs, labels, slot_out = model(result_ids, result_token_masks, result_masks, lengths, 
result_slot_labels, result_labels, intent_tokens, mask_tokens) train_loss = criterion(outputs, labels) slot_loss = criterion2(slot_out, result_slot_labels) total_loss = train_loss + slot_loss total_loss.backward() optimizer.step() total_train_loss += total_loss P, R, F1, acc = f1_score_intents(outputs, labels) total_P += P total_R += R total_F1 += F1 total_acc += acc ccounter += 1 print('Average train loss: {:.4f} '.format(total_train_loss / train_loader.dataset.num_data)) precision = total_P / ccounter recall = total_R / ccounter f1 = total_F1 / ccounter print(f'P = {precision:.4f}, R = {recall:.4f}, F1 = {f1:.4f}') print('Accuracy: ', total_acc/train_loader.dataset.num_data) # Validation Phase total_val_loss = 0 total_P = 0 total_R = 0 total_F1 = 0 total_acc = 0 model.eval() ccounter = 0 stats = defaultdict(Counter) for (result_ids, result_token_masks, result_masks, lengths, result_slot_labels, result_labels) in val_loader: result_ids = result_ids.to(device) result_token_masks = result_token_masks.to(device) result_masks = result_masks.to(device) lengths = lengths.to(device) result_slot_labels = result_slot_labels.to(device) result_slot_labels = result_slot_labels.reshape(-1) result_labels = result_labels.to(device) with torch.no_grad(): outputs, labels, predicted_slot_outputs = model(result_ids, result_token_masks, result_masks, lengths, result_slot_labels, result_labels, intent_tokens, mask_tokens) val_loss = criterion(outputs, labels) total_val_loss += val_loss P, R, F1, acc = f1_score_intents(outputs, labels) total_P += P total_R += R total_F1 += F1 total_acc += acc ccounter += 1 _, index = torch.topk(predicted_slot_outputs, k=1, dim=-1) evaluate_iob(index, result_slot_labels, slot_dic, stats) print('========= Validation =========') print('Average val loss: {:.4f} '.format(total_val_loss / val_loader.dataset.num_data)) precision = total_P / ccounter recall = total_R / ccounter f1 = total_F1 / ccounter print(f'P = {precision:.4f}, R = {recall:.4f}, F1 = {f1:.4f}') print('Accuracy: ', total_acc/val_loader.dataset.num_data) val_acc = total_acc/val_loader.dataset.num_data # print slot stats p_slot, r_slot, f1_slot = prf(stats['total']) print('========= Slot =========') print(f'Slot Score: P = {p_slot:.4f}, R = {r_slot:.4f}, F1 = {f1_slot:.4f}') if f1 > best_f1: print('saving with loss of {}'.format(total_val_loss), 'improved over previous {}'.format(best_loss)) best_loss = total_val_loss best_accuracy = val_acc best_f1 = f1 best_stats = copy.deepcopy(stats) torch.save(model.state_dict(), 'checkpoints/best_{}_{}.pth'.format(opt.datatype, opt.data_mode)) print() print('Best total val loss: {:.4f}'.format(total_val_loss)) print('Best Test Accuracy: {:.4f}'.format(best_accuracy)) print('Best F1 Score: {:.4f}'.format(best_f1)) p_slot, r_slot, f1_slot = prf(best_stats['total']) print('Final evaluation on slot filling of the validation set:') print(f'Overall: P = {p_slot:.4f}, R = {r_slot:.4f}, F1 = {f1_slot:.4f}') ##################################################################### def test(**kwargs): # attributes for k, v in kwargs.items(): setattr(opt, k, v) np.random.seed(0) device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') torch.backends.cudnn.enabled = False print('Dataset to use: ', opt.train_path) print('Dictionary to use: ', opt.dic_path_with_tokens) # dataset with open(opt.dic_path_with_tokens, 'rb') as f: dic = pickle.load(f) print(dic) with open(opt.slot_path, 'rb') as f: slot_dic = pickle.load(f) reverse_dic = {v[0]: k for k,v in dic.items()} with 
open(opt.train_path, 'rb') as f: train_data = pickle.load(f) with open(opt.test_path, 'rb') as f: test_data = pickle.load(f) # Microsoft Dialogue Dataset / SGD Dataset indices = np.random.permutation(len(train_data)) train = np.array(train_data)[indices[:int(len(train_data)*0.7)]] test = np.array(train_data)[indices[int(len(train_data)*0.7):]][:1000] train_loader = get_dataloader_context(train, dic, slot_dic, opt) test_loader = get_dataloader_context(test, dic, slot_dic, opt) # label tokens intent_tokens = [intent for name, (tag, intent) in dic.items()] intent_tok, mask_tok = load_data(intent_tokens, 10) intent_tokens = torch.zeros(len(intent_tok), 10).long().to(device) mask_tokens = torch.zeros(len(mask_tok), 10).long().to(device) for i in range(len(intent_tok)): intent_tokens[i] = torch.tensor(intent_tok[i]) for i in range(len(mask_tok)): mask_tokens[i] = torch.tensor(mask_tok[i]) # model config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertContextNLU(config, opt, len(dic), len(slot_dic)) if opt.model_path: model.load_state_dict(torch.load(opt.model_path)) print("Pretrained model {} has been loaded.".format(opt.model_path)) model = model.to(device) tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True) # Run multi-intent validation if opt.test_mode == "validation": total_P = 0 total_R = 0 total_F1 = 0 total_acc = 0 model.eval() ccounter = 0 stats = defaultdict(Counter) for (result_ids, result_token_masks, result_masks, lengths, result_slot_labels, result_labels) in tqdm(test_loader): result_ids = result_ids.to(device) result_token_masks = result_token_masks.to(device) result_masks = result_masks.to(device) lengths = lengths.to(device) result_slot_labels = result_slot_labels.to(device) result_slot_labels = result_slot_labels.reshape(-1) result_labels = result_labels.to(device) with torch.no_grad(): outputs, labels, predicted_slot_outputs = model(result_ids, result_token_masks, result_masks, lengths, result_slot_labels, result_labels, intent_tokens, mask_tokens) P, R, F1, acc = f1_score_intents(outputs, labels) total_P += P total_R += R total_F1 += F1 total_acc += acc ccounter += 1 _, index = torch.topk(predicted_slot_outputs, k=1, dim=-1) evaluate_iob(index, result_slot_labels, slot_dic, stats) precision = total_P / ccounter recall = total_R / ccounter f1 = total_F1 / ccounter print(f'P = {precision:.4f}, R = {recall:.4f}, F1 = {f1:.4f}') print('Accuracy: ', total_acc/test_loader.dataset.num_data) # print slot stats p_slot, r_slot, f1_slot = prf(stats['total']) print('========= Slot =========') print(f'Slot Score: P = {p_slot:.4f}, R = {r_slot:.4f}, F1 = {f1_slot:.4f}') # Run test classification elif opt.test_mode == "data": # Validation Phase pred_labels = [] real_labels = [] error_ids = [] total_P, total_R, total_F1, total_acc = 0, 0, 0, 0 ccounter = 0 stats = defaultdict(Counter) model.eval() print(len(test_loader.dataset)) for num, (result_ids, result_token_masks, result_masks, lengths, result_slot_labels, result_labels) in enumerate(test_loader): print('predict batches: ', num) result_ids = result_ids.to(device) result_token_masks = result_token_masks.to(device) result_masks = result_masks.to(device) lengths = lengths.to(device) result_slot_labels = result_slot_labels.to(device) result_slot_labels = result_slot_labels.reshape(-1) result_labels = result_labels.to(device) # Remove padding texts_no_pad = [] for i in range(len(result_ids)): 
texts_no_pad.append(result_ids[i,:lengths[i],:]) texts_no_pad = torch.vstack(texts_no_pad) with torch.no_grad(): outputs, labels, predicted_slot_outputs, ffscores = model(result_ids, result_token_masks, result_masks, lengths, result_slot_labels, result_labels, intent_tokens, mask_tokens) # total P, R, F1, acc = f1_score_intents(outputs, labels) total_P += P total_R += R total_F1 += F1 total_acc += acc ccounter += 1 _, index = torch.topk(predicted_slot_outputs, k=1, dim=-1) evaluate_iob(index, result_slot_labels, slot_dic, stats) for i, logits in enumerate(outputs): log = torch.sigmoid(logits) correct = (labels[i][torch.where(log>0.5)[0]]).sum() total = len(torch.where(labels[i]==1)[0]) wrong_caption = tokenizer.convert_ids_to_tokens(texts_no_pad[i], skip_special_tokens=True) error_ids.append(wrong_caption) pred_ls = [p for p in torch.where(log>0.5)[0].detach().cpu().numpy()] real_ls = [i for i, r in enumerate(labels[i].detach().cpu().numpy()) if r == 1] pred_labels.append(pred_ls) real_labels.append(real_ls) with open('error_analysis/{}_{}_context_slots.txt'.format(opt.datatype, opt.data_mode), 'w') as f: f.write('----------- Examples ------------\n') for i, (caption, pred, real) in enumerate(zip(error_ids, pred_labels, real_labels)): f.write(str(i)+'\n') f.write(' '.join(caption)+'\n') p_r = [reverse_dic[p] for p in pred] r_r = [reverse_dic[r] for r in real] f.write('Predicted label: {}\n'.format(p_r)) f.write('Real label: {}\n'.format(r_r)) f.write('------\n') precision = total_P / ccounter recall = total_R / ccounter f1 = total_F1 / ccounter print(f'P = {precision:.4f}, R = {recall:.4f}, F1 = {f1:.4f}') print('Accuracy: ', total_acc/test_loader.dataset.num_data) print(len(ffscores)) with open('ffscores.pkl', 'wb') as f: pickle.dump(ffscores, f) if __name__ == '__main__': import fire fire.Fire()
15,293
36.211679
192
py
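Both training scripts threshold the sigmoid of the logits at 0.5 to decide which dialogue acts are predicted, as in f1_score_intents. A minimal restatement of that per-utterance precision/recall computation, on toy logits:

import torch

logits = torch.tensor([[2.0, -3.0, 1.5]])        # one utterance, three dialogue acts
labels = torch.tensor([[1.0, 0.0, 0.0]])

probs = torch.sigmoid(logits)
pred = torch.where(probs[0] > 0.5)[0]            # predicted act ids: tensor([0, 2])
gold = torch.where(labels[0] == 1)[0]            # gold act ids: tensor([0])

tp = len(set(pred.tolist()) & set(gold.tolist()))
precision = tp / len(pred) if len(pred) else 0
recall = tp / len(gold) if len(gold) else 0
print(precision, recall)                         # 0.5 1.0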