Loads a task with the given ID from the given queue in the given state. An integer may be passed in the load_executions parameter to indicate how many executions should be loaded (starting from the latest). If the task doesn't exist, TaskNotFound is raised.

def from_id(self, tiger, queue, state, task_id, load_executions=0):
    """
    Loads a task with the given ID from the given queue in the given
    state. An integer may be passed in the load_executions parameter
    to indicate how many executions should be loaded (starting from
    the latest). If the task doesn't exist, TaskNotFound is raised.
    """
    if load_executions:
        pipeline = tiger.connection.pipeline()
        pipeline.get(tiger._key('task', task_id))
        pipeline.lrange(tiger._key('task', task_id, 'executions'),
                        -load_executions, -1)
        serialized_data, serialized_executions = pipeline.execute()
    else:
        serialized_data = tiger.connection.get(tiger._key('task', task_id))
        serialized_executions = []
    # XXX: No timestamp for now
    if serialized_data:
        data = json.loads(serialized_data)
        executions = [json.loads(e) for e in serialized_executions if e]
        return Task(tiger, queue=queue, _data=data, _state=state,
                    _executions=executions)
    else:
        raise TaskNotFound('Task {} not found.'.format(task_id))
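For orientation, a hedged usage sketch. This assumes the method is exposed as a classmethod on tasktiger's Task (as in the tasktiger library); the queue and state names, task_id, and exact import paths are illustrative and may differ by version:

# Hedged usage sketch -- Task.from_id as a tasktiger classmethod;
# queue/state names and task_id are illustrative.
from tasktiger import TaskTiger
from tasktiger.exceptions import TaskNotFound

tiger = TaskTiger()
try:
    task = Task.from_id(tiger, 'default', 'queued', task_id,
                        load_executions=3)
except TaskNotFound:
    task = None  # the task no longer exists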
Returns a tuple with the following information:
* total items in the queue
* tasks from the given queue in the given state, latest first.

An integer may be passed in the load_executions parameter to indicate how many executions should be loaded (starting from the latest).

def tasks_from_queue(self, tiger, queue, state, skip=0, limit=1000,
                     load_executions=0):
    """
    Returns a tuple with the following information:
    * total items in the queue
    * tasks from the given queue in the given state, latest first.

    An integer may be passed in the load_executions parameter to
    indicate how many executions should be loaded (starting from the
    latest).
    """
    key = tiger._key(state, queue)
    pipeline = tiger.connection.pipeline()
    pipeline.zcard(key)
    pipeline.zrange(key, -limit-skip, -1-skip, withscores=True)
    n, items = pipeline.execute()

    tasks = []
    if items:
        tss = [datetime.datetime.utcfromtimestamp(item[1]) for item in items]
        if load_executions:
            pipeline = tiger.connection.pipeline()
            pipeline.mget([tiger._key('task', item[0]) for item in items])
            for item in items:
                pipeline.lrange(tiger._key('task', item[0], 'executions'),
                                -load_executions, -1)
            results = pipeline.execute()

            for serialized_data, serialized_executions, ts in zip(
                    results[0], results[1:], tss):
                data = json.loads(serialized_data)
                executions = [json.loads(e) for e in serialized_executions
                              if e]
                task = Task(tiger, queue=queue, _data=data, _state=state,
                            _ts=ts, _executions=executions)
                tasks.append(task)
        else:
            data = tiger.connection.mget([tiger._key('task', item[0])
                                          for item in items])
            for serialized_data, ts in zip(data, tss):
                data = json.loads(serialized_data)
                task = Task(tiger, queue=queue, _data=data, _state=state,
                            _ts=ts)
                tasks.append(task)
    return n, tasks
Queries and returns the number of past task executions.
def n_executions(self):
    """
    Queries and returns the number of past task executions.
    """
    pipeline = self.tiger.connection.pipeline()
    pipeline.exists(self.tiger._key('task', self.id))
    pipeline.llen(self.tiger._key('task', self.id, 'executions'))
    exists, n_executions = pipeline.execute()
    if not exists:
        raise TaskNotFound('Task {} not found.'.format(self.id))
    return n_executions
Set inputs after initialization

Parameters
----------
nr: integer
    length of generated time-series
    number must be power of two
qd: float
    discrete variance
b: float
    noise type:
     0 : White Phase Modulation (WPM)
    -1 : Flicker Phase Modulation (FPM)
    -2 : White Frequency Modulation (WFM)
    -3 : Flicker Frequency Modulation (FFM)
    -4 : Random Walk Frequency Modulation (RWFM)

def set_input(self, nr=2, qd=1, b=0):
    """ Set inputs after initialization

    Parameters
    ----------
    nr: integer
        length of generated time-series
        number must be power of two
    qd: float
        discrete variance
    b: float
        noise type:
         0 : White Phase Modulation (WPM)
        -1 : Flicker Phase Modulation (FPM)
        -2 : White Frequency Modulation (WFM)
        -3 : Flicker Frequency Modulation (FFM)
        -4 : Random Walk Frequency Modulation (RWFM)
    """
    self.nr = nr
    self.qd = qd
    self.b = b
Generate noise time series based on input parameters

Returns
-------
time_series: np.array
    Time series with colored noise.
    len(time_series) == nr

def generateNoise(self):
    """ Generate noise time series based on input parameters

    Returns
    -------
    time_series: np.array
        Time series with colored noise.
        len(time_series) == nr
    """
    # Fill wfb array with white noise based on given discrete variance
    wfb = np.zeros(self.nr*2)
    wfb[:self.nr] = np.random.normal(0, np.sqrt(self.qd), self.nr)
    # Generate the hfb coefficients based on the noise type
    mhb = -self.b/2.0
    hfb = np.zeros(self.nr*2)
    hfb[0] = 1.0
    indices = np.arange(self.nr-1)
    hfb[1:self.nr] = (mhb+indices)/(indices+1.0)
    hfb[:self.nr] = np.multiply.accumulate(hfb[:self.nr])
    # Perform discrete Fourier transform of wfb and hfb time series
    wfb_fft = np.fft.rfft(wfb)
    hfb_fft = np.fft.rfft(hfb)
    # Perform inverse Fourier transform of the product of wfb and hfb FFTs
    time_series = np.fft.irfft(wfb_fft*hfb_fft)[:self.nr]
    self.time_series = time_series
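A minimal usage sketch. This assumes the class above is allantools' Kasdin & Walter generator, exposed as noise_kasdin.Noise; adjust the import to your installation:

# Minimal usage sketch -- assumed import path, constructor arguments
# follow set_input() above.
from allantools.noise_kasdin import Noise
import numpy as np

n = Noise(nr=2**14, qd=1e-20, b=-2)   # white FM noise, 16384 points
n.generateNoise()
x = n.time_series                     # colored-noise series, len(x) == nr
print(len(x), np.std(x))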
Return the phase power spectral density coefficient g_b for the noise type defined by (qd, b, tau0), where tau0 is the interval between data points. Colored noise generated with (qd, b, tau0) parameters will show a phase power spectral density of

    S_x(f) = Phase_PSD(f) = g_b * f^b

Kasdin & Walter eqn (39)

def phase_psd_from_qd(self, tau0=1.0):
    """ return phase power spectral density coefficient g_b
    for noise-type defined by (qd, b, tau0)
    where tau0 is the interval between data points

    Colored noise generated with (qd, b, tau0) parameters will
    show a phase power spectral density of

        S_x(f) = Phase_PSD(f) = g_b * f^b

    Kasdin & Walter eqn (39)
    """
    return self.qd*2.0*pow(2.0*np.pi, self.b)*pow(tau0, self.b+1.0)
Return the frequency power spectral density coefficient h_a for the noise type defined by (qd, b, tau0). Colored noise generated with (qd, b, tau0) parameters will show a frequency power spectral density of

    S_y(f) = Frequency_PSD(f) = h_a * f^a

where the slope a comes from the phase PSD slope b: a = b + 2.
Kasdin & Walter eqn (39)

def frequency_psd_from_qd(self, tau0=1.0):
    """ return frequency power spectral density coefficient h_a
    for the noise type defined by (qd, b, tau0)

    Colored noise generated with (qd, b, tau0) parameters will
    show a frequency power spectral density of

        S_y(f) = Frequency_PSD(f) = h_a * f^a

    where the slope a comes from the phase PSD slope b: a = b + 2
    Kasdin & Walter eqn (39)
    """
    a = self.b + 2.0
    return self.qd*2.0*pow(2.0*np.pi, a)*pow(tau0, a-1.0)
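Since S_y(f) = (2 pi f)^2 S_x(f), the two coefficients above must satisfy h_a = (2 pi)^2 * g_b for a = b + 2, independent of tau0. A quick numeric check (sketch, reusing the Noise instance n from the sketch above):

# Consistency check (sketch): h_a = (2*pi)^2 * g_b for a = b + 2.
import numpy as np
g_b = n.phase_psd_from_qd(tau0=2.0)
h_a = n.frequency_psd_from_qd(tau0=2.0)
print(np.isclose(h_a, pow(2.0*np.pi, 2)*g_b))   # True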
return predicted ADEV of noise-type at given tau
def adev(self, tau0, tau):
    """ return predicted ADEV of noise-type at given tau
    """
    prefactor = self.adev_from_qd(tau0=tau0, tau=tau)
    c = self.c_avar()
    avar = pow(prefactor, 2)*pow(tau, c)
    return np.sqrt(avar)
return predicted MDEV of noise-type at given tau
def mdev(self, tau0, tau):
    """ return predicted MDEV of noise-type at given tau
    """
    prefactor = self.mdev_from_qd(tau0=tau0, tau=tau)
    c = self.c_mvar()
    mvar = pow(prefactor, 2)*pow(tau, c)
    return np.sqrt(mvar)
return tau exponent "c" for noise type. AVAR = prefactor * h_a * tau^c
def c_avar(self):
    """ return tau exponent "c" for noise type.
    AVAR = prefactor * h_a * tau^c
    """
    if self.b == -4:
        return 1.0
    elif self.b == -3:
        return 0.0
    elif self.b == -2:
        return -1.0
    elif self.b == -1:
        return -2.0
    elif self.b == 0:
        return -2.0
return tau exponent "c" for noise type. MVAR = prefactor * h_a * tau^c
def c_mvar(self):
    """ return tau exponent "c" for noise type.
    MVAR = prefactor * h_a * tau^c
    """
    if self.b == -4:
        return 1.0
    elif self.b == -3:
        return 0.0
    elif self.b == -2:
        return -1.0
    elif self.b == -1:
        return -2.0
    elif self.b == 0:
        return -3.0
Prefactor for Allan deviation for the noise type defined by (qd, b, tau0). Colored noise generated with (qd, b, tau0) parameters will show an Allan variance of

    AVAR = prefactor * h_a * tau^c

where a = b + 2 is the slope of the frequency PSD, and h_a is the frequency PSD prefactor S_y(f) = h_a * f^a.

The relation between a, b, c is:

     a    b   c(AVAR)  c(MVAR)
    ---------------------------
    -2   -4     1        1
    -1   -3     0        0
     0   -2    -1       -1
    +1   -1    -2       -2
    +2    0    -2       -3

Coefficients from:
S. T. Dawkins, J. J. McFerran and A. N. Luiten, "Considerations on the measurement of the stability of oscillators with frequency counters," in IEEE Transactions on Ultrasonics, Ferroelectrics, and Frequency Control, vol. 54, no. 5, pp. 918-925, May 2007. doi: 10.1109/TUFFC.2007.337

def adev_from_qd(self, tau0=1.0, tau=1.0):
    """ prefactor for Allan deviation for noise type
        defined by (qd, b, tau0)

    Colored noise generated with (qd, b, tau0) parameters will
    show an Allan variance of:

        AVAR = prefactor * h_a * tau^c

    where a = b + 2 is the slope of the frequency PSD,
    and h_a is the frequency PSD prefactor S_y(f) = h_a * f^a

    The relation between a, b, c is:

         a    b   c(AVAR)  c(MVAR)
        ---------------------------
        -2   -4     1        1
        -1   -3     0        0
         0   -2    -1       -1
        +1   -1    -2       -2
        +2    0    -2       -3

    Coefficients from:
    S. T. Dawkins, J. J. McFerran and A. N. Luiten, "Considerations on
    the measurement of the stability of oscillators with frequency
    counters," in IEEE Transactions on Ultrasonics, Ferroelectrics,
    and Frequency Control, vol. 54, no. 5, pp. 918-925, May 2007.
    doi: 10.1109/TUFFC.2007.337
    """
    g_b = self.phase_psd_from_qd(tau0)
    f_h = 0.5/tau0
    if self.b == 0:
        coeff = 3.0*f_h / (4.0*pow(np.pi, 2))   # E, White PM, tau^-1
    elif self.b == -1:
        # D, Flicker PM, tau^-1
        coeff = (1.038+3*np.log(2.0*np.pi*f_h*tau))/(4.0*pow(np.pi, 2))
    elif self.b == -2:
        coeff = 0.5                             # C, white FM, 1/sqrt(tau)
    elif self.b == -3:
        coeff = 2*np.log(2)                     # B, flicker FM, constant ADEV
    elif self.b == -4:
        coeff = 2.0*pow(np.pi, 2)/3.0           # A, RW FM, sqrt(tau)
    return np.sqrt(coeff*g_b*pow(2.0*np.pi, 2))
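A hedged cross-check sketch: the prefactor above lets us compare the predicted ADEV against an estimate computed from an actual noise realization. This assumes allantools' oadev() and the Noise class are importable as shown; agreement is statistical only, within a few percent for long series:

# Cross-check sketch (assumed imports; statistical agreement only).
import allantools
from allantools.noise_kasdin import Noise

n = Noise(nr=2**16, qd=1e-20, b=-2)   # white FM
n.generateNoise()
tau0 = 1.0
predicted = n.adev(tau0=tau0, tau=8.0)
taus, devs, errs, ns = allantools.oadev(n.time_series, rate=1.0/tau0,
                                        data_type="phase", taus=[8.0])
print(predicted, devs[0])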
Calculate the power spectral density of input signal x.

x = signal
f_sample = sampling frequency in Hz, i.e. 1/fs is the time-interval in seconds between datapoints

Scale the fft so that the output corresponds to a 1-sided PSD. Output has units of [X^2/Hz] where X is the unit of x.

def numpy_psd(x, f_sample=1.0):
    """ calculate power spectral density of input signal x

    x = signal
    f_sample = sampling frequency in Hz. i.e. 1/fs is the
               time-interval in seconds between datapoints

    scale fft so that output corresponds to 1-sided PSD
    output has units of [X^2/Hz] where X is the unit of x
    """
    psd_of_x = (2.0/(float(len(x))*f_sample)) * \
        numpy.abs(numpy.fft.rfft(x))**2
    f_axis = numpy.linspace(0, f_sample/2.0, len(psd_of_x))  # frequency axis
    return f_axis, psd_of_x
PSD routine from scipy; we can compare our own numpy result against this one.

def scipy_psd(x, f_sample=1.0, nr_segments=4):
    """ PSD routine from scipy
    we can compare our own numpy result against this one
    """
    # nperseg must be an integer, hence the floor division
    f_axis, psd_of_x = scipy.signal.welch(x, f_sample,
                                          nperseg=len(x)//nr_segments)
    return f_axis, psd_of_x
Generate a time series of white noise that has constant PSD = b0, up to the nyquist frequency fs/2.

num_points = number of samples
b0 = desired power-spectral density in [X^2/Hz] where X is the unit of x
fs = sampling frequency, i.e. 1/fs is the time-interval between datapoints

The pre-factor corresponds to the area 'box' under the PSD-curve: the PSD is at 'height' b0 and extends from 0 Hz up to the nyquist frequency fs/2.

def white(num_points=1024, b0=1.0, fs=1.0):
    """ generate time series with white noise that has constant
    PSD = b0, up to the nyquist frequency fs/2

    num_points = number of samples
    b0 = desired power-spectral density in [X^2/Hz] where X is the
         unit of x
    fs = sampling frequency, i.e. 1/fs is the time-interval between
         datapoints

    the pre-factor corresponds to the area 'box' under the PSD-curve:
    the PSD is at 'height' b0 and extends from 0 Hz up to the nyquist
    frequency fs/2
    """
    return math.sqrt(b0*fs/2.0)*numpy.random.randn(num_points)
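A quick sanity-check sketch using numpy_psd() from above: the mean of the one-sided PSD should come out near the requested b0.

# Sanity check (sketch): one-sided PSD of white() should be flat at ~b0.
x = white(num_points=2**16, b0=2.0, fs=10.0)
f, psd = numpy_psd(x, f_sample=10.0)
print(psd.mean())   # expect roughly 2.0, the requested b0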
Brownian or random walk (diffusion) noise with 1/f^2 PSD (not really a color... rather Brownian or random-walk).

num_points = number of samples
b2 = desired PSD is b2*f^-2
fs = sampling frequency

We integrate white-noise to get Brownian noise.

def brown(num_points=1024, b2=1.0, fs=1.0):
    """ Brownian or random walk (diffusion) noise with 1/f^2 PSD
    (not really a color... rather Brownian or random-walk)

    num_points = number of samples
    b2 = desired PSD is b2*f^-2
    fs = sampling frequency

    we integrate white-noise to get Brownian noise.
    """
    return (1.0/float(fs))*numpy.cumsum(
        white(num_points, b0=b2*(4.0*math.pi*math.pi), fs=fs))
N-length vector with (approximate) pink noise.
Pink noise has 1/f PSD.

def pink(N, depth=80):
    """ N-length vector with (approximate) pink noise
    pink noise has 1/f PSD
    """
    a = []
    s = iterpink(depth)
    for n in range(N):
        a.append(next(s))
    return a
Generate a sequence of samples of pink noise.

Pink noise generator from http://pydoc.net/Python/lmj.sound/0.1.1/lmj.sound.noise/
Based on the Voss-McCartney algorithm, discussion and code examples at http://www.firstpr.com.au/dsp/pink-noise/

depth: Use this many samples of white noise to calculate the output. A higher number is slower to run, but renders low frequencies with more correct power spectra.

Generates a never-ending sequence of floating-point values. Any continuous set of these samples will tend to have a 1/f power spectrum.

def iterpink(depth=20):
    """Generate a sequence of samples of pink noise.

    pink noise generator from
    http://pydoc.net/Python/lmj.sound/0.1.1/lmj.sound.noise/

    Based on the Voss-McCartney algorithm, discussion and code examples
    at http://www.firstpr.com.au/dsp/pink-noise/

    depth: Use this many samples of white noise to calculate the
    output. A higher number is slower to run, but renders low
    frequencies with more correct power spectra.

    Generates a never-ending sequence of floating-point values. Any
    continuous set of these samples will tend to have a 1/f power
    spectrum.
    """
    values = numpy.random.randn(depth)
    smooth = numpy.random.randn(depth)
    source = numpy.random.randn(depth)
    sumvals = values.sum()
    i = 0
    while True:
        yield sumvals + smooth[i]

        # advance the index by 1. if the index wraps, generate noise to
        # use in the calculations, but do not update any of the pink
        # noise values.
        i += 1
        if i == depth:
            i = 0
            smooth = numpy.random.randn(depth)
            source = numpy.random.randn(depth)
            continue

        # count trailing zeros in i
        c = 0
        while not (i >> c) & 1:
            c += 1

        # replace value c with a new source element
        sumvals += source[i] - values[c]
        values[c] = source[i]
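To see the approximate 1/f character, one can fit the log-log slope of the generator's PSD; a sketch using pink() and numpy_psd() from above (the slope should land near -1):

# Sketch: log-log PSD slope of pink() output should be close to -1.
import numpy
x = numpy.array(pink(2**14, depth=80))
f, psd = numpy_psd(x - x.mean())
mask = f > 0
slope = numpy.polyfit(numpy.log(f[mask]), numpy.log(psd[mask]), 1)[0]
print(slope)   # roughly -1 for 1/f noise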
plot a line with the slope alpha
def plotline(plt, alpha, taus, style, label=""):
    """ plot a line with the slope alpha """
    y = [pow(tt, alpha) for tt in taus]
    plt.loglog(taus, y, style, label=label)
B1 ratio for noise identification: ratio of Standard Variance to AVAR.

def b1_noise_id(x, af, rate):
    """ B1 ratio for noise identification
        ratio of Standard Variance to AVAR
    """
    (taus, devs, errs, ns) = at.adev(x, taus=[af*rate],
                                     data_type="phase", rate=rate)
    oadev_x = devs[0]
    y = np.diff(x)
    y_cut = np.array(y[:len(y)-(len(y) % af)])  # cut to length
    assert len(y_cut) % af == 0
    y_shaped = y_cut.reshape((int(len(y_cut)/af), af))
    y_averaged = np.average(y_shaped, axis=1)  # average
    var = np.var(y_averaged, ddof=1)
    return var/pow(oadev_x, 2.0)
Use matplotlib methods for plotting.

Parameters
----------
atDataset : allantools.Dataset()
    a dataset with computed data
errorbars : boolean
    Plot errorbars. Defaults to False
grid : boolean
    Plot grid. Defaults to False

def plot(self, atDataset, errorbars=False, grid=False):
    """ use matplotlib methods for plotting

    Parameters
    ----------
    atDataset : allantools.Dataset()
        a dataset with computed data
    errorbars : boolean
        Plot errorbars. Defaults to False
    grid : boolean
        Plot grid. Defaults to False
    """
    if errorbars:
        self.ax.errorbar(atDataset.out["taus"],
                         atDataset.out["stat"],
                         yerr=atDataset.out["stat_err"])
    else:
        self.ax.plot(atDataset.out["taus"],
                     atDataset.out["stat"])
    self.ax.set_xlabel("Tau")
    self.ax.set_ylabel(atDataset.out["stat_id"])
    self.ax.grid(grid, which="minor", ls="-", color='0.65')
    self.ax.grid(grid, which="major", ls="-", color='0.25')
Returns the confidence interval (dev_min, dev_max) for a given deviation dev, equivalent degrees of freedom edf, and degree of confidence ci.

Parameters
----------
dev: float
    Mean value (e.g. adev) around which we produce the confidence interval
edf: float
    Equivalent degrees of freedom
ci: float, defaults to scipy.special.erf(1/math.sqrt(2))
    for 1-sigma standard error set
    ci = scipy.special.erf(1/math.sqrt(2)) = 0.68268949213708585

Returns
-------
(dev_min, dev_max): (float, float)
    Confidence interval

def confidence_interval(dev, edf, ci=ONE_SIGMA_CI):
    """ returns confidence interval (dev_min, dev_max)
        for a given deviation dev, equivalent degrees of freedom edf,
        and degree of confidence ci.

    Parameters
    ----------
    dev: float
        Mean value (e.g. adev) around which we produce the confidence
        interval
    edf: float
        Equivalent degrees of freedom
    ci: float, defaults to scipy.special.erf(1/math.sqrt(2))
        for 1-sigma standard error set
        ci = scipy.special.erf(1/math.sqrt(2)) = 0.68268949213708585

    Returns
    -------
    (dev_min, dev_max): (float, float)
        Confidence interval
    """
    ci_l = min(np.abs(ci), np.abs((ci-1))) / 2
    ci_h = 1 - ci_l
    # function from scipy, works OK, but scipy is large and slow to build
    chi2_l = scipy.stats.chi2.ppf(ci_l, edf)
    chi2_h = scipy.stats.chi2.ppf(ci_h, edf)
    variance = dev*dev
    var_l = float(edf) * variance / chi2_h  # NIST SP1065 eqn (45)
    var_h = float(edf) * variance / chi2_l
    return (np.sqrt(var_l), np.sqrt(var_h))
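Usage sketch: a 1-sigma (68.3%) interval around a deviation estimate, with an equivalent-degrees-of-freedom value such as one produced by the edf functions below. The numbers here are illustrative only:

# Usage sketch (illustrative numbers): 68.3% CI around an ADEV estimate.
dev = 1.0e-11          # point estimate, e.g. from adev()
edf = 40.0             # e.g. from edf_greenhall() below
(dev_min, dev_max) = confidence_interval(dev, edf)
print(dev_min, dev_max)   # asymmetric interval, dev_min < dev < dev_max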
Returns the confidence interval (dev_min, dev_max) for a given deviation dev = Xdev( x, tau = af*(1/rate) ).

Steps:
1) identify noise type
2) compute EDF
3) compute confidence interval

Parameters
----------
x: numpy.array
    time-series
dev: float
    Mean value (e.g. adev) around which we produce the confidence interval
af: int
    averaging factor
dev_type: string
    adev, oadev, mdev, tdev, hdev, ohdev
data_type: "phase" or "freq"
ci: float, defaults to scipy.special.erf(1/math.sqrt(2))
    for 1-sigma standard error set
    ci = scipy.special.erf(1/math.sqrt(2)) = 0.68268949213708585

Returns
-------
(dev_min, dev_max): (float, float)
    Confidence interval

def confidence_interval_noiseID(x, dev, af, dev_type="adev",
                                data_type="phase", ci=ONE_SIGMA_CI):
    """ returns confidence interval (dev_min, dev_max)
        for a given deviation dev = Xdev( x, tau = af*(1/rate) )

    steps:
    1) identify noise type
    2) compute EDF
    3) compute confidence interval

    Parameters
    ----------
    x: numpy.array
        time-series
    dev: float
        Mean value (e.g. adev) around which we produce the confidence
        interval
    af: int
        averaging factor
    dev_type: string
        adev, oadev, mdev, tdev, hdev, ohdev
    data_type: "phase" or "freq"
    ci: float, defaults to scipy.special.erf(1/math.sqrt(2))
        for 1-sigma standard error set
        ci = scipy.special.erf(1/math.sqrt(2)) = 0.68268949213708585

    Returns
    -------
    (dev_min, dev_max): (float, float)
        Confidence interval
    """
    # 1) noise ID
    dmax = 2
    if (dev_type == "hdev") or (dev_type == "ohdev"):
        dmax = 3
    alpha_int = autocorr_noise_id(x, int(af), data_type=data_type,
                                  dmin=0, dmax=dmax)[0]
    # 2) EDF
    if dev_type == "adev":
        edf = edf_greenhall(alpha=alpha_int, d=2, m=af, N=len(x),
                            overlapping=False, modified=False)
    elif dev_type == "oadev":
        edf = edf_greenhall(alpha=alpha_int, d=2, m=af, N=len(x),
                            overlapping=True, modified=False)
    elif (dev_type == "mdev") or (dev_type == "tdev"):
        edf = edf_greenhall(alpha=alpha_int, d=2, m=af, N=len(x),
                            overlapping=True, modified=True)
    elif dev_type == "hdev":
        edf = edf_greenhall(alpha=alpha_int, d=3, m=af, N=len(x),
                            overlapping=False, modified=False)
    elif dev_type == "ohdev":
        edf = edf_greenhall(alpha=alpha_int, d=3, m=af, N=len(x),
                            overlapping=True, modified=False)
    else:
        raise NotImplementedError
    # 3) confidence interval
    (low, high) = confidence_interval(dev, edf, ci)
    return (low, high)
R(n) ratio for noise identification: ratio of MVAR to AVAR.

def rn(x, af, rate):
    """ R(n) ratio for noise identification
        ratio of MVAR to AVAR
    """
    (taus, devs, errs, ns) = at.adev(x, taus=[af*rate],
                                     data_type='phase', rate=rate)
    oadev_x = devs[0]
    (mtaus, mdevs, errs, ns) = at.mdev(x, taus=[af*rate],
                                       data_type='phase', rate=rate)
    mdev_x = mdevs[0]
    rn = pow(mdev_x/oadev_x, 2)
    return rn
R(n) ratio expected from theory for a given noise type; alpha = b + 2.

def rn_theory(af, b):
    """ R(n) ratio expected from theory for given noise type

    alpha = b + 2
    """
    # From IEEE1139-2008
    #   alpha   beta    ADEV_mu MDEV_mu Rn_mu
    #   -2      -4       1       1       0      Random Walk FM
    #   -1      -3       0       0       0      Flicker FM
    #    0      -2      -1      -1       0      White FM
    #    1      -1      -2      -2       0      Flicker PM
    #    2       0      -2      -3      -1      White PM
    #
    # (a=-3 flicker walk FM)
    # (a=-4 random run FM)
    if b == 0:
        return pow(af, -1)
    elif b == -1:
        # f_h = 0.5/tau0 (assumed!)
        # af = tau/tau0
        # so f_h*tau = 0.5/tau0 * af*tau0 = 0.5*af
        avar = (1.038+3*np.log(2*np.pi*0.5*af)) / (4.0*pow(np.pi, 2))
        mvar = 3*np.log(256.0/27.0)/(8.0*pow(np.pi, 2))
        return mvar/avar
    else:
        return pow(af, 0)
R(n) ratio boundary for selecting between [b_hi-1, b_hi]; alpha = b + 2.

def rn_boundary(af, b_hi):
    """
    R(n) ratio boundary for selecting between [b_hi-1, b_hi]
    alpha = b + 2
    """
    # geometric mean of the two adjacent theoretical ratios
    return np.sqrt(rn_theory(af, b_hi)*rn_theory(af, b_hi-1))
Expected B1 ratio for given time-series length N and exponent mu.
FIXME: add reference (paper & link)

The exponents are defined as
    S_y(f) = h_a f^alpha    (power spectrum of y)
    S_x(f) = g_b f^b        (power spectrum of x)
    bias = const * tau^mu

and (b, alpha, mu) relate to each other by:

     b   alpha   mu
     0    +2     -2
    -1    +1     -2    resolve between -2 cases with R(n)
    -2     0     -1
    -3    -1      0
    -4    -2     +1
    -5    -3     +2
    -6    -4     +3

For HDEV, apply B1 to frequency data, and add +2 to the resulting mu.

def b1_theory(N, mu):
    """ Expected B1 ratio for given time-series length N and
        exponent mu

    FIXME: add reference (paper & link)

    The exponents are defined as
        S_y(f) = h_a f^alpha    (power spectrum of y)
        S_x(f) = g_b f^b        (power spectrum of x)
        bias = const * tau^mu

    and (b, alpha, mu) relate to each other by:

         b   alpha   mu
         0    +2     -2
        -1    +1     -2    resolve between -2 cases with R(n)
        -2     0     -1
        -3    -1      0
        -4    -2     +1
        -5    -3     +2
        -6    -4     +3

    For HDEV, apply B1 to frequency data, and add +2 to the
    resulting mu.
    """
    # see Table 3 of Howe 2000
    if mu == 2:
        return float(N)*(float(N)+1.0)/6.0
    elif mu == 1:
        return float(N)/2.0
    elif mu == 0:
        return N*np.log(N)/(2.0*(N-1.0)*np.log(2))
    elif mu == -1:
        return 1
    elif mu == -2:
        return (pow(N, 2)-1.0)/(1.5*N*(N-1.0))
    else:
        up = N*(1.0-pow(N, mu))
        down = 2*(N-1.0)*(1-pow(2.0, mu))
        return up/down
B1 ratio boundary for selecting between [b_hi-1, b_hi]; alpha = b + 2.

def b1_boundary(b_hi, N):
    """
    B1 ratio boundary for selecting between [b_hi-1, b_hi]
    alpha = b + 2
    """
    b_lo = b_hi-1
    b1_lo = b1_theory(N, b_to_mu(b_lo))
    b1_hi = b1_theory(N, b_to_mu(b_hi))
    if b1_lo >= -4:
        return np.sqrt(b1_lo*b1_hi)  # geometric mean
    else:
        return 0.5*(b1_lo+b1_hi)     # arithmetic mean
Lag-1 autocorrelation function as defined in Riley 2004, Eqn (2). Used by autocorr_noise_id().

Parameters
----------
x: numpy.array
    time-series

Returns
-------
ACF: float
    Lag-1 autocorrelation for input time-series x

Notes
-----
* a faster algorithm based on FFT might be better!?
* numpy.corrcoef() gives similar but not identical results.
    # c = np.corrcoef(np.array(x[:-lag]), np.array(x[lag:]))
    # r1 = c[0, 1]  # lag-1 autocorrelation of x

def lag1_acf(x, detrend_deg=1):
    """ Lag-1 autocorrelation function
        as defined in Riley 2004, Eqn (2)
        used by autocorr_noise_id()

    Parameters
    ----------
    x: numpy.array
        time-series

    Returns
    -------
    ACF: float
        Lag-1 autocorrelation for input time-series x

    Notes
    -----
    * a faster algorithm based on FFT might be better!?
    * numpy.corrcoef() gives similar but not identical results.
        # c = np.corrcoef(np.array(x[:-lag]), np.array(x[lag:]))
        # r1 = c[0, 1]  # lag-1 autocorrelation of x
    """
    mu = np.mean(x)
    a = 0
    b = 0
    for n in range(len(x)-1):
        a = a + (x[n]-mu)*(x[n+1]-mu)
    for n in range(len(x)):
        b = b + pow(x[n]-mu, 2)
    return a/b
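The Notes above hint that a faster implementation is possible; a vectorized numpy sketch that returns the same value without Python-level loops (the function name is ours, not the library's):

# Vectorized equivalent of lag1_acf() (sketch; same result, no loops).
import numpy as np

def lag1_acf_vectorized(x):
    d = np.asarray(x, dtype=float)
    d = d - d.mean()
    return np.sum(d[:-1]*d[1:]) / np.sum(d*d)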
Lag-1 autocorrelation based noise identification.

Parameters
----------
x: numpy.array
    phase or fractional frequency time-series data
    minimum recommended length is len(x)>30 roughly.
af: int
    averaging factor
data_type: string {'phase', 'freq'}
    "phase" for phase data in seconds
    "freq" for fractional frequency data
dmin: int
    minimum required number of differentiations in the algorithm
dmax: int
    maximum number of differentiations
    defaults to 2 for ADEV
    set to 3 for HDEV

Returns
-------
alpha_int: int
    noise-slope as integer
alpha: float
    noise-slope as float
d: int
    number of differentiations of the time-series performed
rho: float
    lag-1 autocorrelation ratio at the final differentiation

Notes
-----
http://www.stable32.com/Auto.pdf
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.503.9864&rep=rep1&type=pdf

Power law noise identification using the lag 1 autocorrelation
Riley, W.J. et al.
18th European Frequency and Time Forum (EFTF 2004)
https://ieeexplore.ieee.org/document/5075021

def autocorr_noise_id(x, af, data_type="phase", dmin=0, dmax=2):
    """ Lag-1 autocorrelation based noise identification

    Parameters
    ----------
    x: numpy.array
        phase or fractional frequency time-series data
        minimum recommended length is len(x)>30 roughly.
    af: int
        averaging factor
    data_type: string {'phase', 'freq'}
        "phase" for phase data in seconds
        "freq" for fractional frequency data
    dmin: int
        minimum required number of differentiations in the algorithm
    dmax: int
        maximum number of differentiations
        defaults to 2 for ADEV
        set to 3 for HDEV

    Returns
    -------
    alpha_int: int
        noise-slope as integer
    alpha: float
        noise-slope as float
    d: int
        number of differentiations of the time-series performed
    rho: float
        lag-1 autocorrelation ratio at the final differentiation

    Notes
    -----
    http://www.stable32.com/Auto.pdf
    http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.503.9864&rep=rep1&type=pdf

    Power law noise identification using the lag 1 autocorrelation
    Riley, W.J. et al.
    18th European Frequency and Time Forum (EFTF 2004)
    https://ieeexplore.ieee.org/document/5075021
    """
    d = 0  # number of differentiations
    lag = 1
    if data_type == "phase":
        if af > 1:
            # x = scipy.signal.decimate(x, af, n=1, ftype='fir')
            x = x[0:len(x):af]  # decimate by averaging factor
        # remove quadratic trend (frequency offset and drift)
        x = detrend(x, deg=2)
    elif data_type == "freq":
        # average by averaging factor
        y_cut = np.array(x[:len(x)-(len(x) % af)])  # cut to length
        assert len(y_cut) % af == 0
        y_shaped = y_cut.reshape((int(len(y_cut)/af), af))
        x = np.average(y_shaped, axis=1)  # average
        x = detrend(x, deg=1)  # remove frequency drift

    # require minimum length for time-series
    if len(x) < 30:
        print("autocorr_noise_id() Don't know how to do noise-ID for"
              " time-series length= %d" % len(x))
        raise NotImplementedError

    while True:
        r1 = lag1_acf(x)
        rho = r1/(1.0+r1)
        if d >= dmin and (rho < 0.25 or d >= dmax):
            p = -2*(rho+d)
            phase_add2 = 0
            if data_type == "phase":
                phase_add2 = 2
            alpha = p+phase_add2
            alpha_int = int(-1.0*np.round(2*rho) - 2.0*d)+phase_add2
            return alpha_int, alpha, d, rho
        else:
            x = np.diff(x)
            d = d + 1
Remove polynomial from data. Used by autocorr_noise_id().

Parameters
----------
x: numpy.array
    time-series
deg: int
    degree of polynomial to remove from x

Returns
-------
x_detrended: numpy.array
    detrended time-series

def detrend(x, deg=1):
    """ remove polynomial from data.
        used by autocorr_noise_id()

    Parameters
    ----------
    x: numpy.array
        time-series
    deg: int
        degree of polynomial to remove from x

    Returns
    -------
    x_detrended: numpy.array
        detrended time-series
    """
    t = range(len(x))
    p = np.polyfit(t, x, deg)
    residual = x - np.polyval(p, t)
    return residual
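Usage sketch: removing a deterministic linear drift before noise identification, so that the residual is dominated by the noise itself.

# Sketch: remove linear drift; remaining fitted slope should be ~0.
import numpy as np
t = np.arange(1000)
x = 1e-9*t + 1e-8*np.random.randn(1000)   # drift plus white noise
r = detrend(x, deg=1)
print(np.polyfit(t, r, 1)[0])             # close to zero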
Eqn (13) from Greenhall2004
def edf_greenhall_simple(alpha, d, m, S, F, N):
    """ Eqn (13) from Greenhall2004 """
    L = m/F+m*d  # length of filter applied to phase samples
    M = 1 + np.floor(S*(N-L) / m)
    J = min(M, (d+1)*S)
    inv_edf = (1.0/(pow(greenhall_sz(0, F, alpha, d), 2)*M)) * \
        greenhall_BasicSum(J, M, S, F, alpha, d)
    return 1.0/inv_edf
Returns Equivalent degrees of freedom.

Parameters
----------
alpha: int
    noise type, +2...-4
d: int
    1 first-difference variance
    2 Allan variance
    3 Hadamard variance
    require alpha+2*d>1
m: int
    averaging factor
    tau = m*tau0 = m*(1/rate)
N: int
    number of phase observations (length of time-series)
overlapping: bool
    True for oadev, ohdev
modified: bool
    True for mdev, tdev

Returns
-------
edf: float
    Equivalent degrees of freedom

Greenhall, Riley, 2004
https://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/20050061319.pdf
UNCERTAINTY OF STABILITY VARIANCES BASED ON FINITE DIFFERENCES

Notes
-----
Used for the following deviations (see http://www.wriley.com/CI2.pdf page 8):
adev()
oadev()
mdev()
tdev()
hdev()
ohdev()

def edf_greenhall(alpha, d, m, N, overlapping=False, modified=False,
                  verbose=False):
    """ returns Equivalent degrees of freedom

    Parameters
    ----------
    alpha: int
        noise type, +2...-4
    d: int
        1 first-difference variance
        2 Allan variance
        3 Hadamard variance
        require alpha+2*d>1
    m: int
        averaging factor
        tau = m*tau0 = m*(1/rate)
    N: int
        number of phase observations (length of time-series)
    overlapping: bool
        True for oadev, ohdev
    modified: bool
        True for mdev, tdev

    Returns
    -------
    edf: float
        Equivalent degrees of freedom

    Greenhall, Riley, 2004
    https://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/20050061319.pdf
    UNCERTAINTY OF STABILITY VARIANCES BASED ON FINITE DIFFERENCES

    Notes
    -----
    Used for the following deviations
    (see http://www.wriley.com/CI2.pdf page 8)
    adev()
    oadev()
    mdev()
    tdev()
    hdev()
    ohdev()
    """
    if modified:
        F = 1  # F filter factor: 1 modified variance, m unmodified variance
    else:
        F = int(m)
    if overlapping:
        # S stride factor: 1 nonoverlapped estimator, m overlapped
        # estimator (estimator stride = tau/S)
        S = int(m)
    else:
        S = 1
    assert(alpha+2*d > 1.0)
    L = m/F+m*d  # length of filter applied to phase samples
    M = 1 + np.floor(S*(N-L) / m)
    J = min(M, (d+1)*S)
    J_max = 100
    r = M/S
    if int(F) == 1 and modified:  # case 1, modified variances, all alpha
        if J <= J_max:
            inv_edf = (1.0/(pow(greenhall_sz(0, 1, alpha, d), 2)*M)) * \
                greenhall_BasicSum(J, M, S, 1, alpha, d)
            if verbose:
                print("case 1.1 edf= %3f" % float(1.0/inv_edf))
            return 1.0/inv_edf
        elif r > d+1:
            (a0, a1) = greenhall_table1(alpha, d)
            inv_edf = (1.0/r)*(a0-a1/r)
            if verbose:
                print("case 1.2 edf= %3f" % float(1.0/inv_edf))
            return 1.0/inv_edf
        else:
            m_prime = J_max/r
            inv_edf = (1.0/(pow(greenhall_sz(0, F, alpha, d), 2)*J_max)) * \
                greenhall_BasicSum(J_max, J_max, m_prime, 1, alpha, d)
            if verbose:
                print("case 1.3 edf= %3f" % float(1.0/inv_edf))
            return 1.0/inv_edf
    elif int(F) == int(m) and int(alpha) <= 0 and not modified:
        # case 2, unmodified variances, alpha <= 0
        if J <= J_max:
            if m*(d+1) <= J_max:
                m_prime = m
                variant = "a"
            else:
                m_prime = float('inf')
                variant = "b"
            inv_edf = (1.0/(pow(greenhall_sz(0, m_prime, alpha, d), 2)*M)) * \
                greenhall_BasicSum(J, M, S, m_prime, alpha, d)
            if verbose:
                print("case 2.1%s edf= %3f" % (variant, float(1.0/inv_edf)))
            return 1.0/inv_edf
        elif r > d+1:
            (a0, a1) = greenhall_table2(alpha, d)
            inv_edf = (1.0/r)*(a0-a1/r)
            if verbose:
                print("case 2.2 edf= %3f" % float(1.0/inv_edf))
            return 1.0/inv_edf
        else:
            m_prime = J_max/r
            inv_edf = (1.0/(pow(greenhall_sz(0, float('inf'), alpha, d), 2)
                            * J_max)) * \
                greenhall_BasicSum(J_max, J_max, m_prime, float('inf'),
                                   alpha, d)
            if verbose:
                print("case 2.3 edf= %3f" % float(1.0/inv_edf))
            return 1.0/inv_edf
    elif int(F) == int(m) and int(alpha) == 1 and not modified:
        # case 3, unmodified variances, alpha=1
        if J <= J_max:
            # note: m<1e6 to avoid roundoff
            inv_edf = (1.0/(pow(greenhall_sz(0, m, 1, d), 2)*M)) * \
                greenhall_BasicSum(J, M, S, m, 1, d)
            if verbose:
                print("case 3.1 edf= %3f" % float(1.0/inv_edf))
            return 1.0/inv_edf
        elif r > d+1:
            (a0, a1) = greenhall_table2(alpha, d)
            (b0, b1) = greenhall_table3(alpha, d)
            inv_edf = (1.0/(pow(b0+b1*np.log(m), 2)*r))*(a0-a1/r)
            if verbose:
                print("case 3.2 edf= %3f" % float(1.0/inv_edf))
            return 1.0/inv_edf
        else:
            m_prime = J_max/r
            (b0, b1) = greenhall_table3(alpha, d)
            inv_edf = (1.0/(pow(b0+b1*np.log(m), 2)*J_max)) * \
                greenhall_BasicSum(J_max, J_max, m_prime, m_prime, 1, d)
            if verbose:
                print("case 3.3 edf= %3f" % float(1.0/inv_edf))
            return 1.0/inv_edf
    elif int(F) == int(m) and int(alpha) == 2 and not modified:
        # case 4, unmodified variances, alpha=2
        K = np.ceil(r)
        if K <= d:
            raise NotImplementedError  # FIXME: add formula from the paper!
        else:
            a0 = scipy.special.binom(4*d, 2*d) / \
                pow(scipy.special.binom(2*d, d), 2)
            a1 = d/2.0
            inv_edf = (1.0/M)*(a0-a1/r)
            if verbose:
                print("case 4.2 edf= %3f" % float(1.0/inv_edf))
            return 1.0/inv_edf

    print("greenhall_edf() no matching case!")
    raise NotImplementedError
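Usage sketch: EDF for an overlapping ADEV estimate (d=2) of N=1024 phase points at averaging factor m=8, assuming white FM noise (alpha=0):

# Usage sketch: EDF for oadev on white FM noise.
edf = edf_greenhall(alpha=0, d=2, m=8, N=1024, overlapping=True,
                    modified=False)
print(edf)   # feed this into confidence_interval()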
Eqn (10) from Greenhall2004
def greenhall_BasicSum(J, M, S, F, alpha, d):
    """ Eqn (10) from Greenhall2004 """
    first = pow(greenhall_sz(0, F, alpha, d), 2)
    second = (1-float(J)/float(M)) * \
        pow(greenhall_sz(float(J)/float(S), F, alpha, d), 2)
    third = 0
    for j in range(1, int(J)):
        third += 2*(1.0-float(j)/float(M)) * \
            pow(greenhall_sz(float(j)/float(S), F, alpha, d), 2)
    return first+second+third
Eqn (9) from Greenhall2004
def greenhall_sz(t, F, alpha, d):
    """ Eqn (9) from Greenhall2004 """
    if d == 1:
        a = 2*greenhall_sx(t, F, alpha)
        b = greenhall_sx(t-1.0, F, alpha)
        c = greenhall_sx(t+1.0, F, alpha)
        return a-b-c
    elif d == 2:
        a = 6*greenhall_sx(t, F, alpha)
        b = 4*greenhall_sx(t-1.0, F, alpha)
        c = 4*greenhall_sx(t+1.0, F, alpha)
        dd = greenhall_sx(t-2.0, F, alpha)
        e = greenhall_sx(t+2.0, F, alpha)
        return a-b-c+dd+e
    elif d == 3:
        a = 20.0*greenhall_sx(t, F, alpha)
        b = 15.0*greenhall_sx(t-1.0, F, alpha)
        c = 15.0*greenhall_sx(t+1.0, F, alpha)
        dd = 6.0*greenhall_sx(t-2.0, F, alpha)
        e = 6.0*greenhall_sx(t+2.0, F, alpha)
        f = greenhall_sx(t-3.0, F, alpha)
        g = greenhall_sx(t+3.0, F, alpha)
        return a-b-c+dd+e-f-g
    assert(0)
Eqn (8) from Greenhall2004
def greenhall_sx(t, F, alpha):
    """ Eqn (8) from Greenhall2004 """
    if F == float('inf'):
        return greenhall_sw(t, alpha+2)
    a = 2*greenhall_sw(t, alpha)
    b = greenhall_sw(t-1.0/float(F), alpha)
    c = greenhall_sw(t+1.0/float(F), alpha)
    return pow(F, 2)*(a-b-c)
Eqn (7) from Greenhall2004
def greenhall_sw(t, alpha):
    """ Eqn (7) from Greenhall2004 """
    alpha = int(alpha)
    if alpha == 2:
        return -np.abs(t)
    elif alpha == 1:
        if t == 0:
            return 0
        else:
            return pow(t, 2)*np.log(np.abs(t))
    elif alpha == 0:
        return np.abs(pow(t, 3))
    elif alpha == -1:
        if t == 0:
            return 0
        else:
            return pow(t, 4)*np.log(np.abs(t))
    elif alpha == -2:
        return np.abs(pow(t, 5))
    elif alpha == -3:
        if t == 0:
            return 0
        else:
            return pow(t, 6)*np.log(np.abs(t))
    elif alpha == -4:
        return np.abs(pow(t, 7))
    assert(0)
Table 2 from Greenhall 2004
def greenhall_table2(alpha, d):
    """ Table 2 from Greenhall 2004 """
    row_idx = int(-alpha+2)  # map 2 -> row0 and -4 -> row6
    assert(row_idx in [0, 1, 2, 3, 4, 5])
    col_idx = int(d-1)
    table2 = [
        # alpha = +2
        [(3.0/2.0, 1.0/2.0), (35.0/18.0, 1.0), (231.0/100.0, 3.0/2.0)],
        [(78.6, 25.2), (790.0, 410.0), (9950.0, 6520.0)],
        # alpha = 0
        [(2.0/3.0, 1.0/6.0), (2.0/3.0, 1.0/3.0), (7.0/9.0, 1.0/2.0)],
        [(-1, -1), (0.852, 0.375), (0.997, 0.617)],  # -1
        [(-1, -1), (1.079, 0.368), (1.033, 0.607)],  # -2
        [(-1, -1), (-1, -1), (1.053, 0.553)],        # -3
        [(-1, -1), (-1, -1), (1.302, 0.535)],        # alpha = -4
        ]
    return table2[row_idx][col_idx]
Table 1 from Greenhall 2004
def greenhall_table1(alpha, d):
    """ Table 1 from Greenhall 2004 """
    row_idx = int(-alpha+2)  # map 2 -> row0 and -4 -> row6
    col_idx = int(d-1)
    table1 = [
        # alpha = +2
        [(2.0/3.0, 1.0/3.0), (7.0/9.0, 1.0/2.0), (22.0/25.0, 2.0/3.0)],
        [(0.840, 0.345), (0.997, 0.616), (1.141, 0.843)],
        [(1.079, 0.368), (1.033, 0.607), (1.184, 0.848)],
        [(-1, -1), (1.048, 0.534), (1.180, 0.816)],  # -1
        [(-1, -1), (1.302, 0.535), (1.175, 0.777)],  # -2
        [(-1, -1), (-1, -1), (1.194, 0.703)],        # -3
        [(-1, -1), (-1, -1), (1.489, 0.702)],        # alpha = -4
        ]
    return table1[row_idx][col_idx]
Equivalent degrees of freedom for Total Deviation FIXME: what is the right behavior for alpha outside 0,-1,-2? NIST SP1065 page 41, Table 7
def edf_totdev(N, m, alpha):
    """ Equivalent degrees of freedom for Total Deviation
        FIXME: what is the right behavior for alpha outside 0,-1,-2?

        NIST SP1065 page 41, Table 7
    """
    alpha = int(alpha)
    if alpha in [0, -1, -2]:
        # alpha  0 WFM
        # alpha -1 FFM
        # alpha -2 RWFM
        NIST_SP1065_table7 = [(1.50, 0.0), (1.17, 0.22), (0.93, 0.36)]
        (b, c) = NIST_SP1065_table7[int(abs(alpha))]
        return b*(float(N)/float(m))-c
    else:
        return edf_simple(N, m, alpha)
Equivalent degrees of freedom for Modified Total Deviation NIST SP1065 page 41, Table 8
def edf_mtotdev(N, m, alpha):
    """ Equivalent degrees of freedom for Modified Total Deviation

        NIST SP1065 page 41, Table 8
    """
    assert(alpha in [2, 1, 0, -1, -2])
    NIST_SP1065_table8 = [(1.90, 2.1), (1.20, 1.40), (1.10, 1.2),
                          (0.85, 0.50), (0.75, 0.31)]
    (b, c) = NIST_SP1065_table8[abs(alpha-2)]
    edf = b*(float(N)/float(m))-c
    print("mtotdev b,c= ", (b, c), " edf=", edf)
    return edf
Equivalent degrees of freedom. Simple approximate formulae.

Parameters
----------
N : int
    the number of phase samples
m : int
    averaging factor, tau = m * tau0
alpha: int
    exponent of f for the frequency PSD:
    'wp' returns white phase noise.             alpha=+2
    'wf' returns white frequency noise.         alpha= 0
    'fp' returns flicker phase noise.           alpha=+1
    'ff' returns flicker frequency noise.       alpha=-1
    'rf' returns random walk frequency noise.   alpha=-2
    If the input is not recognized, it defaults to idealized,
    uncorrelated noise with (N-1) degrees of freedom.

Notes
-----
S. Stein, Frequency and Time - Their Measurement and Characterization. Precision Frequency Control Vol 2, 1985, pp 191-416. http://tf.boulder.nist.gov/general/pdf/666.pdf

Returns
-------
edf : float
    Equivalent degrees of freedom

def edf_simple(N, m, alpha):
    """Equivalent degrees of freedom.
    Simple approximate formulae.

    Parameters
    ----------
    N : int
        the number of phase samples
    m : int
        averaging factor, tau = m * tau0
    alpha: int
        exponent of f for the frequency PSD:
        'wp' returns white phase noise.             alpha=+2
        'wf' returns white frequency noise.         alpha= 0
        'fp' returns flicker phase noise.           alpha=+1
        'ff' returns flicker frequency noise.       alpha=-1
        'rf' returns random walk frequency noise.   alpha=-2
        If the input is not recognized, it defaults to idealized,
        uncorrelated noise with (N-1) degrees of freedom.

    Notes
    -----
    S. Stein, Frequency and Time - Their Measurement and
    Characterization. Precision Frequency Control Vol 2, 1985,
    pp 191-416. http://tf.boulder.nist.gov/general/pdf/666.pdf

    Returns
    -------
    edf : float
        Equivalent degrees of freedom
    """
    N = float(N)
    m = float(m)
    if alpha in [2, 1, 0, -1, -2]:
        # NIST SP 1065, Table 5
        if alpha == +2:
            edf = (N + 1) * (N - 2*m) / (2 * (N - m))
        if alpha == 0:
            edf = (((3 * (N - 1) / (2 * m)) - (2 * (N - 2) / N)) *
                   ((4*pow(m, 2)) / ((4*pow(m, 2)) + 5)))
        if alpha == 1:
            a = (N - 1)/(2 * m)
            b = (2 * m + 1) * (N - 1) / 4
            edf = np.exp(np.sqrt(np.log(a) * np.log(b)))
        if alpha == -1:
            if m == 1:
                edf = 2 * (N - 2) / (2.3 * N - 4.9)
            if m >= 2:
                edf = 5 * N**2 / (4 * m * (N + (3 * m)))
        if alpha == -2:
            a = (N - 2) / (m * (N - 3)**2)
            b = (N - 1)**2
            c = 3 * m * (N - 1)
            d = 4 * m**2
            edf = a * (b - c + d)
    else:
        edf = (N - 1)
        print("Noise type not recognized."
              " Defaulting to N - 1 degrees of freedom.")
    return edf
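A usage sketch comparing the simple approximation against the Greenhall EDF for the same parameters; the two should be of similar magnitude, not identical:

# Sketch: simple vs. Greenhall EDF for white FM (alpha=0), N=1024, m=8.
print(edf_simple(N=1024, m=8, alpha=0))
print(edf_greenhall(alpha=0, d=2, m=8, N=1024, overlapping=True))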
Compute the GRADEV of white phase noise. Compares two scenarios: 1) the original gap-free data and 2) the same data with a simulated gap, both estimated with the gap-robust ADEV.

def example1():
    """
    Compute the GRADEV of white phase noise. Compares two scenarios:
    1) the original gap-free data and 2) the same data with a
    simulated gap.
    """
    N = 1000
    f = 1
    y = np.random.randn(1, N)[0, :]
    x = [xx for xx in np.linspace(1, len(y), len(y))]
    x_ax, y_ax, (err_l, err_h), ns = allan.gradev(y, data_type='phase',
                                                  rate=f, taus=x)
    plt.errorbar(x_ax, y_ax, yerr=[err_l, err_h], label='GRADEV, no gaps')

    # Simulate missing data
    y[int(np.floor(0.4*N)):int(np.floor(0.6*N))] = np.NaN
    x_ax, y_ax, (err_l, err_h), ns = allan.gradev(y, data_type='phase',
                                                  rate=f, taus=x)
    plt.errorbar(x_ax, y_ax, yerr=[err_l, err_h], label='GRADEV, with gaps')

    plt.xscale('log')
    plt.yscale('log')
    plt.grid()
    plt.legend()
    plt.xlabel('Tau / s')
    plt.ylabel('Overlapping Allan deviation')
    plt.show()
Compute the GRADEV of a nonstationary white phase noise.
def example2():
    """
    Compute the GRADEV of a nonstationary white phase noise.
    """
    N = 1000  # number of samples
    f = 1     # data samples per second
    s = 1 + 5/N*np.arange(0, N)
    y = s*np.random.randn(1, N)[0, :]
    x = [xx for xx in np.linspace(1, len(y), len(y))]
    x_ax, y_ax, (err_l, err_h), ns = allan.gradev(y, data_type='phase',
                                                  rate=f, taus=x)
    plt.loglog(x_ax, y_ax, 'b.', label="No gaps")

    y[int(0.4*N):int(0.6*N)] = np.NaN  # Simulate missing data
    x_ax, y_ax, (err_l, err_h), ns = allan.gradev(y, data_type='phase',
                                                  rate=f, taus=x)
    plt.loglog(x_ax, y_ax, 'g.', label="With gaps")

    plt.grid()
    plt.legend()
    plt.xlabel('Tau / s')
    plt.ylabel('Overlapping Allan deviation')
    plt.show()
Time deviation. Based on modified Allan variance.

.. math::

    \sigma^2_{TDEV}( \tau ) = { \tau^2 \over 3 } \sigma^2_{MDEV}( \tau )

Note that TDEV has a unit of seconds.

Parameters
----------
data: np.array
    Input data. Provide either phase or frequency (fractional, adimensional).
rate: float
    The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
    Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
    Array of tau values, in seconds, for which to compute statistic.
    Optionally set taus=["all"|"octave"|"decade"] for automatic
    tau-list generation.

Returns
-------
(taus, tdev, tdev_error, ns): tuple
    Tuple of values
taus: np.array
    Tau values for which td computed
tdev: np.array
    Computed time deviations (in seconds) for each tau value
tdev_errors: np.array
    Time deviation errors
ns: np.array
    Values of N used in mdev_phase()

Notes
-----
http://en.wikipedia.org/wiki/Time_deviation

def tdev(data, rate=1.0, data_type="phase", taus=None):
    """ Time deviation.
        Based on modified Allan variance.

    .. math::

        \\sigma^2_{TDEV}( \\tau ) = { \\tau^2 \\over 3 }
        \\sigma^2_{MDEV}( \\tau )

    Note that TDEV has a unit of seconds.

    Parameters
    ----------
    data: np.array
        Input data. Provide either phase or frequency (fractional,
        adimensional).
    rate: float
        The sampling rate for data, in Hz. Defaults to 1.0
    data_type: {'phase', 'freq'}
        Data type, i.e. phase or frequency. Defaults to "phase".
    taus: np.array
        Array of tau values, in seconds, for which to compute statistic.
        Optionally set taus=["all"|"octave"|"decade"] for automatic
        tau-list generation.

    Returns
    -------
    (taus, tdev, tdev_error, ns): tuple
        Tuple of values
    taus: np.array
        Tau values for which td computed
    tdev: np.array
        Computed time deviations (in seconds) for each tau value
    tdev_errors: np.array
        Time deviation errors
    ns: np.array
        Values of N used in mdev_phase()

    Notes
    -----
    http://en.wikipedia.org/wiki/Time_deviation
    """
    phase = input_to_phase(data, rate, data_type)
    (taus, md, mde, ns) = mdev(phase, rate=rate, taus=taus)
    td = taus * md / np.sqrt(3.0)
    tde = td / np.sqrt(ns)
    return taus, td, tde, ns
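Usage sketch, driving tdev() with simulated white phase noise from white() earlier in this document:

# Usage sketch: TDEV of simulated white phase noise at octave taus.
x = white(num_points=4096, b0=1e-24, fs=1.0)    # phase data, in seconds
(taus_out, td, tde, ns) = tdev(x, rate=1.0, data_type="phase",
                               taus="octave")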
Modified Allan deviation. Used to distinguish between White and Flicker Phase Modulation.

.. math::

    \sigma^2_{MDEV}(m\tau_0) = { 1 \over 2 (m \tau_0 )^2 (N-3m+1) }
    \sum_{j=1}^{N-3m+1} \lbrace
    \sum_{i=j}^{j+m-1} {x}_{i+2m} - 2x_{i+m} + x_{i} \rbrace^2

Parameters
----------
data: np.array
    Input data. Provide either phase or frequency (fractional, adimensional).
rate: float
    The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
    Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
    Array of tau values, in seconds, for which to compute statistic.
    Optionally set taus=["all"|"octave"|"decade"] for automatic
    tau-list generation.

Returns
-------
(taus2, md, mde, ns): tuple
    Tuple of values
taus2: np.array
    Tau values for which td computed
md: np.array
    Computed mdev for each tau value
mde: np.array
    mdev errors
ns: np.array
    Values of N used in each mdev calculation

Notes
-----
see http://www.leapsecond.com/tools/adev_lib.c
NIST SP 1065 eqn (14) and (15), page 17

def mdev(data, rate=1.0, data_type="phase", taus=None):
    """ Modified Allan deviation.
        Used to distinguish between White and Flicker Phase Modulation.

    .. math::

        \\sigma^2_{MDEV}(m\\tau_0) = { 1 \\over 2 (m \\tau_0 )^2 (N-3m+1) }
        \\sum_{j=1}^{N-3m+1} \\lbrace
        \\sum_{i=j}^{j+m-1} {x}_{i+2m} - 2x_{i+m} + x_{i} \\rbrace^2

    Parameters
    ----------
    data: np.array
        Input data. Provide either phase or frequency (fractional,
        adimensional).
    rate: float
        The sampling rate for data, in Hz. Defaults to 1.0
    data_type: {'phase', 'freq'}
        Data type, i.e. phase or frequency. Defaults to "phase".
    taus: np.array
        Array of tau values, in seconds, for which to compute statistic.
        Optionally set taus=["all"|"octave"|"decade"] for automatic
        tau-list generation.

    Returns
    -------
    (taus2, md, mde, ns): tuple
        Tuple of values
    taus2: np.array
        Tau values for which td computed
    md: np.array
        Computed mdev for each tau value
    mde: np.array
        mdev errors
    ns: np.array
        Values of N used in each mdev calculation

    Notes
    -----
    see http://www.leapsecond.com/tools/adev_lib.c

    NIST SP 1065 eqn (14) and (15), page 17
    """
    phase = input_to_phase(data, rate, data_type)
    (phase, ms, taus_used) = tau_generator(phase, rate, taus=taus)
    data, taus = np.array(phase), np.array(taus)

    md = np.zeros_like(ms)
    mderr = np.zeros_like(ms)
    ns = np.zeros_like(ms)

    # this is a 'loop-unrolled' algorithm following
    # http://www.leapsecond.com/tools/adev_lib.c
    for idx, m in enumerate(ms):
        # without the int() cast we get: VisibleDeprecationWarning:
        # using a non-integer number instead of an integer will result
        # in an error in the future
        m = int(m)
        tau = taus_used[idx]

        # First loop sum
        d0 = phase[0:m]
        d1 = phase[m:2*m]
        d2 = phase[2*m:3*m]
        e = min(len(d0), len(d1), len(d2))
        v = np.sum(d2[:e] - 2*d1[:e] + d0[:e])
        s = v * v

        # Second part of sum
        d3 = phase[3*m:]
        d2 = phase[2*m:]
        d1 = phase[1*m:]
        d0 = phase[0:]
        e = min(len(d0), len(d1), len(d2), len(d3))
        n = e + 1

        v_arr = v + np.cumsum(d3[:e] - 3*d2[:e] + 3*d1[:e] - d0[:e])
        s = s + np.sum(v_arr * v_arr)
        s /= 2.0 * m * m * tau * tau * n
        s = np.sqrt(s)

        md[idx] = s
        mderr[idx] = (s / np.sqrt(n))
        ns[idx] = n

    return remove_small_ns(taus_used, md, mderr, ns)
Allan deviation. Classic - use only if required - relatively poor confidence.

.. math::

    \sigma^2_{ADEV}(\tau) = { 1 \over 2 \tau^2 }
    \langle ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2 \rangle
    = { 1 \over 2 (N-2) \tau^2 }
    \sum_{n=1}^{N-2} ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2

where :math:`x_n` is the time-series of phase observations, spaced by the measurement interval :math:`\tau`, and with length :math:`N`.

Or alternatively calculated from a time-series of fractional frequency:

.. math::

    \sigma^{2}_{ADEV}(\tau) = { 1 \over 2 }
    \langle ( \bar{y}_{n+1} - \bar{y}_n )^2 \rangle

where :math:`\bar{y}_n` is the time-series of fractional frequency at averaging time :math:`\tau`.

NIST SP 1065 eqn (6) and (7), pages 14 and 15

Parameters
----------
data: np.array
    Input data. Provide either phase or frequency (fractional, adimensional).
rate: float
    The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
    Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
    Array of tau values, in seconds, for which to compute statistic.
    Optionally set taus=["all"|"octave"|"decade"] for automatic
    tau-list generation.

Returns
-------
(taus2, ad, ade, ns): tuple
    Tuple of values
taus2: np.array
    Tau values for which td computed
ad: np.array
    Computed adev for each tau value
ade: np.array
    adev errors
ns: np.array
    Values of N used in each adev calculation

def adev(data, rate=1.0, data_type="phase", taus=None):
    """ Allan deviation.
        Classic - use only if required - relatively poor confidence.

    .. math::

        \\sigma^2_{ADEV}(\\tau) = { 1 \\over 2 \\tau^2 }
        \\langle ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2 \\rangle
        = { 1 \\over 2 (N-2) \\tau^2 }
        \\sum_{n=1}^{N-2} ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2

    where :math:`x_n` is the time-series of phase observations, spaced
    by the measurement interval :math:`\\tau`, and with length
    :math:`N`.

    Or alternatively calculated from a time-series of fractional
    frequency:

    .. math::

        \\sigma^{2}_{ADEV}(\\tau) = { 1 \\over 2 }
        \\langle ( \\bar{y}_{n+1} - \\bar{y}_n )^2 \\rangle

    where :math:`\\bar{y}_n` is the time-series of fractional frequency
    at averaging time :math:`\\tau`

    NIST SP 1065 eqn (6) and (7), pages 14 and 15

    Parameters
    ----------
    data: np.array
        Input data. Provide either phase or frequency (fractional,
        adimensional).
    rate: float
        The sampling rate for data, in Hz. Defaults to 1.0
    data_type: {'phase', 'freq'}
        Data type, i.e. phase or frequency. Defaults to "phase".
    taus: np.array
        Array of tau values, in seconds, for which to compute statistic.
        Optionally set taus=["all"|"octave"|"decade"] for automatic
        tau-list generation.

    Returns
    -------
    (taus2, ad, ade, ns): tuple
        Tuple of values
    taus2: np.array
        Tau values for which td computed
    ad: np.array
        Computed adev for each tau value
    ade: np.array
        adev errors
    ns: np.array
        Values of N used in each adev calculation
    """
    phase = input_to_phase(data, rate, data_type)
    (phase, m, taus_used) = tau_generator(phase, rate, taus)

    ad = np.zeros_like(taus_used)
    ade = np.zeros_like(taus_used)
    adn = np.zeros_like(taus_used)

    for idx, mj in enumerate(m):  # loop through each tau value m(j)
        (ad[idx], ade[idx], adn[idx]) = calc_adev_phase(phase, rate, mj, mj)

    return remove_small_ns(taus_used, ad, ade, adn)
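Usage sketch: for white FM noise (here, white() from earlier interpreted as fractional frequency), ADEV should fall as 1/sqrt(tau):

# Usage sketch: ADEV of white frequency noise falls as 1/sqrt(tau).
y = white(num_points=10000, b0=1.0, fs=1.0)     # fractional frequency
(taus_out, ad, ade, ns) = adev(y, rate=1.0, data_type="freq",
                               taus="decade")
print(ad[0]/ad[1])   # roughly sqrt(10) between decade-spaced taus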
Main algorithm for adev() (stride=mj) and oadev() (stride=1).

See http://www.leapsecond.com/tools/adev_lib.c

Parameters
----------
phase: np.array
    Phase data in seconds.
rate: float
    The sampling rate for phase or frequency, in Hz
mj: int
    M index value for stride
stride: int
    Size of stride

Returns
-------
(dev, deverr, n): tuple
    Array of computed values.

Notes
-----
stride = mj for nonoverlapping Allan deviation
stride = 1 for overlapping Allan deviation

References
----------
* http://en.wikipedia.org/wiki/Allan_variance
* http://www.leapsecond.com/tools/adev_lib.c

NIST SP 1065, eqn (7) and (11) page 16

def calc_adev_phase(phase, rate, mj, stride):
    """ Main algorithm for adev() (stride=mj) and oadev() (stride=1)

    see http://www.leapsecond.com/tools/adev_lib.c
    stride = mj for nonoverlapping allan deviation

    Parameters
    ----------
    phase: np.array
        Phase data in seconds.
    rate: float
        The sampling rate for phase or frequency, in Hz
    mj: int
        M index value for stride
    stride: int
        Size of stride

    Returns
    -------
    (dev, deverr, n): tuple
        Array of computed values.

    Notes
    -----
    stride = mj for nonoverlapping Allan deviation
    stride = 1 for overlapping Allan deviation

    References
    ----------
    * http://en.wikipedia.org/wiki/Allan_variance
    * http://www.leapsecond.com/tools/adev_lib.c

    NIST SP 1065, eqn (7) and (11) page 16
    """
    import warnings
    mj = int(mj)
    stride = int(stride)
    d2 = phase[2 * mj::stride]
    d1 = phase[1 * mj::stride]
    d0 = phase[::stride]
    n = min(len(d0), len(d1), len(d2))
    if n == 0:
        # actually emit the warning; the original constructed a
        # RuntimeWarning without raising or warning
        warnings.warn("Data array length is too small: %i" % len(phase),
                      RuntimeWarning)
        n = 1
    v_arr = d2[:n] - 2 * d1[:n] + d0[:n]
    s = np.sum(v_arr * v_arr)
    dev = np.sqrt(s / (2.0 * n)) / mj * rate
    deverr = dev / np.sqrt(n)
    return dev, deverr, n
Overlapping Hadamard deviation. Better confidence than normal Hadamard.

.. math::

    \sigma^2_{OHDEV}(m\tau_0) = { 1 \over 6 (m \tau_0 )^2 (N-3m) }
    \sum_{i=1}^{N-3m} ( {x}_{i+3m} - 3x_{i+2m} + 3x_{i+m} - x_{i} )^2

where :math:`x_i` is the time-series of phase observations, spaced by the measurement interval :math:`\tau_0`, and with length :math:`N`.

Parameters
----------
data: np.array
    Input data. Provide either phase or frequency (fractional, adimensional).
rate: float
    The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
    Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
    Array of tau values, in seconds, for which to compute statistic.
    Optionally set taus=["all"|"octave"|"decade"] for automatic
    tau-list generation.

Returns
-------
(taus2, hd, hde, ns): tuple
    Tuple of values
taus2: np.array
    Tau values for which td computed
hd: np.array
    Computed hdev for each tau value
hde: np.array
    hdev errors
ns: np.array
    Values of N used in each hdev calculation

def ohdev(data, rate=1.0, data_type="phase", taus=None):
    """ Overlapping Hadamard deviation.
        Better confidence than normal Hadamard.

    .. math::

        \\sigma^2_{OHDEV}(m\\tau_0) = { 1 \\over 6 (m \\tau_0 )^2 (N-3m) }
        \\sum_{i=1}^{N-3m} ( {x}_{i+3m} - 3x_{i+2m} + 3x_{i+m} - x_{i} )^2

    where :math:`x_i` is the time-series of phase observations, spaced
    by the measurement interval :math:`\\tau_0`, and with length
    :math:`N`.

    Parameters
    ----------
    data: np.array
        Input data. Provide either phase or frequency (fractional,
        adimensional).
    rate: float
        The sampling rate for data, in Hz. Defaults to 1.0
    data_type: {'phase', 'freq'}
        Data type, i.e. phase or frequency. Defaults to "phase".
    taus: np.array
        Array of tau values, in seconds, for which to compute statistic.
        Optionally set taus=["all"|"octave"|"decade"] for automatic
        tau-list generation.

    Returns
    -------
    (taus2, hd, hde, ns): tuple
        Tuple of values
    taus2: np.array
        Tau values for which td computed
    hd: np.array
        Computed hdev for each tau value
    hde: np.array
        hdev errors
    ns: np.array
        Values of N used in each hdev calculation
    """
    phase = input_to_phase(data, rate, data_type)
    (phase, m, taus_used) = tau_generator(phase, rate, taus)
    hdevs = np.zeros_like(taus_used)
    hdeverrs = np.zeros_like(taus_used)
    ns = np.zeros_like(taus_used)

    for idx, mj in enumerate(m):
        (hdevs[idx],
         hdeverrs[idx],
         ns[idx]) = calc_hdev_phase(phase, rate, mj, 1)

    return remove_small_ns(taus_used, hdevs, hdeverrs, ns)
Main calculation function for HDEV and OHDEV.

Parameters
----------
phase: np.array
    Phase data in seconds.
rate: float
    The sampling rate for phase or frequency, in Hz
mj: int
    M index value for stride
stride: int
    Size of stride

Returns
-------
(dev, deverr, n): tuple
    Array of computed values.

Notes
-----
http://www.leapsecond.com/tools/adev_lib.c

                     1         N-3
    s2y(t) = --------------- * sum [x(i+3) - 3x(i+2) + 3x(i+1) - x(i)]^2
             6*tau^2 (N-3m)    i=1

N=M+1 phase measurements
m is averaging factor

NIST SP 1065 eqn (18) and (20) pages 20 and 21

def calc_hdev_phase(phase, rate, mj, stride):
    """ main calculation function for HDEV and OHDEV

    Parameters
    ----------
    phase: np.array
        Phase data in seconds.
    rate: float
        The sampling rate for phase or frequency, in Hz
    mj: int
        M index value for stride
    stride: int
        Size of stride

    Returns
    -------
    (dev, deverr, n): tuple
        Array of computed values.

    Notes
    -----
    http://www.leapsecond.com/tools/adev_lib.c

                         1         N-3
        s2y(t) = --------------- * sum [x(i+3) - 3x(i+2) + 3x(i+1) - x(i)]^2
                 6*tau^2 (N-3m)    i=1

    N=M+1 phase measurements
    m is averaging factor

    NIST SP 1065 eqn (18) and (20) pages 20 and 21
    """
    tau0 = 1.0 / float(rate)
    mj = int(mj)
    stride = int(stride)
    d3 = phase[3 * mj::stride]
    d2 = phase[2 * mj::stride]
    d1 = phase[1 * mj::stride]
    d0 = phase[::stride]
    n = min(len(d0), len(d1), len(d2), len(d3))
    v_arr = d3[:n] - 3 * d2[:n] + 3 * d1[:n] - d0[:n]
    s = np.sum(v_arr * v_arr)
    if n == 0:
        n = 1
    h = np.sqrt(s / 6.0 / float(n)) / float(tau0 * mj)
    e = h / np.sqrt(n)
    return h, e, n
Total deviation. Better confidence at long averages for Allan.

.. math::

    \sigma^2_{TOTDEV}( m\tau_0 ) = { 1 \over 2 (m\tau_0)^2 (N-2) }
    \sum_{i=2}^{N-1} ( {x}^*_{i-m} - 2x^*_{i} + x^*_{i+m} )^2

Where :math:`x^*_i` is a new time-series of length :math:`3N-4` derived from the original phase time-series :math:`x_n` of length :math:`N` by reflection at both ends.

FIXME: better description of reflection operation.
The original data x is in the center of x*:

    x*(1-j) = 2x(1) - x(1+j)  for j=1..N-2
    x*(i)   = x(i)            for i=1..N
    x*(N+j) = 2x(N) - x(N-j)  for j=1..N-2

x* has length 3N-4, and tau = m*tau0.

FIXME: bias correction http://www.wriley.com/CI2.pdf page 5

Parameters
----------
data: np.array
    Input data. Provide either phase (in seconds) or fractional frequency (nondimensional).
rate: float
    The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
    Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
    Array of tau values for which to compute measurement

References
----------
David A. Howe, *The total deviation approach to long-term characterization of frequency stability*, IEEE tr. UFFC vol 47 no 5 (2000)

NIST SP 1065 eqn (25) page 23

def totdev(data, rate=1.0, data_type="phase", taus=None):
    """ Total deviation.
        Better confidence at long averages for Allan.

    .. math::

        \\sigma^2_{TOTDEV}( m\\tau_0 ) = { 1 \\over 2 (m\\tau_0)^2 (N-2) }
        \\sum_{i=2}^{N-1} ( {x}^*_{i-m} - 2x^*_{i} + x^*_{i+m} )^2

    Where :math:`x^*_i` is a new time-series of length :math:`3N-4`
    derived from the original phase time-series :math:`x_n` of
    length :math:`N` by reflection at both ends.

    FIXME: better description of reflection operation.
    the original data x is in the center of x*:

        x*(1-j) = 2x(1) - x(1+j)  for j=1..N-2
        x*(i)   = x(i)            for i=1..N
        x*(N+j) = 2x(N) - x(N-j)  for j=1..N-2

    x* has length 3N-4
    tau = m*tau0

    FIXME: bias correction http://www.wriley.com/CI2.pdf page 5

    Parameters
    ----------
    data: np.array
        Input data. Provide either phase (in seconds) or fractional
        frequency (nondimensional).
    rate: float
        The sampling rate for data, in Hz. Defaults to 1.0
    data_type: {'phase', 'freq'}
        Data type, i.e. phase or frequency. Defaults to "phase".
    taus: np.array
        Array of tau values for which to compute measurement

    References
    ----------
    David A. Howe,
    *The total deviation approach to long-term characterization
    of frequency stability*,
    IEEE tr. UFFC vol 47 no 5 (2000)

    NIST SP 1065 eqn (25) page 23
    """
    phase = input_to_phase(data, rate, data_type)
    (phase, m, taus_used) = tau_generator(phase, rate, taus)
    N = len(phase)

    # totdev requires a new dataset
    # Begin by adding reflected data before dataset
    x1 = 2.0 * phase[0] * np.ones((N - 2,))
    x1 = x1 - phase[1:-1]
    x1 = x1[::-1]

    # Reflected data at end of dataset
    x2 = 2.0 * phase[-1] * np.ones((N - 2,))
    x2 = x2 - phase[1:-1][::-1]

    # check length of new dataset
    assert len(x1)+len(phase)+len(x2) == 3*N - 4

    # Combine into a single array
    x = np.zeros((3*N - 4))
    x[0:N-2] = x1
    x[N-2:2*(N-2)+2] = phase  # original data in the middle
    x[2*(N-2)+2:] = x2

    devs = np.zeros_like(taus_used)
    deverrs = np.zeros_like(taus_used)
    ns = np.zeros_like(taus_used)
    mid = len(x1)

    for idx, mj in enumerate(m):
        mj = int(mj)
        d0 = x[mid + 1:]
        d1 = x[mid + mj + 1:]
        d1n = x[mid - mj + 1:]
        e = min(len(d0), len(d1), len(d1n))

        v_arr = d1n[:e] - 2.0 * d0[:e] + d1[:e]
        dev = np.sum(v_arr[:mid] * v_arr[:mid])

        dev /= float(2 * pow(mj / rate, 2) * (N - 2))
        dev = np.sqrt(dev)
        devs[idx] = dev
        deverrs[idx] = dev / np.sqrt(mid)
        ns[idx] = mid

    return remove_small_ns(taus_used, devs, deverrs, ns)
Time Total Deviation modified total variance scaled by tau^2 / 3 NIST SP 1065 eqn (28) page 26 <--- formula should have tau squared !?!
def ttotdev(data, rate=1.0, data_type="phase", taus=None): """ Time Total Deviation modified total variance scaled by tau^2 / 3 NIST SP 1065 eqn (28) page 26 <--- formula should have tau squared !?! """ (taus, mtotdevs, mde, ns) = mtotdev(data, data_type=data_type, rate=rate, taus=taus) td = taus*mtotdevs / np.sqrt(3.0) tde = td / np.sqrt(ns) return taus, td, tde, ns
PRELIMINARY - REQUIRES FURTHER TESTING.
Modified Total deviation.
Better confidence at long averages for modified Allan

FIXME: bias-correction http://www.wriley.com/CI2.pdf page 6
The variance is scaled up (divided by these numbers, which are all
less than 1) based on the noise type identified.
WPM 0.94
FPM 0.83
WFM 0.73
FFM 0.70
RWFM 0.69

Parameters
----------
data: np.array
    Input data. Provide either phase or frequency (fractional,
    adimensional).
rate: float
    The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
    Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
    Array of tau values, in seconds, for which to compute statistic.
    Optionally set taus=["all"|"octave"|"decade"] for automatic
    tau-list generation.

NIST SP 1065 eqn (27) page 25
def mtotdev(data, rate=1.0, data_type="phase", taus=None): """ PRELIMINARY - REQUIRES FURTHER TESTING. Modified Total deviation. Better confidence at long averages for modified Allan FIXME: bias-correction http://www.wriley.com/CI2.pdf page 6 The variance is scaled up (divided by this number) based on the noise-type identified. WPM 0.94 FPM 0.83 WFM 0.73 FFM 0.70 RWFM 0.69 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. NIST SP 1065 eqn (27) page 25 """ phase = input_to_phase(data, rate, data_type) (phase, ms, taus_used) = tau_generator(phase, rate, taus, maximum_m=float(len(phase))/3.0) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(ms): devs[idx], deverrs[idx], ns[idx] = calc_mtotdev_phase(phase, rate, mj) return remove_small_ns(taus_used, devs, deverrs, ns)
PRELIMINARY - REQUIRES FURTHER TESTING. Hadamard Total deviation. Better confidence at long averages for Hadamard deviation FIXME: bias corrections from http://www.wriley.com/CI2.pdf W FM 0.995 alpha= 0 F FM 0.851 alpha=-1 RW FM 0.771 alpha=-2 FW FM 0.717 alpha=-3 RR FM 0.679 alpha=-4 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation.
def htotdev(data, rate=1.0, data_type="phase", taus=None): """ PRELIMINARY - REQUIRES FURTHER TESTING. Hadamard Total deviation. Better confidence at long averages for Hadamard deviation FIXME: bias corrections from http://www.wriley.com/CI2.pdf W FM 0.995 alpha= 0 F FM 0.851 alpha=-1 RW FM 0.771 alpha=-2 FW FM 0.717 alpha=-3 RR FM 0.679 alpha=-4 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. """ if data_type == "phase": phase = data freq = phase2frequency(phase, rate) elif data_type == "freq": phase = frequency2phase(data, rate) freq = data else: raise Exception("unknown data_type: " + data_type) rate = float(rate) (freq, ms, taus_used) = tau_generator(freq, rate, taus, maximum_m=float(len(freq))/3.0) phase = np.array(phase) freq = np.array(freq) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) # NOTE at mj==1 we use ohdev(), based on comment from here: # http://www.wriley.com/paper4ht.htm # "For best consistency, the overlapping Hadamard variance is used # instead of the Hadamard total variance at m=1" # FIXME: this uses both freq and phase datasets, which uses double the memory really needed... for idx, mj in enumerate(ms): if int(mj) == 1: (devs[idx], deverrs[idx], ns[idx]) = calc_hdev_phase(phase, rate, mj, 1) else: (devs[idx], deverrs[idx], ns[idx]) = calc_htotdev_freq(freq, mj) return remove_small_ns(taus_used, devs, deverrs, ns)
PRELIMINARY - REQUIRES FURTHER TESTING. calculation of htotdev for one averaging factor m tau = m*tau0 Parameters ---------- frequency: np.array Fractional frequency data (nondimensional). m: int Averaging factor. tau = m*tau0, where tau0=1/rate.
def calc_htotdev_freq(freq, m):
    """ PRELIMINARY - REQUIRES FURTHER TESTING.
        calculation of htotdev for one averaging factor m
        tau = m*tau0

        Parameters
        ----------
        frequency: np.array
            Fractional frequency data (nondimensional).
        m: int
            Averaging factor. tau = m*tau0, where tau0=1/rate.
    """
    N = int(len(freq))  # frequency data, N points
    m = int(m)
    n = 0      # number of terms in the sum, for error estimation
    dev = 0.0  # the deviation we are computing
    for i in range(0, N-3*int(m)+1):
        # subsequence of length 3m, from the original frequency data
        xs = freq[i:i+3*m]
        assert len(xs) == 3*m
        # remove linear trend, by averaging first/last half,
        # computing slope, and subtracting
        half1_idx = int(np.floor(3*m/2.0))
        half2_idx = int(np.ceil(3*m/2.0))
        # m
        # 1    0:1   2:2
        mean1 = np.mean(xs[:half1_idx])
        mean2 = np.mean(xs[half2_idx:])

        if int(3*m) % 2 == 1:  # m is odd
            # 3m = 2k+1 is odd, with the averages at both ends over k points
            # the distance between the averages is then k+1 = (3m-1)/2 +1
            slope = (mean2-mean1) / ((0.5*(3*m-1)+1))
        else:  # m is even
            # 3m = 2k is even, so distance between averages is k=3m/2
            slope = (mean2-mean1) / (0.5*3*m)

        # remove the linear trend
        x0 = [x - slope*(idx-np.floor(3*m/2)) for (idx, x) in enumerate(xs)]
        x0_flip = x0[::-1]  # left-right flipped version of array
        # extended sequence of length 9m, by uninverted even reflection
        xstar = np.concatenate((x0_flip, x0, x0_flip))
        assert len(xstar) == 9*m

        # now compute totdev on these 9m points
        # 6m unique groups of m-point averages,
        # all possible overlapping second differences
        # one term in the 6m sum:  [ x_i - 2 x_i+m + x_i+2m ]^2
        squaresum = 0.0
        k = 0
        for j in range(0, 6*int(m)):  # summation of the 6m terms.
            xmean1 = np.mean(xstar[j+0*m: j+1*m])
            xmean2 = np.mean(xstar[j+1*m: j+2*m])
            xmean3 = np.mean(xstar[j+2*m: j+3*m])
            squaresum += pow(xmean1-2.0*xmean2+xmean3, 2)
            k = k+1
        assert k == 6*int(m)

        squaresum = (1.0/(6.0*k)) * squaresum
        dev += squaresum
        n = n+1

    # scaling in front of double-sum
    assert n == N-3*int(m)+1  # sanity check on the number of terms n
    dev = dev * 1.0 / (N-3*m+1)
    dev = np.sqrt(dev)
    error = dev / np.sqrt(n)
    return (dev, error, n)
PRELIMINARY - REQUIRES FURTHER TESTING. Theo1 is a two-sample variance with improved confidence and extended averaging factor range. .. math:: \\sigma^2_{THEO1}(m\\tau_0) = { 1 \\over (m \\tau_0 )^2 (N-m) } \\sum_{i=1}^{N-m} \\sum_{\\delta=0}^{m/2-1} {1\\over m/2-\\delta}\\lbrace ({x}_{i} - x_{i-\\delta +m/2}) + (x_{i+m}- x_{i+\\delta +m/2}) \\rbrace^2 Where :math:`10<=m<=N-1` is even. FIXME: bias correction NIST SP 1065 eq (30) page 29 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation.
def theo1(data, rate=1.0, data_type="phase", taus=None): """ PRELIMINARY - REQUIRES FURTHER TESTING. Theo1 is a two-sample variance with improved confidence and extended averaging factor range. .. math:: \\sigma^2_{THEO1}(m\\tau_0) = { 1 \\over (m \\tau_0 )^2 (N-m) } \\sum_{i=1}^{N-m} \\sum_{\\delta=0}^{m/2-1} {1\\over m/2-\\delta}\\lbrace ({x}_{i} - x_{i-\\delta +m/2}) + (x_{i+m}- x_{i+\\delta +m/2}) \\rbrace^2 Where :math:`10<=m<=N-1` is even. FIXME: bias correction NIST SP 1065 eq (30) page 29 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. """ phase = input_to_phase(data, rate, data_type) tau0 = 1.0/rate (phase, ms, taus_used) = tau_generator(phase, rate, taus, even=True) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) N = len(phase) for idx, m in enumerate(ms): m = int(m) # to avoid: VisibleDeprecationWarning: using a # non-integer number instead of an integer will # result in an error in the future assert m % 2 == 0 # m must be even dev = 0 n = 0 for i in range(int(N-m)): s = 0 for d in range(int(m/2)): # inner sum pre = 1.0 / (float(m)/2 - float(d)) s += pre*pow(phase[i]-phase[i-d+int(m/2)] + phase[i+m]-phase[i+d+int(m/2)], 2) n = n+1 dev += s assert n == (N-m)*m/2 # N-m outer sums, m/2 inner sums dev = dev/(0.75*(N-m)*pow(m*tau0, 2)) # factor 0.75 used here? http://tf.nist.gov/general/pdf/1990.pdf # but not here? http://tf.nist.gov/timefreq/general/pdf/2220.pdf page 29 devs[idx] = np.sqrt(dev) deverrs[idx] = devs[idx] / np.sqrt(N-m) ns[idx] = n return remove_small_ns(taus_used, devs, deverrs, ns)
Time Interval Error RMS. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation.
def tierms(data, rate=1.0, data_type="phase", taus=None): """ Time Interval Error RMS. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. """ phase = input_to_phase(data, rate, data_type) (data, m, taus_used) = tau_generator(phase, rate, taus) count = len(phase) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(m): mj = int(mj) # This seems like an unusual way to phases = np.column_stack((phase[:-mj], phase[mj:])) p_max = np.max(phases, axis=1) p_min = np.min(phases, axis=1) phases = p_max - p_min tie = np.sqrt(np.mean(phases * phases)) ncount = count - mj devs[idx] = tie deverrs[idx] = 0 / np.sqrt(ncount) # TODO! I THINK THIS IS WRONG! ns[idx] = ncount return remove_small_ns(taus_used, devs, deverrs, ns)
Make an ndarray with a rolling window of the last dimension, from
http://mail.scipy.org/pipermail/numpy-discussion/2011-January/054401.html

Parameters
----------
a : array_like
    Array to add rolling window to
window : int
    Size of rolling window

Returns
-------
Array that is a view of the original array with an added dimension
of size window.
def mtie_rolling_window(a, window):
    """
    Make an ndarray with a rolling window of the last dimension, from
    http://mail.scipy.org/pipermail/numpy-discussion/2011-January/054401.html

    Parameters
    ----------
    a : array_like
        Array to add rolling window to
    window : int
        Size of rolling window

    Returns
    -------
    Array that is a view of the original array with an added dimension
    of size window.

    """
    if window < 1:
        raise ValueError("`window` must be at least 1.")
    if window > a.shape[-1]:
        raise ValueError("`window` is too long.")
    shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
    strides = a.strides + (a.strides[-1],)
    return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
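A small demonstration of mtie_rolling_window; the returned array is a strided view, so the rolling max/min below require no extra copies of the data:

import numpy as np

x = np.array([1.0, 3.0, 2.0, 5.0, 4.0])
rw = mtie_rolling_window(x, 3)  # shape (3, 3): rows [1,3,2], [3,2,5], [2,5,4]
win_max = np.max(rw, axis=1)    # rolling maximum: [3., 5., 5.]
win_min = np.min(rw, axis=1)    # rolling minimum: [1., 2., 2.]
tie = win_max - win_min         # per-window peak-to-peak: [2., 3., 3.]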
Maximum Time Interval Error. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Notes ----- this seems to correspond to Stable32 setting "Fast(u)" Stable32 also has "Decade" and "Octave" modes where the dataset is extended somehow?
def mtie(data, rate=1.0, data_type="phase", taus=None): """ Maximum Time Interval Error. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Notes ----- this seems to correspond to Stable32 setting "Fast(u)" Stable32 also has "Decade" and "Octave" modes where the dataset is extended somehow? """ phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(m): rw = mtie_rolling_window(phase, int(mj + 1)) win_max = np.max(rw, axis=1) win_min = np.min(rw, axis=1) tie = win_max - win_min dev = np.max(tie) ncount = phase.shape[0] - mj devs[idx] = dev deverrs[idx] = dev / np.sqrt(ncount) ns[idx] = ncount return remove_small_ns(taus_used, devs, deverrs, ns)
fast binary decomposition algorithm for MTIE See: STEFANO BREGNI "Fast Algorithms for TVAR and MTIE Computation in Characterization of Network Synchronization Performance"
def mtie_phase_fast(phase, rate=1.0, data_type="phase", taus=None): """ fast binary decomposition algorithm for MTIE See: STEFANO BREGNI "Fast Algorithms for TVAR and MTIE Computation in Characterization of Network Synchronization Performance" """ rate = float(rate) phase = np.asarray(phase) k_max = int(np.floor(np.log2(len(phase)))) phase = phase[0:pow(2, k_max)] # truncate data to 2**k_max datapoints assert len(phase) == pow(2, k_max) #k = 1 taus = [ pow(2,k) for k in range(k_max)] #while k <= k_max: # tau = pow(2, k) # taus.append(tau) #print tau # k += 1 print("taus N=", len(taus), " ",taus) devs = np.zeros(len(taus)) deverrs = np.zeros(len(taus)) ns = np.zeros(len(taus)) taus_used = np.array(taus) # [(1.0/rate)*t for t in taus] # matrices to store results mtie_max = np.zeros((len(phase)-1, k_max)) mtie_min = np.zeros((len(phase)-1, k_max)) for kidx in range(k_max): k = kidx+1 imax = len(phase)-pow(2, k)+1 #print k, imax tie = np.zeros(imax) ns[kidx]=imax #print np.max( tie ) for i in range(imax): if k == 1: mtie_max[i, kidx] = max(phase[i], phase[i+1]) mtie_min[i, kidx] = min(phase[i], phase[i+1]) else: p = int(pow(2, k-1)) mtie_max[i, kidx] = max(mtie_max[i, kidx-1], mtie_max[i+p, kidx-1]) mtie_min[i, kidx] = min(mtie_min[i, kidx-1], mtie_min[i+p, kidx-1]) #for i in range(imax): tie[i] = mtie_max[i, kidx] - mtie_min[i, kidx] #print tie[i] devs[kidx] = np.amax(tie) # maximum along axis #print "maximum %2.4f" % devs[kidx] #print np.amax( tie ) #for tau in taus: #for devs = np.array(devs) print("devs N=",len(devs)," ",devs) print("taus N=", len(taus_used), " ",taus_used) return remove_small_ns(taus_used, devs, deverrs, ns)
gap resistant overlapping Allan deviation

Parameters
----------
data: np.array
    Input data. Provide either phase or frequency (fractional,
    adimensional). Warning : phase data works better (frequency data is
    first transformed into phase using numpy.cumsum() function, which can
    lead to poor results).
rate: float
    The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
    Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
    Array of tau values, in seconds, for which to compute statistic.
    Optionally set taus=["all"|"octave"|"decade"] for automatic
    tau-list generation.
ci: float
    the total confidence interval desired, i.e. if ci = 0.9,
    the bounds will be at 0.05 and 0.95.
noisetype: string
    the type of noise desired:
    'wp' returns white phase noise.
    'wf' returns white frequency noise.
    'fp' returns flicker phase noise.
    'ff' returns flicker frequency noise.
    'rf' returns random walk frequency noise.
    If the input is not recognized, it defaults to idealized, uncorrelated
    noise with (N-1) degrees of freedom.

Returns
-------
taus: np.array
    list of tau values in seconds
adev: np.array
    deviations
[err_l, err_h] : list of len()==2, np.array
    the upper and lower bounds of the confidence interval taken as
    distances from the estimated two sample variance.
ns: np.array
    number of terms n in the adev estimate.
def gradev(data, rate=1.0, data_type="phase", taus=None, ci=0.9, noisetype='wp'): """ gap resistant overlapping Allan deviation Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). Warning : phase data works better (frequency data is first trantformed into phase using numpy.cumsum() function, which can lead to poor results). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. ci: float the total confidence interval desired, i.e. if ci = 0.9, the bounds will be at 0.05 and 0.95. noisetype: string the type of noise desired: 'wp' returns white phase noise. 'wf' returns white frequency noise. 'fp' returns flicker phase noise. 'ff' returns flicker frequency noise. 'rf' returns random walk frequency noise. If the input is not recognized, it defaults to idealized, uncorrelated noise with (N-1) degrees of freedom. Returns ------- taus: np.array list of tau vales in seconds adev: np.array deviations [err_l, err_h] : list of len()==2, np.array the upper and lower bounds of the confidence interval taken as distances from the the estimated two sample variance. ns: np.array numper of terms n in the adev estimate. """ if (data_type == "freq"): print("Warning : phase data is preferred as input to gradev()") phase = input_to_phase(data, rate, data_type) (data, m, taus_used) = tau_generator(phase, rate, taus) ad = np.zeros_like(taus_used) ade_l = np.zeros_like(taus_used) ade_h = np.zeros_like(taus_used) adn = np.zeros_like(taus_used) for idx, mj in enumerate(m): (dev, deverr, n) = calc_gradev_phase(data, rate, mj, 1, ci, noisetype) # stride=1 for overlapping ADEV ad[idx] = dev ade_l[idx] = deverr[0] ade_h[idx] = deverr[1] adn[idx] = n # Note that errors are split in 2 arrays return remove_small_ns(taus_used, ad, [ade_l, ade_h], adn)
see http://www.leapsecond.com/tools/adev_lib.c
stride = mj for nonoverlapping allan deviation
stride = 1 for overlapping allan deviation

see http://en.wikipedia.org/wiki/Allan_variance

                 1           N-2
    s2y(t) = ------------- * sum [x(i+2) - 2x(i+1) + x(i) ]^2
             2*tau^2 (N-2)   i=1
def calc_gradev_phase(data, rate, mj, stride, confidence, noisetype):
    """ see http://www.leapsecond.com/tools/adev_lib.c
        stride = mj for nonoverlapping allan deviation
        stride = 1 for overlapping allan deviation

        see http://en.wikipedia.org/wiki/Allan_variance

                         1           N-2
            s2y(t) = ------------- * sum [x(i+2) - 2x(i+1) + x(i) ]^2
                     2*tau^2 (N-2)   i=1
    """
    import warnings  # local import, so this block stays self-contained

    d2 = data[2 * int(mj)::int(stride)]
    d1 = data[1 * int(mj)::int(stride)]
    d0 = data[::int(stride)]

    n = min(len(d0), len(d1), len(d2))

    if n == 0:
        # actually emit the warning; the original built the RuntimeWarning
        # object without raising or reporting it
        warnings.warn("Data array length is too small: %i" % len(data),
                      RuntimeWarning)
        n = 1

    v_arr = d2[:n] - 2 * d1[:n] + d0[:n]

    n = len(np.where(~np.isnan(v_arr))[0])  # only average for non-nans
    N = len(np.where(~np.isnan(data))[0])

    s = np.nansum(v_arr * v_arr)   # a summation robust to nans

    dev = np.sqrt(s / (2.0 * n)) / mj * rate
    # deverr = dev / np.sqrt(n)  # old simple errorbars
    if (noisetype == 'wp'):
        alpha = 2
    elif (noisetype == 'wf'):
        alpha = 0
    elif (noisetype == 'fp'):
        alpha = -2
    else:
        alpha = None

    if n > 1:
        edf = ci.edf_simple(N, mj, alpha)
        deverr = ci.confidence_interval(dev, confidence, edf)
    else:
        deverr = [0, 0]

    return dev, deverr, n
Take either phase or frequency as input and return phase
def input_to_phase(data, rate, data_type): """ Take either phase or frequency as input and return phase """ if data_type == "phase": return data elif data_type == "freq": return frequency2phase(data, rate) else: raise Exception("unknown data_type: " + data_type)
pre-processing of the tau-list given by the user (Helper function) Does sanity checks, sorts data, removes duplicates and invalid values. Generates a tau-list based on keywords 'all', 'decade', 'octave'. Uses 'octave' by default if no taus= argument is given. Parameters ---------- data: np.array data array rate: float Sample rate of data in Hz. Time interval between measurements is 1/rate seconds. taus: np.array Array of tau values for which to compute measurement. Alternatively one of the keywords: "all", "octave", "decade". Defaults to "octave" if omitted. v: verbose output if True even: require even m, where tau=m*tau0, for Theo1 statistic maximum_m: limit m, where tau=m*tau0, to this value. used by mtotdev() and htotdev() to limit maximum tau. Returns ------- (data, m, taus): tuple List of computed values data: np.array Data m: np.array Tau in units of data points taus: np.array Cleaned up list of tau values
def tau_generator(data, rate, taus=None, v=False, even=False, maximum_m=-1):
    """ pre-processing of the tau-list given by the user (Helper function)

    Does sanity checks, sorts data, removes duplicates and invalid values.
    Generates a tau-list based on keywords 'all', 'decade', 'octave'.
    Uses 'octave' by default if no taus= argument is given.

    Parameters
    ----------
    data: np.array
        data array
    rate: float
        Sample rate of data in Hz. Time interval between measurements
        is 1/rate seconds.
    taus: np.array
        Array of tau values for which to compute measurement.
        Alternatively one of the keywords: "all", "octave", "decade".
        Defaults to "octave" if omitted.
    v: verbose output if True
    even: require even m, where tau=m*tau0, for Theo1 statistic
    maximum_m: limit m, where tau=m*tau0, to this value.
               used by mtotdev() and htotdev() to limit maximum tau.

    Returns
    -------
    (data, m, taus): tuple
        List of computed values
    data: np.array
        Data
    m: np.array
        Tau in units of data points
    taus: np.array
        Cleaned up list of tau values
    """

    if rate == 0:
        raise RuntimeError("Warning! rate==0")

    if taus is None:  # empty or no tau-list supplied
        taus = "octave"  # default to octave
    elif isinstance(taus, list) and taus == []:
        taus = "octave"

    # note: keyword strings must be compared with ==, not the identity
    # operator "is", which only worked by interning accident
    if taus == "all":
        taus = (1.0/rate)*np.linspace(1.0, len(data), len(data))
    elif taus == "octave":
        maxn = np.floor(np.log2(len(data)))
        taus = (1.0/rate)*np.logspace(0, int(maxn), int(maxn+1), base=2.0)
    elif taus == "decade":  # 1, 2, 4, 10, 20, 40, spacing similar to Stable32
        maxn = np.floor(np.log10(len(data)))
        taus = []
        for k in range(int(maxn+1)):
            taus.append(1.0*(1.0/rate)*pow(10.0, k))
            taus.append(2.0*(1.0/rate)*pow(10.0, k))
            taus.append(4.0*(1.0/rate)*pow(10.0, k))

    data, taus = np.array(data), np.array(taus)
    rate = float(rate)
    m = []  # integer averaging factor. tau = m*tau0

    if maximum_m == -1:  # if no limit given
        maximum_m = len(data)
    # FIXME: should we use a "stop-ratio" like Stable32
    # found in Table III, page 9 of "Evolution of frequency stability analysis software"
    # max(AF) = len(phase)/stop_ratio, where
    # function  stop_ratio
    # adev      5
    # oadev     4
    # mdev      4
    # tdev      4
    # hdev      5
    # ohdev     4
    # totdev    2
    # tierms    4
    # htotdev   3
    # mtie      2
    # theo1     1
    # theoH     1
    # mtotdev   2
    # ttotdev   2

    taus_valid1 = taus < (1 / float(rate)) * float(len(data))
    taus_valid2 = taus > 0
    taus_valid3 = taus <= (1 / float(rate)) * float(maximum_m)
    taus_valid = taus_valid1 & taus_valid2 & taus_valid3
    m = np.floor(taus[taus_valid] * rate)
    m = m[m != 0]  # m is tau in units of datapoints
    m = np.unique(m)  # remove duplicates and sort

    if v:
        print("tau_generator: ", m)

    if len(m) == 0:
        print("Warning: sanity-check on tau failed!")
        print("   len(data)=", len(data), " rate=", rate, "taus= ", taus)

    taus2 = m / float(rate)

    if even:  # used by Theo1
        m_even_mask = ((m % 2) == 0)
        m = m[m_even_mask]
        taus2 = taus2[m_even_mask]

    return data, m, taus2
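A quick sketch of what tau_generator produces for the default "octave" spacing (illustrative values):

import numpy as np

x = np.random.randn(1000)  # 1000 samples at 1 Hz
data, m, taus = tau_generator(x, rate=1.0, taus="octave")
# m    == [1, 2, 4, ..., 512]  (powers of two up to floor(log2(len(x))))
# taus == m / rate, here identical to m since rate = 1 Hz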
Reduce the number of taus to maximum of n per decade (Helper function) takes in a tau list and reduces the number of taus to a maximum amount per decade. This is only useful if more than the "decade" and "octave" but less than the "all" taus are wanted. E.g. to show certain features of the data one might want 100 points per decade. NOTE: The algorithm is slightly inaccurate for ms under n_per_decade, and will also remove some points in this range, which is usually fine. Typical use would be something like: (data,m,taus)=tau_generator(data,rate,taus="all") (m,taus)=tau_reduction(m,rate,n_per_decade) Parameters ---------- ms: array of integers List of m values (assumed to be an "all" list) to remove points from. rate: float Sample rate of data in Hz. Time interval between measurements is 1/rate seconds. Used to convert to taus. n_per_decade: int Number of ms/taus to keep per decade. Returns ------- m: np.array Reduced list of m values taus: np.array Reduced list of tau values
def tau_reduction(ms, rate, n_per_decade): """Reduce the number of taus to maximum of n per decade (Helper function) takes in a tau list and reduces the number of taus to a maximum amount per decade. This is only useful if more than the "decade" and "octave" but less than the "all" taus are wanted. E.g. to show certain features of the data one might want 100 points per decade. NOTE: The algorithm is slightly inaccurate for ms under n_per_decade, and will also remove some points in this range, which is usually fine. Typical use would be something like: (data,m,taus)=tau_generator(data,rate,taus="all") (m,taus)=tau_reduction(m,rate,n_per_decade) Parameters ---------- ms: array of integers List of m values (assumed to be an "all" list) to remove points from. rate: float Sample rate of data in Hz. Time interval between measurements is 1/rate seconds. Used to convert to taus. n_per_decade: int Number of ms/taus to keep per decade. Returns ------- m: np.array Reduced list of m values taus: np.array Reduced list of tau values """ ms = np.int64(ms) keep = np.bool8(np.rint(n_per_decade*np.log10(ms[1:])) - np.rint(n_per_decade*np.log10(ms[:-1]))) # Adjust ms size to fit above-defined mask ms = ms[:-1] assert len(ms) == len(keep) ms = ms[keep] taus = ms/float(rate) return ms, taus
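For example, thinning an "all"-style tau-list down to roughly 10 points per decade (a sketch; the exact points kept depend on the rounding described above):

import numpy as np

ms = np.arange(1, 1001)  # an "all"-style list of averaging factors
m, taus = tau_reduction(ms, rate=1.0, n_per_decade=10)
# m now holds on the order of 10 values per decade of averaging factor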
Remove results with small number of samples. If n is small (==1), reject the result Parameters ---------- taus: array List of tau values for which deviation were computed devs: array List of deviations deverrs: array or list of arrays List of estimated errors (possibly a list containing two arrays : upper and lower values) ns: array Number of samples for each point Returns ------- (taus, devs, deverrs, ns): tuple Identical to input, except that values with low ns have been removed.
def remove_small_ns(taus, devs, deverrs, ns): """ Remove results with small number of samples. If n is small (==1), reject the result Parameters ---------- taus: array List of tau values for which deviation were computed devs: array List of deviations deverrs: array or list of arrays List of estimated errors (possibly a list containing two arrays : upper and lower values) ns: array Number of samples for each point Returns ------- (taus, devs, deverrs, ns): tuple Identical to input, except that values with low ns have been removed. """ ns_big_enough = ns > 1 o_taus = taus[ns_big_enough] o_devs = devs[ns_big_enough] o_ns = ns[ns_big_enough] if isinstance(deverrs, list): assert len(deverrs) < 3 o_deverrs = [deverrs[0][ns_big_enough], deverrs[1][ns_big_enough]] else: o_deverrs = deverrs[ns_big_enough] if len(o_devs)==0: print("remove_small_ns() nothing remains!?") raise UserWarning return o_taus, o_devs, o_deverrs, o_ns
Trim leading and trailing NaNs from dataset

This is done by browsing the array from each end, storing the index of
the first non-NaN in each case, then returning the appropriate slice of
the array
def trim_data(x):
    """
    Trim leading and trailing NaNs from dataset

    This is done by browsing the array from each end, storing the index
    of the first non-NaN in each case, then returning the appropriate
    slice of the array
    """
    # Find indices for first and last valid data
    first = 0
    while np.isnan(x[first]):
        first += 1
    last = len(x)
    while np.isnan(x[last - 1]):
        last -= 1
    return x[first:last]
Three Cornered Hat Method Given three clocks A, B, C, we seek to find their variances :math:`\\sigma^2_A`, :math:`\\sigma^2_B`, :math:`\\sigma^2_C`. We measure three phase differences, assuming no correlation between the clocks, the measurements have variances: .. math:: \\sigma^2_{AB} = \\sigma^2_{A} + \\sigma^2_{B} \\sigma^2_{BC} = \\sigma^2_{B} + \\sigma^2_{C} \\sigma^2_{CA} = \\sigma^2_{C} + \\sigma^2_{A} Which allows solving for the variance of one clock as: .. math:: \\sigma^2_{A} = {1 \\over 2} ( \\sigma^2_{AB} + \\sigma^2_{CA} - \\sigma^2_{BC} ) and similarly cyclic permutations for :math:`\\sigma^2_B` and :math:`\\sigma^2_C` Parameters ---------- phasedata_ab: np.array phase measurements between clock A and B, in seconds phasedata_bc: np.array phase measurements between clock B and C, in seconds phasedata_ca: np.array phase measurements between clock C and A, in seconds rate: float The sampling rate for phase, in Hz taus: np.array The tau values for deviations, in seconds function: allantools deviation function The type of statistic to compute, e.g. allantools.oadev Returns ------- tau_ab: np.array Tau values corresponding to output deviations dev_a: np.array List of computed values for clock A References ---------- http://www.wriley.com/3-CornHat.htm
def three_cornered_hat_phase(phasedata_ab, phasedata_bc, phasedata_ca, rate, taus, function): """ Three Cornered Hat Method Given three clocks A, B, C, we seek to find their variances :math:`\\sigma^2_A`, :math:`\\sigma^2_B`, :math:`\\sigma^2_C`. We measure three phase differences, assuming no correlation between the clocks, the measurements have variances: .. math:: \\sigma^2_{AB} = \\sigma^2_{A} + \\sigma^2_{B} \\sigma^2_{BC} = \\sigma^2_{B} + \\sigma^2_{C} \\sigma^2_{CA} = \\sigma^2_{C} + \\sigma^2_{A} Which allows solving for the variance of one clock as: .. math:: \\sigma^2_{A} = {1 \\over 2} ( \\sigma^2_{AB} + \\sigma^2_{CA} - \\sigma^2_{BC} ) and similarly cyclic permutations for :math:`\\sigma^2_B` and :math:`\\sigma^2_C` Parameters ---------- phasedata_ab: np.array phase measurements between clock A and B, in seconds phasedata_bc: np.array phase measurements between clock B and C, in seconds phasedata_ca: np.array phase measurements between clock C and A, in seconds rate: float The sampling rate for phase, in Hz taus: np.array The tau values for deviations, in seconds function: allantools deviation function The type of statistic to compute, e.g. allantools.oadev Returns ------- tau_ab: np.array Tau values corresponding to output deviations dev_a: np.array List of computed values for clock A References ---------- http://www.wriley.com/3-CornHat.htm """ (tau_ab, dev_ab, err_ab, ns_ab) = function(phasedata_ab, data_type='phase', rate=rate, taus=taus) (tau_bc, dev_bc, err_bc, ns_bc) = function(phasedata_bc, data_type='phase', rate=rate, taus=taus) (tau_ca, dev_ca, err_ca, ns_ca) = function(phasedata_ca, data_type='phase', rate=rate, taus=taus) var_ab = dev_ab * dev_ab var_bc = dev_bc * dev_bc var_ca = dev_ca * dev_ca assert len(var_ab) == len(var_bc) == len(var_ca) var_a = 0.5 * (var_ab + var_ca - var_bc) var_a[var_a < 0] = 0 # don't return imaginary deviations (?) dev_a = np.sqrt(var_a) err_a = [d/np.sqrt(nn) for (d, nn) in zip(dev_a, ns_ab)] return tau_ab, dev_a, err_a, ns_ab
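A usage sketch with three simulated clocks, using allantools.oadev as the statistic (the noise levels are illustrative and the import path is assumed):

import numpy as np
import allantools  # assumed import path

N = 10000
a = 1e-9 * np.random.randn(N)  # clock A phase, seconds
b = 2e-9 * np.random.randn(N)  # clock B
c = 3e-9 * np.random.randn(N)  # clock C
taus, dev_a, err_a, ns = allantools.three_cornered_hat_phase(
    a - b, b - c, c - a, 1.0, "octave", allantools.oadev)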
integrate fractional frequency data and output phase data Parameters ---------- freqdata: np.array Data array of fractional frequency measurements (nondimensional) rate: float The sampling rate for phase or frequency, in Hz Returns ------- phasedata: np.array Time integral of fractional frequency data, i.e. phase (time) data in units of seconds. For phase in units of radians, see phase2radians()
def frequency2phase(freqdata, rate): """ integrate fractional frequency data and output phase data Parameters ---------- freqdata: np.array Data array of fractional frequency measurements (nondimensional) rate: float The sampling rate for phase or frequency, in Hz Returns ------- phasedata: np.array Time integral of fractional frequency data, i.e. phase (time) data in units of seconds. For phase in units of radians, see phase2radians() """ dt = 1.0 / float(rate) # Protect against NaN values in input array (issue #60) # Reintroduces data trimming as in commit 503cb82 freqdata = trim_data(freqdata) phasedata = np.cumsum(freqdata) * dt phasedata = np.insert(phasedata, 0, 0) # FIXME: why do we do this? # so that phase starts at zero and len(phase)=len(freq)+1 ?? return phasedata
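A round-trip check sketch: first differences of the returned phase recover the input frequency data (assuming no NaNs, so trim_data is a no-op):

import numpy as np

freq = 1e-12 * np.random.randn(100)      # fractional frequency, 1 Hz sampling
phase = frequency2phase(freq, rate=1.0)  # len(phase) == len(freq) + 1
assert np.allclose(np.diff(phase) * 1.0, freq)  # diff * rate recovers freq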
Convert phase in seconds to phase in radians Parameters ---------- phasedata: np.array Data array of phase in seconds v0: float Nominal oscillator frequency in Hz Returns ------- fi: phase data in radians
def phase2radians(phasedata, v0): """ Convert phase in seconds to phase in radians Parameters ---------- phasedata: np.array Data array of phase in seconds v0: float Nominal oscillator frequency in Hz Returns ------- fi: phase data in radians """ fi = [2*np.pi*v0*xx for xx in phasedata] return fi
Convert frequency in Hz to fractional frequency Parameters ---------- frequency: np.array Data array of frequency in Hz mean_frequency: float (optional) The nominal mean frequency, in Hz if omitted, defaults to mean frequency=np.mean(frequency) Returns ------- y: Data array of fractional frequency
def frequency2fractional(frequency, mean_frequency=-1): """ Convert frequency in Hz to fractional frequency Parameters ---------- frequency: np.array Data array of frequency in Hz mean_frequency: float (optional) The nominal mean frequency, in Hz if omitted, defaults to mean frequency=np.mean(frequency) Returns ------- y: Data array of fractional frequency """ if mean_frequency == -1: mu = np.mean(frequency) else: mu = mean_frequency y = [(x-mu)/mu for x in frequency] return y
Optional method if you choose not to set inputs on init

Parameters
----------
data: np.array
    Input data. Provide either phase or frequency (fractional,
    adimensional)
rate: float
    The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
    Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
    Array of tau values, in seconds, for which to compute statistic.
    Optionally set taus=["all"|"octave"|"decade"] for automatic
    tau-list generation.
def set_input(self, data,
              rate=1.0, data_type="phase", taus=None):
    """ Optional method if you choose not to set inputs on init

    Parameters
    ----------
    data: np.array
        Input data. Provide either phase or frequency (fractional,
        adimensional)
    rate: float
        The sampling rate for data, in Hz. Defaults to 1.0
    data_type: {'phase', 'freq'}
        Data type, i.e. phase or frequency. Defaults to "phase".
    taus: np.array
        Array of tau values, in seconds, for which to compute statistic.
        Optionally set taus=["all"|"octave"|"decade"] for automatic
        tau-list generation.
    """
    self.inp["data"] = data
    self.inp["rate"] = rate
    self.inp["data_type"] = data_type
    self.inp["taus"] = taus
Evaluate the passed function with the supplied data. Stores result in self.out. Parameters ---------- function: str Name of the :mod:`allantools` function to evaluate Returns ------- result: dict The results of the calculation.
def compute(self, function): """Evaluate the passed function with the supplied data. Stores result in self.out. Parameters ---------- function: str Name of the :mod:`allantools` function to evaluate Returns ------- result: dict The results of the calculation. """ try: func = getattr(allantools, function) except AttributeError: raise AttributeError("function must be defined in allantools") whitelisted = ["theo1", "mtie", "tierms"] if function[-3:] != "dev" and function not in whitelisted: # this should probably raise a custom exception type so # it's easier to distinguish from other bad things raise RuntimeError("function must be one of the 'dev' functions") result = func(self.inp["data"], rate=self.inp["rate"], data_type=self.inp["data_type"], taus=self.inp["taus"]) keys = ["taus", "stat", "stat_err", "stat_n"] result = {key: result[i] for i, key in enumerate(keys)} self.out = result.copy() self.out["stat_id"] = function return result
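A hedged usage sketch, assuming this method lives on allantools.Dataset (as set_input above suggests):

import numpy as np
import allantools  # assumed import path

phase = 1e-9 * np.cumsum(np.random.randn(1000))
ds = allantools.Dataset(data=phase, rate=1.0, data_type="phase", taus="octave")
result = ds.compute("oadev")  # any *dev function, or theo1/mtie/tierms
print(result["taus"], result["stat"])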
compute average of many PSDs
def many_psds(k=2, fs=1.0, b0=1.0, N=1024):
    """ compute average of many PSDs """
    psd = []
    for j in range(k):
        print(j)
        x = noise.white(N=2*4096, b0=b0, fs=fs)
        f, tmp = noise.numpy_psd(x, fs)
        if j == 0:
            psd = tmp
        else:
            psd = psd + tmp
    return f, psd/k
Find organizations that have the current identity as the owner or as a member
def list_my(self):
    """ Find organizations that have the current identity as the owner or as a member """
    org_list = self.call_contract_command("Registry", "listOrganizations", [])

    rez_owner = []
    rez_member = []
    for idx, org_id in enumerate(org_list):
        (found, org_id, org_name, owner, members, serviceNames, repositoryNames) = self.call_contract_command("Registry", "getOrganizationById", [org_id])
        if (not found):
            raise Exception("Organization was removed during this call. Please retry.")
        if self.ident.address == owner:
            rez_owner.append((org_name, bytes32_to_str(org_id)))

        if self.ident.address in members:
            rez_member.append((org_name, bytes32_to_str(org_id)))

    if (rez_owner):
        self._printout("# Organizations you are the owner of")
        self._printout("# OrgName OrgId")
        for n, i in rez_owner:
            self._printout("%s   %s" % (n, i))

    if (rez_member):
        self._printout("# Organizations you are the member of")
        self._printout("# OrgName OrgId")
        for n, i in rez_member:
            self._printout("%s   %s" % (n, i))
Return new group_id in base64
def add_group(self, group_name, payment_address): """ Return new group_id in base64 """ if (self.is_group_name_exists(group_name)): raise Exception("the group \"%s\" is already present"%str(group_name)) group_id_base64 = base64.b64encode(secrets.token_bytes(32)) self.m["groups"] += [{"group_name" : group_name , "group_id" : group_id_base64.decode("ascii"), "payment_address" : payment_address}] return group_id_base64
check if a group with the given name already exists
def is_group_name_exists(self, group_name):
    """ check if a group with the given name already exists """
    groups = self.m["groups"]
    for g in groups:
        if (g["group_name"] == group_name):
            return True
    return False
return group with given group_id (return None if it doesn't exist)
def get_group_by_group_id(self, group_id):
    """ return group with given group_id (return None if it doesn't exist) """
    group_id_base64 = base64.b64encode(group_id).decode('ascii')
    groups = self.m["groups"]
    for g in groups:
        if (g["group_id"] == group_id_base64):
            return g
    return None
In all getter functions, in the case of a single payment group, group_name can be None
def get_group_name_nonetrick(self, group_name=None):
    """ In all getter functions, in the case of a single payment group, group_name can be None """
    groups = self.m["groups"]
    if (len(groups) == 0):
        raise Exception("Cannot find any groups in metadata")
    if (not group_name):
        if (len(groups) > 1):
            raise Exception("We have more than one payment group in metadata, so group_name should be specified")
        return groups[0]["group_name"]
    return group_name
make tar from protodir/*.proto, and publish this tar in ipfs;
return base58 encoded ipfs hash
def publish_proto_in_ipfs(ipfs_client, protodir):
    """ make tar from protodir/*.proto, and publish this tar in ipfs;
        return base58 encoded ipfs hash
    """

    if (not os.path.isdir(protodir)):
        raise Exception("Directory %s doesn't exist" % protodir)

    files = glob.glob(os.path.join(protodir, "*.proto"))

    if (len(files) == 0):
        raise Exception("Cannot find any %s files" % (os.path.join(protodir, "*.proto")))

    # We are sorting files before we add them to the .tar since an archive
    # containing the same files in a different order will produce a
    # different content hash.
    files.sort()

    tarbytes = io.BytesIO()
    tar = tarfile.open(fileobj=tarbytes, mode="w")
    for f in files:
        tar.add(f, os.path.basename(f))
    tar.close()
    return ipfs_client.add_bytes(tarbytes.getvalue())
Get file from ipfs.
We must check the hash because we cannot assume that the ipfs_client
hasn't been compromised.
def get_from_ipfs_and_checkhash(ipfs_client, ipfs_hash_base58, validate=True):
    """ Get file from ipfs.
        We must check the hash because we cannot assume that the
        ipfs_client hasn't been compromised.
    """
    if validate:
        from snet_cli.resources.proto.unixfs_pb2 import Data
        from snet_cli.resources.proto.merckledag_pb2 import MerkleNode

        # No nice Python library to parse ipfs blocks, so do it ourselves.
        block_data = ipfs_client.block_get(ipfs_hash_base58)
        mn = MerkleNode()
        mn.ParseFromString(block_data)
        unixfs_data = Data()
        unixfs_data.ParseFromString(mn.Data)
        assert unixfs_data.Type == unixfs_data.DataType.Value('File'), "IPFS hash must be a file"
        data = unixfs_data.Data

        # multihash has a badly registered base58 codec, overwrite it...
        multihash.CodecReg.register('base58', base58.b58encode, base58.b58decode)
        # create a multihash object from our ipfs hash
        mh = multihash.decode(ipfs_hash_base58.encode('ascii'), 'base58')

        # Convenience method lets us directly use a multihash to verify data
        if not mh.verify(block_data):
            raise Exception("IPFS hash mismatch with data")
    else:
        data = ipfs_client.cat(ipfs_hash_base58)

    return data
Convert a hash to the bytes URI format used in the Registry contract
def hash_to_bytesuri(s):
    """ Convert a hash to the bytes URI format used in the Registry contract """
    # TODO: we should pad string with zeros till closest 32 bytes word because of a bug in processReceipt (in snet_cli.contract.process_receipt)
    s = "ipfs://" + s
    return s.encode("ascii").ljust(32 * (len(s)//32 + 1), b"\0")
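A small illustration of the padding behaviour (the hash string below is a placeholder; the function does not validate it):

uri = hash_to_bytesuri("QmExampleHashValueForIllustration0000000000000")
# "ipfs://" + hash, NUL-padded up to the next multiple of 32 bytes
assert uri.startswith(b"ipfs://")
assert len(uri) % 32 == 0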
Tar files might be dangerous (see https://bugs.python.org/issue21109,
and https://docs.python.org/3/library/tarfile.html, TarFile.extractall
warning), so we extract only simple files
def safe_extract_proto_from_ipfs(ipfs_client, ipfs_hash, protodir):
    """ Tar files might be dangerous (see https://bugs.python.org/issue21109,
        and https://docs.python.org/3/library/tarfile.html, TarFile.extractall
        warning), so we extract only simple files
    """
    spec_tar = get_from_ipfs_and_checkhash(ipfs_client, ipfs_hash)
    with tarfile.open(fileobj=io.BytesIO(spec_tar)) as f:
        for m in f.getmembers():
            if (os.path.dirname(m.name) != ""):
                raise Exception("tarball contains directories, which we do not support")
            if (not m.isfile()):
                raise Exception("tarball contains %s which is not a file" % m.name)
            fullname = os.path.join(protodir, m.name)
            if (os.path.exists(fullname)):
                raise Exception("%s already exists." % fullname)
        # now it is safe to call extractall
        f.extractall(protodir)
import protobuf and return stub and request class
def _get_stub_and_request_classes(self, service_name): """ import protobuf and return stub and request class """ # Compile protobuf if needed codegen_dir = Path.home().joinpath(".snet", "mpe_client", "control_service") proto_dir = Path(__file__).absolute().parent.joinpath("resources", "proto") if (not codegen_dir.joinpath("control_service_pb2.py").is_file()): compile_proto(proto_dir, codegen_dir, proto_file = "control_service.proto") stub_class, request_class, _ = import_protobuf_from_dir(codegen_dir, service_name) return stub_class, request_class
Safely run StartClaim for given channels
def _start_claim_channels(self, grpc_channel, channels_ids): """ Safely run StartClaim for given channels """ unclaimed_payments = self._call_GetListUnclaimed(grpc_channel) unclaimed_payments_dict = {p["channel_id"] : p for p in unclaimed_payments} to_claim = [] for channel_id in channels_ids: if (channel_id not in unclaimed_payments_dict or unclaimed_payments_dict[channel_id]["amount"] == 0): self._printout("There is nothing to claim for channel %i, we skip it"%channel_id) continue blockchain = self._get_channel_state_from_blockchain(channel_id) if (unclaimed_payments_dict[channel_id]["nonce"] != blockchain["nonce"]): self._printout("Old payment for channel %i is still in progress. Please run claim for this channel later."%channel_id) continue to_claim.append((channel_id, blockchain["nonce"])) payments = [self._call_StartClaim(grpc_channel, channel_id, nonce) for channel_id, nonce in to_claim] return payments
Claim all payments currently in progress, and afterwards claim the given channels
def _claim_in_progress_and_claim_channels(self, grpc_channel, channels):
    """ Claim all payments currently in progress, and afterwards claim the given channels """
    # first we get the list of all 'payments in progress' in case we 'lost' some payments.
    payments = self._call_GetListInProgress(grpc_channel)
    if (len(payments) > 0):
        self._printout("There are %i payments in 'progress' (they haven't been claimed in blockchain). We will claim them." % len(payments))
        self._blockchain_claim(payments)
    payments = self._start_claim_channels(grpc_channel, channels)
    self._blockchain_claim(payments)
Create default configuration if config file does not exist
def create_default_config(self): """ Create default configuration if config file does not exist """ # make config directory with the minimal possible permission self._config_file.parent.mkdir(mode=0o700, exist_ok=True) self["network.kovan"] = {"default_eth_rpc_endpoint": "https://kovan.infura.io", "default_gas_price" : "medium"} self["network.mainnet"] = {"default_eth_rpc_endpoint": "https://mainnet.infura.io", "default_gas_price" : "medium"} self["network.ropsten"] = {"default_eth_rpc_endpoint": "https://ropsten.infura.io", "default_gas_price" : "medium"} self["network.rinkeby"] = {"default_eth_rpc_endpoint": "https://rinkeby.infura.io", "default_gas_price" : "medium"} self["ipfs"] = {"default_ipfs_endpoint": "http://ipfs.singularitynet.io:80"} self["session"] = { "network": "kovan" } self._persist() print("We've created configuration file with default values in: %s\n"%str(self._config_file))
Dynamic import of grpc-protobuf from given directory (proto_dir)
service_name should be provided only in the case of conflicting method
names (two methods with the same name in different services).
Return stub_class, request_class, response_class
! We need response_class only for json payload encoding !
def import_protobuf_from_dir(proto_dir, method_name, service_name=None):
    """ Dynamic import of grpc-protobuf from given directory (proto_dir)
        service_name should be provided only in the case of conflicting
        method names (two methods with the same name in different services).
        Return stub_class, request_class, response_class
        ! We need response_class only for json payload encoding !
    """
    proto_dir = Path(proto_dir)
    # <SERVICE>_pb2_grpc.py imports <SERVICE>_pb2.py so we are forced to add proto_dir to path
    sys.path.append(str(proto_dir))
    grpc_pyfiles = [str(os.path.basename(p)) for p in proto_dir.glob("*_pb2_grpc.py")]

    good_rez = []
    for grpc_pyfile in grpc_pyfiles:
        is_found, rez = _import_protobuf_from_file(grpc_pyfile, method_name, service_name)
        if (is_found):
            good_rez.append(rez)
    if (len(good_rez) == 0):
        raise Exception("Error while loading protobuf. Cannot find method=%s" % method_name)
    if (len(good_rez) > 1):
        if (service_name):
            raise Exception("Error while loading protobuf. Found method %s.%s in multiple .proto files. We don't support packages yet!" % (service_name, method_name))
        else:
            raise Exception("Error while loading protobuf. Found method %s in multiple .proto files. You could try to specify service_name." % method_name)
    return good_rez[0]
helper function which tries to import a method from the given _pb2_grpc.py file
service_name should be provided only in case of a name conflict
return (False, None) in case of failure
return (True, (stub_class, request_class, response_class)) in case of success
def _import_protobuf_from_file(grpc_pyfile, method_name, service_name=None):
    """ helper function which tries to import a method from the given _pb2_grpc.py file
        service_name should be provided only in case of a name conflict
        return (False, None) in case of failure
        return (True, (stub_class, request_class, response_class)) in case of success
    """
    prefix = grpc_pyfile[:-12]
    pb2 = __import__("%s_pb2" % prefix)
    pb2_grpc = __import__("%s_pb2_grpc" % prefix)

    # we take all objects from pb2_grpc module which end with "Stub", and we remove this postfix to get service_name
    all_service_names = [stub_name[:-4] for stub_name in dir(pb2_grpc) if stub_name.endswith("Stub")]

    # if service_name was specified we take only this service_name
    if (service_name):
        if (service_name not in all_service_names):
            return False, None
        all_service_names = [service_name]

    found_services = []
    for service_name in all_service_names:
        service_descriptor = getattr(pb2, "DESCRIPTOR").services_by_name[service_name]
        for method in service_descriptor.methods:
            if (method.name == method_name):
                request_class = method.input_type._concrete_class
                response_class = method.output_type._concrete_class
                stub_class = getattr(pb2_grpc, "%sStub" % service_name)
                found_services.append(service_name)

    if (len(found_services) == 0):
        return False, None
    if (len(found_services) > 1):
        raise Exception("Error while loading protobuf. We found methods %s in multiple services [%s]."
                        " You should specify service_name." % (method_name, ", ".join(found_services)))
    return True, (stub_class, request_class, response_class)
Switch payload encoding to JSON for GRPC call
def switch_to_json_payload_encoding(call_fn, response_class): """ Switch payload encoding to JSON for GRPC call """ def json_serializer(*args, **kwargs): return bytes(json_format.MessageToJson(args[0], True, preserving_proto_field_name=True), "utf-8") def json_deserializer(*args, **kwargs): resp = response_class() json_format.Parse(args[0], resp, True) return resp call_fn._request_serializer = json_serializer call_fn._response_deserializer = json_deserializer
possible modifiers: file, b64encode, b64decode
format: modifier1@modifier2@...@k_final
def _transform_call_params(self, params):
    """ possible modifiers: file, b64encode, b64decode
        format: modifier1@modifier2@...@k_final
    """
    rez = {}
    for k, v in params.items():
        if isinstance(v, dict):
            v = self._transform_call_params(v)
            k_final = k
        else:
            # k = modifier1@modifier2@...@k_final
            k_split = k.split("@")
            k_final = k_split[-1]
            k_mods = k_split[:-1]
            for m in k_mods:
                if (m == "file"):
                    with open(v, 'rb') as f:
                        v = f.read()
                elif (m == "b64encode"):
                    v = base64.b64encode(v)
                elif (m == "b64decode"):
                    v = base64.b64decode(v)
                else:
                    raise Exception("Unknown modifier ('%s') in call parameters. Possible modifiers: file, b64encode, b64decode" % m)
        rez[k_final] = v
    return rez
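A standalone sketch of the same modifier logic (a hypothetical helper, omitting the 'file' modifier so it runs without touching disk):

import base64

def transform_call_params(params):
    """Illustrative re-implementation of the modifier handling above."""
    rez = {}
    for k, v in params.items():
        if isinstance(v, dict):
            rez[k] = transform_call_params(v)  # recurse into nested dicts
            continue
        *mods, k_final = k.split("@")  # modifiers first, real key last
        for m in mods:
            if m == "b64encode":
                v = base64.b64encode(v)
            elif m == "b64decode":
                v = base64.b64decode(v)
        rez[k_final] = v
    return rez

print(transform_call_params({"b64decode@payload": "aGVsbG8=", "plain": 42}))
# -> {'payload': b'hello', 'plain': 42}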
We get the state of the channel (nonce, amount, unspent_amount)
We do it by securely combining information from the server and the blockchain
https://github.com/singnet/wiki/blob/master/multiPartyEscrowContract/MultiPartyEscrow_stateless_client.md
def _get_channel_state_statelessly(self, grpc_channel, channel_id):
    """
    We get the state of the channel (nonce, amount, unspent_amount)
    We do it by securely combining information from the server and the blockchain
    https://github.com/singnet/wiki/blob/master/multiPartyEscrowContract/MultiPartyEscrow_stateless_client.md
    """
    server = self._get_channel_state_from_server(grpc_channel, channel_id)
    blockchain = self._get_channel_state_from_blockchain(channel_id)

    if (server["current_nonce"] == blockchain["nonce"]):
        unspent_amount = blockchain["value"] - server["current_signed_amount"]
    else:
        unspent_amount = None  # in this case we cannot securely define unspent_amount yet

    return (server["current_nonce"], server["current_signed_amount"], unspent_amount)
Print balance of ETH, AGI, and MPE wallet
def print_agi_and_mpe_balances(self): """ Print balance of ETH, AGI, and MPE wallet """ if (self.args.account): account = self.args.account else: account = self.ident.address eth_wei = self.w3.eth.getBalance(account) agi_cogs = self.call_contract_command("SingularityNetToken", "balanceOf", [account]) mpe_cogs = self.call_contract_command("MultiPartyEscrow", "balances", [account]) # we cannot use _pprint here because it doesn't conserve order yet self._printout(" account: %s"%account) self._printout(" ETH: %s"%self.w3.fromWei(eth_wei, 'ether')) self._printout(" AGI: %s"%cogs2stragi(agi_cogs)) self._printout(" MPE: %s"%cogs2stragi(mpe_cogs))
Publish proto files in ipfs and print hash
def publish_proto_in_ipfs(self): """ Publish proto files in ipfs and print hash """ ipfs_hash_base58 = utils_ipfs.publish_proto_in_ipfs(self._get_ipfs_client(), self.args.protodir) self._printout(ipfs_hash_base58)
Publish protobuf model in ipfs and update existing metadata file
def publish_proto_metadata_update(self): """ Publish protobuf model in ipfs and update existing metadata file """ metadata = load_mpe_service_metadata(self.args.metadata_file) ipfs_hash_base58 = utils_ipfs.publish_proto_in_ipfs(self._get_ipfs_client(), self.args.protodir) metadata.set_simple_field("model_ipfs_hash", ipfs_hash_base58) metadata.save_pretty(self.args.metadata_file)
Metadata: add endpoint to the group
def metadata_add_endpoints(self): """ Metadata: add endpoint to the group """ metadata = load_mpe_service_metadata(self.args.metadata_file) group_name = metadata.get_group_name_nonetrick(self.args.group_name) for endpoint in self.args.endpoints: metadata.add_endpoint(group_name, endpoint) metadata.save_pretty(self.args.metadata_file)
Metadata: remove all endpoints from all groups
def metadata_remove_all_endpoints(self): """ Metadata: remove all endpoints from all groups """ metadata = load_mpe_service_metadata(self.args.metadata_file) metadata.remove_all_endpoints() metadata.save_pretty(self.args.metadata_file)
Metadata: Remove all endpoints from the group and add new ones
def metadata_update_endpoints(self): """ Metadata: Remove all endpoints from the group and add new ones """ metadata = load_mpe_service_metadata(self.args.metadata_file) group_name = metadata.get_group_name_nonetrick(self.args.group_name) metadata.remove_all_endpoints_for_group(group_name) for endpoint in self.args.endpoints: metadata.add_endpoint(group_name, endpoint) metadata.save_pretty(self.args.metadata_file)
get persistent storage for mpe
def _get_persistent_mpe_dir(self): """ get persistent storage for mpe """ mpe_address = self.get_mpe_address().lower() registry_address = self.get_registry_address().lower() return Path.home().joinpath(".snet", "mpe_client", "%s_%s"%(mpe_address, registry_address))
return {channel_id: channel}
def _get_initialized_channels_dict_for_service(self, org_id, service_id):
    '''return {channel_id: channel}'''
    fn = self._get_channels_info_file(org_id, service_id)
    if (os.path.isfile(fn)):
        # use a context manager so the file handle is closed
        with open(fn, "rb") as f:
            return pickle.load(f)
    else:
        return {}