code | docs
---|---|
def open(path, vcs=None):
import os
assert os.path.isdir(path), path + ' is not a directory'
vcs = vcs or probe(path)
cls = _get_repo_class(vcs)
return cls(path) | Open an existing repository
:param str path: The path of the repository
:param vcs: If specified, assume the given repository type to avoid
auto-detection. Either ``git``, ``hg``, or ``svn``.
:raises UnknownVCSType: if the repository type couldn't be inferred
If ``vcs`` is not specified, it is inferred via :func:`probe`. |
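A minimal usage sketch (the checkout path is hypothetical; ``probe`` and the repository classes are assumed to live alongside this function):
repo = open('/path/to/checkout')             # VCS type inferred via probe()
repo = open('/path/to/checkout', vcs='git')  # skip auto-detection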
def _check_attributes(self, attributes, extra=None):
extra = extra or ()
unknown_keys = set(attributes) - set(self._possible_attributes) - set(extra)
if unknown_keys:
logger.warning('%s got unknown attributes: %s' %
(self.__class__.__name__, unknown_keys)) | Check if attributes given to the constructor can be used to
instantiate a valid node. |
def get(self, name, strict=True):
if not isinstance(name, str) or name.startswith('_'):
raise AttributeError(self.__class__.__name__, name)
elif strict and name not in self._possible_attributes:
raise AttributeError('%s is not a valid attribute of %r.' %
(name, self))
elif name in self._attributes:
return self._attributes[name]
else:
raise exceptions.AttributeNotProvided(name) | Get an attribute of the holder (read-only access). |
def addMonths(date, months):
# map from Qt information
if type(date).__name__ in ('QDate', 'QDateTime', 'QTime'):
date = date.toPython()
if months == 0:
return date
mult = months // abs(months)
years = mult * (abs(months) // 12)
months = mult * (abs(months) % 12)
# calculate the new month
month = date.month + months
if month < 1:
years -= 1
month = 12 + month
elif 12 < month:
years += 1
month %= 12
# calculate the new year
year = date.year + years
# calculate the new day
check = datetime.date(year, month, 1)
days = daysInMonth(check)
return datetime.date(year, month, min(date.day, days)) | Returns the new date based on the inputted months.
:param date | <datetime.date>
months | <int>
:return <datetime.date> |
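A brief usage sketch, assuming addMonths and daysInMonth live in the same module (the docstrings reference projex.dates):
import datetime
# adding one month to 31 January clamps the day to the length of February
print(addMonths(datetime.date(2024, 1, 31), 1))   # 2024-02-29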
def addYears(date, years):
# map from Qt information
if type(date).__name__ in ('QDate', 'QDateTime', 'QTime'):
date = date.toPython()
return datetime.date(date.year + years, date.month, date.day) | Returns the new date based on the inputted number of years.
:param date | <datetime.date>
years | <int>
:return <datetime.date> |
def daysInMonth(date):
# map from Qt information
if type(date).__name__ in ('QDate', 'QDateTime', 'QTime'):
date = date.toPython()
month = date.month
# look for a leap year
if month == 2 and (date.year % 4 == 0 and (date.year % 100 != 0 or date.year % 400 == 0)):
return 29
return DaysInMonth.get(month, -1) | Returns the number of the days in the month for the given date. This will
take into account leap years based on the inputted date's year.
:param date | <datetime.date>
:return <int> |
def daysInYear(date):
# map from Qt information
if type(date).__name__ in ('QDate', 'QDateTime', 'QTime'):
date = date.toPython()
if type(date) != int:
year = date.year
else:
year = date
if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
return 366
return 365 | Returns the number of days in the year for the given date.
:param date | <datetime.date> || <int>
:return <int> |
def displayName(date, options=None, format='%b %d, %Y'):
# map from Qt information
if type(date).__name__ in ('QDate', 'QDateTime', 'QTime'):
date = date.toPython()
if isinstance(date, datetime.datetime):
time = ' @ ' + date.strftime('%I:%M%p').strip('0M').lower()
date = date.date()
else:
time = ''
today = datetime.date.today()
delta = date - today
if delta.days == 0:
return 'Today' + time
elif delta.days == -1:
return 'Yesterday' + time
elif delta.days == 1:
return 'Tomorrow' + time
elif abs(delta.days) < 8:
# look for different weeks
if date.isocalendar()[1] != today.isocalendar()[1]:
qualifier = 'Last ' if delta.days < 0 else 'Next '
else:
qualifier = ''
return qualifier + date.strftime('%A') + time
else:
return date.strftime(format) | Returns the display name for the inputted date, given the list of options.
:param date | <datetime.date>
options | <projex.dates.Names>
format | <str>
:return <str> |
def weekdays(start, end):
# don't bother calculating anything for the same inputted date
if start == end:
return int(start.isoweekday() not in (6, 7))
elif end < start:
return -weekdays(end, start)
else:
strt_weekday = start.isoweekday()
end_weekday = end.isoweekday()
# calculate from the monday after the start
if 5 < strt_weekday:
start = start + datetime.timedelta(days=8 - strt_weekday)
# calculate from the friday before the end
if 5 < end_weekday:
end = end - datetime.timedelta(days=end_weekday - 5)
remainder = end.isoweekday() - start.isoweekday()
end = end - datetime.timedelta(days=remainder)
# if the end is now before the start, then both dates fell on a weekend
if end < start:
return 0
# otherwise, if the dates normalized to each other, then return the
# remainder
elif end == start:
return remainder + 1
# remove the number of weekends from the start and end dates
days = ((end - start).days + 1)
total_days = abs(days)
multiplier = days / total_days
weekends = int(round(total_days / 7.0) * 2)
week_days = ((total_days - weekends) + remainder) * multiplier
return week_days | Returns the number of weekdays between the inputted start and end dates.
This would be the equivalent of doing (end - start) to get the number of
calendar days between the two dates.
:param start | <datetime.date>
end | <datetime.date>
:return <int> |
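A worked example, assuming weekdays is imported from this module:
import datetime
# Monday 2024-01-01 through Friday 2024-01-05 spans five weekdays
print(weekdays(datetime.date(2024, 1, 1), datetime.date(2024, 1, 5)))   # 5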
def main(args=None):
if args is None:
args = tag.cli.parser().parse_args()
assert args.cmd in mains
mainmethod = mains[args.cmd]
mainmethod(args) | Entry point for the tag CLI.
Isolated as a method so that the CLI can be called by other Python code
(e.g. for testing), in which case the arguments are passed to the function.
If no arguments are passed to the function, parse them from the command
line. |
def _build_request(request):
msg = bytes([request['cmd']])
if 'dest' in request:
msg += bytes([request['dest']])
else:
msg += b'\0'
if 'sha' in request:
msg += request['sha']
else:
msg += b'0' * 64
logging.debug("Request (%d): %s", len(msg), msg)
return msg | Build message to transfer over the socket from a request. |
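A short sketch of the resulting wire format (the command value 3 is an illustrative placeholder, not a real constant from this module):
req = _build_request({'cmd': 3, 'sha': b'0' * 64})
# req[0] is the command byte, req[1] the destination byte (0 when 'dest' is absent),
# and req[2:66] the 64-byte sha field, giving a 66-byte message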
def main():
__async__ = True
logging.basicConfig(format="%(levelname)-10s %(message)s",
level=logging.DEBUG)
if len(sys.argv) != 2:
logging.error("Must specify configuration file")
sys.exit()
config = configparser.ConfigParser()
config.read(sys.argv[1])
password = config.get('default', 'password')
if __async__:
client = Client(config.get('default', 'host'),
config.getint('default', 'port'), password, _callback)
else:
client = Client(config.get('default', 'host'),
config.getint('default', 'port'),
password)
status = client.messages()
msg = status[0]
print(msg)
print(client.mp3(msg['sha'].encode('utf-8')))
while True:
continue | Show example using the API. |
def start(self):
if not self._thread:
logging.info("Starting asterisk mbox thread")
# Ensure signal queue is empty
try:
while True:
self.signal.get(False)
except queue.Empty:
pass
self._thread = threading.Thread(target=self._loop)
self._thread.daemon = True
self._thread.start() | Start thread. |
def stop(self):
if self._thread:
self.signal.put("Stop")
self._thread.join()
if self._soc:
self._soc.shutdown()
self._soc.close()
self._thread = None | Stop thread. |
def _connect(self):
self._soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._soc.connect((self._ipaddr, self._port))
self._soc.send(_build_request({'cmd': cmd.CMD_MESSAGE_PASSWORD,
'sha': self._password})) | Connect to server. |
def _recv_msg(self):
command = ord(recv_blocking(self._soc, 1))
msglen = recv_blocking(self._soc, 4)
msglen = ((msglen[0] << 24) + (msglen[1] << 16) +
(msglen[2] << 8) + msglen[3])
msg = recv_blocking(self._soc, msglen)
return command, msg | Read a message from the server. |
def _loop(self):
request = {}
connected = False
while True:
timeout = None
sockets = [self.request_queue, self.signal]
if not connected:
try:
self._clear_request(request)
self._connect()
self._soc.send(_build_request(
{'cmd': cmd.CMD_MESSAGE_LIST}))
self._soc.send(_build_request(
{'cmd': cmd.CMD_MESSAGE_CDR_AVAILABLE}))
connected = True
except ConnectionRefusedError:
timeout = 5.0
if connected:
sockets.append(self._soc)
readable, _writable, _errored = select.select(
sockets, [], [], timeout)
if self.signal in readable:
break
if self._soc in readable:
# We have incoming data
try:
command, msg = self._recv_msg()
self._handle_msg(command, msg, request)
except (RuntimeError, ConnectionResetError):
logging.warning("Lost connection")
connected = False
self._clear_request(request)
if self.request_queue in readable:
request = self.request_queue.get()
self.request_queue.task_done()
if not connected:
self._clear_request(request)
else:
if (request['cmd'] == cmd.CMD_MESSAGE_LIST and
self._status and
(not self._callback or 'sync' in request)):
self.result_queue.put(
[cmd.CMD_MESSAGE_LIST, self._status])
request = {}
else:
self._soc.send(_build_request(request)) | Handle data. |
def mp3(self, sha, **kwargs):
return self._queue_msg({'cmd': cmd.CMD_MESSAGE_MP3,
'sha': _get_bytes(sha)}, **kwargs) | Get raw MP3 of a message. |
def delete(self, sha, **kwargs):
return self._queue_msg({'cmd': cmd.CMD_MESSAGE_DELETE,
'sha': _get_bytes(sha)}, **kwargs) | Delete a message. |
def get_cdr(self, start=0, count=-1, **kwargs):
sha = encode_to_sha("{:d},{:d}".format(start, count))
return self._queue_msg({'cmd': cmd.CMD_MESSAGE_CDR,
'sha': sha}, **kwargs) | Request range of CDR messages |
def path(self) -> Path:
args = list(self._iter_translated_field_names(self.get_path_pattern_list()))
args.append(self.get_name())
return Path(*args) | A Path for this name object joining field names from `self.get_path_pattern_list` with this object's name |
def fold(self, predicate):
children = {x: y.fold(predicate) for (x, y) in self._attributes.items()
if isinstance(y, SerializableTypedAttributesHolder)}
return predicate(self, children) | Takes a predicate and applies it to each node, starting from the
leaves, and propagates the return values upward. |
def the_one(cls):
if cls.THE_ONE is None:
cls.THE_ONE = cls(settings.HELP_TOKENS_INI_FILE)
return cls.THE_ONE | Get the single global HelpUrlExpert object. |
def get_config_value(self, section_name, option, default_option="default"):
if self.config is None:
self.config = configparser.ConfigParser()
self.config.read(self.ini_file_name)
if option:
try:
return self.config.get(section_name, option)
except configparser.NoOptionError:
log.debug(
"Didn't find a configuration option for '%s' section and '%s' option",
section_name, option,
)
return self.config.get(section_name, default_option) | Read a value from the configuration, with a default.
Args:
section_name (str): name of the section in the configuration from which
the option should be found.
option (str): name of the configuration option.
default_option (str): name of the default configuration option whose
value should be returned if the requested option is not found.
Returns:
str: the value from the ini file. |
def url_for_token(self, token):
book_url = self.get_config_value("pages", token)
book, _, url_tail = book_url.partition(':')
book_base = settings.HELP_TOKENS_BOOKS[book]
url = book_base
lang = getattr(settings, "HELP_TOKENS_LANGUAGE_CODE", None)
if lang is not None:
lang = self.get_config_value("locales", lang)
url += "/" + lang
version = getattr(settings, "HELP_TOKENS_VERSION", None)
if version is not None:
url += "/" + version
url += "/" + url_tail
return url | Find the full URL for a help token. |
def search_data_std(Channel, RunNos, RepeatNos, directoryPath='.'):
files = glob('{}/*'.format(directoryPath))
files_CorrectChannel = []
for file_ in files:
if 'CH{}'.format(Channel) in file_:
files_CorrectChannel.append(file_)
files_CorrectRunNo = []
for RunNo in RunNos:
files_match = _fnmatch.filter(
files_CorrectChannel, '*RUN*0{}_*'.format(RunNo))
for file_ in files_match:
files_CorrectRunNo.append(file_)
files_CorrectRepeatNo = []
for RepeatNo in RepeatNos:
files_match = _fnmatch.filter(
files_CorrectRunNo, '*REPEAT*0{}.*'.format(RepeatNo))
for file_ in files_match:
files_CorrectRepeatNo.append(file_)
return files_CorrectRepeatNo | Lets you find multiple datasets at once assuming they have a
filename which contains a pattern of the form:
CH<ChannelNo>_RUN00...<RunNo>_REPEAT00...<RepeatNo>
Parameters
----------
Channel : int
The channel you want to load
RunNos : sequence
Sequence of run numbers you want to load
RepeatNos : sequence
Sequence of repeat numbers you want to load
directoryPath : string, optional
The path to the directory housing the data
The default is the current directory
Returns
-------
Data_filepaths : list
A list containing the filepaths to the matching files |
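A usage sketch, assuming the files in the 'Data' directory follow the CH<ChannelNo>_RUN..._REPEAT... naming pattern described above (the path and numbers are illustrative):
paths = search_data_std(Channel=1, RunNos=[1, 2], RepeatNos=[1], directoryPath='Data')
print(paths)   # filepaths for channel 1, runs 1-2, repeat 1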
def multi_load_data(Channel, RunNos, RepeatNos, directoryPath='.', calcPSD=True, NPerSegmentPSD=1000000):
matching_files = search_data_std(Channel=Channel, RunNos=RunNos, RepeatNos=RepeatNos, directoryPath=directoryPath)
#data = []
#for filepath in matching_files_:
# data.append(load_data(filepath, calcPSD=calcPSD, NPerSegmentPSD=NPerSegmentPSD))
cpu_count = _cpu_count()
workerPool = _Pool(cpu_count)
load_data_partial = _partial(load_data, calcPSD=calcPSD, NPerSegmentPSD=NPerSegmentPSD)
data = workerPool.map(load_data_partial, matching_files)
workerPool.close()
workerPool.terminate()
workerPool.join()
#with _Pool(cpu_count) as workerPool:
#load_data_partial = _partial(load_data, calcPSD=calcPSD, NPerSegmentPSD=NPerSegmentPSD)
#data = workerPool.map(load_data_partial, files_CorrectRepeatNo)
return data | Lets you load multiple datasets at once assuming they have a
filename which contains a pattern of the form:
CH<ChannelNo>_RUN00...<RunNo>_REPEAT00...<RepeatNo>
Parameters
----------
Channel : int
The channel you want to load
RunNos : sequence
Sequence of run numbers you want to load
RepeatNos : sequence
Sequence of repeat numbers you want to load
directoryPath : string, optional
The path to the directory housing the data
The default is the current directory
Returns
-------
Data : list
A list containing the DataObjects that were loaded. |
def multi_load_data_custom(Channel, TraceTitle, RunNos, directoryPath='.', calcPSD=True, NPerSegmentPSD=1000000):
# files = glob('{}/*'.format(directoryPath))
# files_CorrectChannel = []
# for file_ in files:
# if 'C{}'.format(Channel) in file_:
# files_CorrectChannel.append(file_)
# files_CorrectRunNo = []
# for RunNo in RunNos:
# files_match = _fnmatch.filter(
# files_CorrectChannel, '*C{}'.format(Channel)+TraceTitle+str(RunNo).zfill(5)+'.*')
# for file_ in files_match:
# files_CorrectRunNo.append(file_)
matching_files = search_data_custom(Channel, TraceTitle, RunNos, directoryPath)
cpu_count = _cpu_count()
workerPool = _Pool(cpu_count)
# for filepath in files_CorrectRepeatNo:
# print(filepath)
# data.append(load_data(filepath))
load_data_partial = _partial(load_data, calcPSD=calcPSD, NPerSegmentPSD=NPerSegmentPSD)
data = workerPool.map(load_data_partial, matching_files)
workerPool.close()
workerPool.terminate()
workerPool.join()
return data | Lets you load multiple datasets named with the LeCroy's custom naming scheme at once.
Parameters
----------
Channel : int
The channel you want to load
TraceTitle : string
The custom trace title of the files.
RunNos : sequence
Sequence of run numbers you want to load
directoryPath : string, optional
The path to the directory housing the data
The default is the current directory
Returns
-------
Data : list
A list containing the DataObjects that were loaded. |
def search_data_custom(Channel, TraceTitle, RunNos, directoryPath='.'):
files = glob('{}/*'.format(directoryPath))
files_CorrectChannel = []
for file_ in files:
if 'C{}'.format(Channel) in file_:
files_CorrectChannel.append(file_)
files_CorrectRunNo = []
for RunNo in RunNos:
files_match = _fnmatch.filter(
files_CorrectChannel, '*C{}'.format(Channel)+TraceTitle+str(RunNo).zfill(5)+'.*')
for file_ in files_match:
files_CorrectRunNo.append(file_)
print("loading the following files: {}".format(files_CorrectRunNo))
paths = files_CorrectRunNo
return paths | Lets you create a list with full file paths of the files
named with the LeCroy's custom naming scheme.
Parameters
----------
Channel : int
The channel you want to load
TraceTitle : string
The custom trace title of the files.
RunNos : sequence
Sequence of run numbers you want to load
directoryPath : string, optional
The path to the directory housing the data
The default is the current directory
Returns
-------
Paths : list
A list containing the full file paths of the files you were looking for. |
def calc_temp(Data_ref, Data):
T = 300 * ((Data.A * Data_ref.Gamma) / (Data_ref.A * Data.Gamma))
Data.T = T
return T | Calculates the temperature of a data set relative to a reference.
The reference is assumed to be at 300K.
Parameters
----------
Data_ref : DataObject
Reference data set, assumed to be 300K
Data : DataObject
Data object to have the temperature calculated for
Returns
-------
T : uncertainties.ufloat
The temperature of the data set |
def calc_gamma_components(Data_ref, Data):
A_prime = Data_ref.A/Data_ref.Gamma
Gamma0 = Data.A/A_prime
delta_Gamma = Data.Gamma - Gamma0
return Gamma0, delta_Gamma | Calculates the components of Gamma (Gamma0 and delta_Gamma),
assuming that Data_ref is uncooled data (ideally at 3 mbar
for best fitting). It uses the fact that A_prime = A/Gamma0 should
be constant for a particular particle under changes in pressure,
and therefore uses the reference data to calculate A_prime (assuming
the Gamma value found for the uncooled data is equal to Gamma0,
since only collisions should be causing the damping). For the
cooled data, Gamma0 then equals A/A_prime, and so Gamma0 and
delta_Gamma can be extracted.
A_prime = ConvFactor**2 * (2*k_B*T0/(pi*m))
Parameters
----------
Data_ref : DataObject
Reference data set, assumed to be 300K
Data : DataObject
Data object for which to calculate the Gamma components
Returns
-------
Gamma0 : uncertainties.ufloat
Damping due to the environment
delta_Gamma : uncertainties.ufloat
Damping due to other effects (e.g. feedback cooling) |
def fit_curvefit(p0, datax, datay, function, **kwargs):
pfit, pcov = \
_curve_fit(function, datax, datay, p0=p0,
epsfcn=0.0001, **kwargs)
error = []
for i in range(len(pfit)):
try:
error.append(_np.absolute(pcov[i][i])**0.5)
except Exception:
error.append(_np.NaN)
pfit_curvefit = pfit
perr_curvefit = _np.array(error)
return pfit_curvefit, perr_curvefit | Fits the data to a function using scipy.optimize.curve_fit
Parameters
----------
p0 : array_like
initial parameters to use for fitting
datax : array_like
x data to use for fitting
datay : array_like
y data to use for fitting
function : function
function to be fit to the data
kwargs
keyword arguments to be passed to scipy.optimize.curve_fit
Returns
-------
pfit_curvefit : array
Optimal values for the parameters so that the sum of
the squared residuals of ydata is minimized
perr_curvefit : array
One standard deviation errors in the optimal values for
the parameters |
def moving_average(array, n=3):
ret = _np.cumsum(array, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n | Calculates the moving average of an array.
Parameters
----------
array : array
The array to have the moving average taken of
n : int
The number of points of moving average to take
Returns
-------
MovingAverageArray : array
The n-point moving average of the input array |
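A worked example, assuming moving_average is imported from this module:
import numpy as np
print(moving_average(np.array([1, 2, 3, 4, 5]), n=3))   # [2. 3. 4.]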
def take_closest(myList, myNumber):
pos = _bisect_left(myList, myNumber)
if pos == 0:
return myList[0]
if pos == len(myList):
return myList[-1]
before = myList[pos - 1]
after = myList[pos]
if after - myNumber < myNumber - before:
return after
else:
return before | Assumes myList is sorted. Returns closest value to myNumber.
If two numbers are equally close, return the smallest number.
Parameters
----------
myList : array
The list in which to find the closest value to myNumber
myNumber : float
The number to find the closest to in MyList
Returns
-------
closestValue : float
The number closest to myNumber in myList |
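A worked example, assuming take_closest is imported from this module:
print(take_closest([1, 3, 7, 10], 6))   # 7
print(take_closest([1, 3, 7, 10], 5))   # 3 (tie between 3 and 7 resolves to the smaller value)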
def _position_autocorrelation_fitting_eqn(t, Gamma, AngTrapFreq):
return _np.exp(-t*Gamma/2)* ( _np.cos(t* _np.sqrt(AngTrapFreq**2-Gamma**2/4)) + Gamma* _np.sin(t* _np.sqrt(AngTrapFreq**2-Gamma**2/4))/(2* _np.sqrt(AngTrapFreq**2-Gamma**2/4)) ) | The value of the fitting equation:
exp(-t*Gamma/2) * (cos(t* sqrt(Omega**2 - Gamma**2 /4)) + Gamma* sin(t* sqrt(Omega**2-Gamma**2 /4))/(2* sqrt(Omega**2 - Gamma**2 /4)))
[eqn 4.20 taken from DOI: 10.1007/978-1-4614-6031-2]
to be fit to the autocorrelation-exponential decay
Parameters
----------
t : float
time
Gamma : float
Big Gamma (in radians), i.e. damping
AngTrapFreq : float
Angular Trapping Frequency in Radians
Returns
-------
Value : float
The value of the fitting equation |
def PSD_fitting_eqn(A, OmegaTrap, Gamma, omega):
return A / ((OmegaTrap**2 - omega**2)**2 + omega**2 * (Gamma)**2) | The value of the fitting equation:
A / ((OmegaTrap**2 - omega**2)**2 + (omega * Gamma)**2)
to be fit to the PSD
Parameters
----------
A : float
Fitting constant A
A = γ**2*Γ_0*(2*K_b*T_0)/(π*m)
where:
γ = conversionFactor
Γ_0 = Damping factor due to environment
π = pi
OmegaTrap : float
The trapping frequency in the axis of interest
(in angular frequency)
Gamma : float
The damping factor Gamma = Γ = Γ_0 + δΓ
where:
Γ_0 = Damping factor due to environment
δΓ = extra damping due to feedback or other effects
omega : float
The angular frequency to calculate the value of the
fitting equation at
Returns
-------
Value : float
The value of the fitting equation |
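Because omega is the last argument, a small wrapper is needed before handing this to a least-squares routine that expects the independent variable first. A sketch using fit_curvefit from this module, where freqs_rad (the angular-frequency axis) and PSD (the measured spectrum) are assumed to exist and the initial guesses are placeholders:
def _psd_model(omega, A, OmegaTrap, Gamma):
    return PSD_fitting_eqn(A, OmegaTrap, Gamma, omega)

p0 = [1e9, 2 * 3.14159 * 60e3, 1e3]   # [A, OmegaTrap, Gamma] initial guesses
params, errors = fit_curvefit(p0, freqs_rad, PSD, _psd_model)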
def PSD_fitting_eqn_with_background(A, OmegaTrap, Gamma, FlatBackground, omega):
return A / ((OmegaTrap**2 - omega**2)**2 + omega**2 * (Gamma)**2) + FlatBackground | The value of the fitting equation:
A / ((OmegaTrap**2 - omega**2)**2 + (omega * Gamma)**2) + FlatBackground
to be fit to the PSD
Parameters
----------
A : float
Fitting constant A
A = γ**2*Γ_0*(2*K_b*T_0)/(π*m)
where:
γ = conversionFactor
Γ_0 = Damping factor due to environment
π = pi
OmegaTrap : float
The trapping frequency in the axis of interest
(in angular frequency)
Gamma : float
The damping factor Gamma = Γ = Γ_0 + δΓ
where:
Γ_0 = Damping factor due to environment
δΓ = extra damping due to feedback or other effects
FlatBackground : float
Adds a constant offset to the peak to account for a flat
noise background
omega : float
The angular frequency to calculate the value of the
fitting equation at
Returns
-------
Value : float
The value of the fitting equation |
def get_ZXY_freqs(Data, zfreq, xfreq, yfreq, bandwidth=5000):
trapfreqs = []
for freq in [zfreq, xfreq, yfreq]:
z_f_fit_lower = take_closest(Data.freqs, freq - bandwidth / 2)
z_f_fit_upper = take_closest(Data.freqs, freq + bandwidth / 2)
z_indx_fit_lower = int(_np.where(Data.freqs == z_f_fit_lower)[0][0])
z_indx_fit_upper = int(_np.where(Data.freqs == z_f_fit_upper)[0][0])
z_index_OmegaTrap = _np.where(Data.PSD == max(
Data.PSD[z_indx_fit_lower:z_indx_fit_upper]))[0][0]
# find highest point in region about guess for trap frequency
# use that as guess for trap frequency and recalculate region
# about the trap frequency
z_OmegaTrap = Data.freqs[z_index_OmegaTrap]
trapfreqs.append(z_OmegaTrap)
return trapfreqs | Determines the exact z, x and y peak frequencies from approximate
frequencies by finding the highest peak in the PSD "close to" the
approximate peak frequency. By "close to" I mean within the range:
approxFreq - bandwidth/2 to approxFreq + bandwidth/2
Parameters
----------
Data : DataObject
DataObject containing the data for which you want to determine the
z, x and y frequencies.
zfreq : float
An approximate frequency for the z peak
xfreq : float
An approximate frequency for the x peak
yfreq : float
An approximate frequency for the y peak
bandwidth : float, optional
The bandwidth around the approximate peak to look for the actual peak. The default value is 5000
Returns
-------
trapfreqs : list
List containing the trap frequencies in the following order (z, x, y) |
def IFFT_filter(Signal, SampleFreq, lowerFreq, upperFreq, PyCUDA = False):
if PyCUDA==True:
Signalfft=calc_fft_with_PyCUDA(Signal)
else:
print("starting fft")
Signalfft = scipy.fftpack.fft(Signal)
print("starting freq calc")
freqs = _np.fft.fftfreq(len(Signal)) * SampleFreq
print("starting bin zeroing")
Signalfft[_np.where(freqs < lowerFreq)] = 0
Signalfft[_np.where(freqs > upperFreq)] = 0
if PyCUDA==True:
FilteredSignal = 2 * calc_ifft_with_PyCUDA(Signalfft)
else:
print("starting ifft")
FilteredSignal = 2 * scipy.fftpack.ifft(Signalfft)
print("done")
return _np.real(FilteredSignal) | Filters data using fft -> zeroing out fft bins -> ifft
Parameters
----------
Signal : ndarray
Signal to be filtered
SampleFreq : float
Sample frequency of signal
lowerFreq : float
Lower frequency of bandpass to allow through filter
upperFreq : float
Upper frequency of bandpass to allow through filter
PyCUDA : bool, optional
If True, uses PyCUDA to accelerate the FFT and IFFT
via using your NVIDIA-GPU
If False, performs FFT and IFFT with conventional
scipy.fftpack
Returns
-------
FilteredData : ndarray
Array containing the filtered data |
def calc_fft_with_PyCUDA(Signal):
print("starting fft")
Signal = Signal.astype(_np.float32)
Signal_gpu = _gpuarray.to_gpu(Signal)
Signalfft_gpu = _gpuarray.empty(len(Signal)//2+1,_np.complex64)
plan = _Plan(Signal.shape,_np.float32,_np.complex64)
_fft(Signal_gpu, Signalfft_gpu, plan)
Signalfft = Signalfft_gpu.get()  # only N//2 + 1 points long
Signalfft = _np.hstack((Signalfft,_np.conj(_np.flipud(Signalfft[1:len(Signal)//2]))))
print("fft done")
return Signalfft | Calculates the FFT of the passed signal by using
the scikit-cuda libary which relies on PyCUDA
Parameters
----------
Signal : ndarray
Signal to be transformed into Fourier space
Returns
-------
Signalfft : ndarray
Array containing the signal's FFT |
def calc_ifft_with_PyCUDA(Signalfft):
print("starting ifft")
Signalfft = Signalfft.astype(_np.complex64)
Signalfft_gpu = _gpuarray.to_gpu(Signalfft[0:len(Signalfft)//2+1])
Signal_gpu = _gpuarray.empty(len(Signalfft),_np.float32)
plan = _Plan(len(Signalfft),_np.complex64,_np.float32)
_ifft(Signalfft_gpu, Signal_gpu, plan)
Signal = Signal_gpu.get()/(2*len(Signalfft)) #normalising as CUDA IFFT is un-normalised
print("ifft done")
return Signal | Calculates the inverse-FFT of the passed FFT-signal by
using the scikit-cuda libary which relies on PyCUDA
Parameters
----------
Signalfft : ndarray
FFT-Signal to be transformed into Real space
Returns
-------
Signal : ndarray
Array containing the ifft signal |
def butterworth_filter(Signal, SampleFreq, lowerFreq, upperFreq):
b, a = make_butterworth_b_a(lowerFreq, upperFreq, SampleFreq)
FilteredSignal = scipy.signal.filtfilt(b, a, Signal)
return _np.real(FilteredSignal) | Filters data by constructing a 5th order butterworth
IIR filter and using scipy.signal.filtfilt, which does
phase correction after applying the filter (as IIR
filters apply a phase change)
Parameters
----------
Signal : ndarray
Signal to be filtered
SampleFreq : float
Sample frequency of signal
lowerFreq : float
Lower frequency of bandpass to allow through filter
upperFreq : float
Upper frequency of bandpass to allow through filter
Returns
-------
FilteredData : ndarray
Array containing the filtered data |
def make_butterworth_b_a(lowcut, highcut, SampleFreq, order=5, btype='band'):
nyq = 0.5 * SampleFreq
low = lowcut / nyq
high = highcut / nyq
if btype.lower() == 'band':
b, a = scipy.signal.butter(order, [low, high], btype = btype)
elif btype.lower() == 'low':
b, a = scipy.signal.butter(order, low, btype = btype)
elif btype.lower() == 'high':
b, a = scipy.signal.butter(order, high, btype = btype)
else:
raise ValueError('Filter type unknown')
return b, a | Generates the b and a coefficients for a butterworth IIR filter.
Parameters
----------
lowcut : float
frequency of lower bandpass limit
highcut : float
frequency of higher bandpass limit
SampleFreq : float
Sample frequency of filter
order : int, optional
order of IIR filter. Is 5 by default
btype : string, optional
type of filter to make e.g. (band, low, high)
Returns
-------
b : ndarray
coefficients multiplying the current and past inputs (feedforward coefficients)
a : ndarray
coefficients multiplying the past outputs (feedback coefficients) |
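A minimal sketch of the coefficients being used through scipy.signal.filtfilt on synthetic data (butterworth_filter above wraps exactly this pattern); make_butterworth_b_a is assumed imported from this module:
import numpy as np
import scipy.signal

fs = 10e3                                    # sample frequency in Hz
t = np.arange(0, 1, 1 / fs)
signal = np.sin(2 * np.pi * 500 * t) + 0.5 * np.random.randn(len(t))
b, a = make_butterworth_b_a(200, 1000, fs)   # band-pass 200-1000 Hz
filtered = scipy.signal.filtfilt(b, a, signal)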
def make_butterworth_bandpass_b_a(CenterFreq, bandwidth, SampleFreq, order=5, btype='band'):
lowcut = CenterFreq-bandwidth/2
highcut = CenterFreq+bandwidth/2
b, a = make_butterworth_b_a(lowcut, highcut, SampleFreq, order, btype)
return b, a | Generates the b and a coefficients for a butterworth bandpass IIR filter.
Parameters
----------
CenterFreq : float
central frequency of bandpass
bandwidth : float
full width of the bandpass (highcut - lowcut)
SampleFreq : float
Sample frequency of filter
order : int, optional
order of IIR filter. Is 5 by default
btype : string, optional
type of filter to make e.g. (band, low, high)
Returns
-------
b : ndarray
coefficients multiplying the current and past inputs (feedforward coefficients)
a : ndarray
coefficients multiplying the past outputs (feedback coefficients) |
def IIR_filter_design(CentralFreq, bandwidth, transitionWidth, SampleFreq, GainStop=40, GainPass=0.01):
NyquistFreq = SampleFreq / 2
if (CentralFreq + bandwidth / 2 + transitionWidth > NyquistFreq):
raise ValueError(
"Need a higher Sample Frequency for this Central Freq, Bandwidth and transition Width")
CentralFreqNormed = CentralFreq / NyquistFreq
bandwidthNormed = bandwidth / NyquistFreq
transitionWidthNormed = transitionWidth / NyquistFreq
bandpass = [CentralFreqNormed - bandwidthNormed /
2, CentralFreqNormed + bandwidthNormed / 2]
bandstop = [CentralFreqNormed - bandwidthNormed / 2 - transitionWidthNormed,
CentralFreqNormed + bandwidthNormed / 2 + transitionWidthNormed]
print(bandpass, bandstop)
b, a = scipy.signal.iirdesign(bandpass, bandstop, GainPass, GainStop)
return b, a | Function to calculate the coefficients of an IIR filter.
IMPORTANT NOTE: make_butterworth_bandpass_b_a and make_butterworth_b_a
can produce IIR filters at higher sample rates and are preferable
for this reason.
Parameters
----------
CentralFreq : float
Central frequency of the IIR filter to be designed
bandwidth : float
The width of the passband to be created about the central frequency
transitionWidth : float
The width of the transition band between the pass-band and stop-band
SampleFreq : float
The sample frequency (rate) of the data to be filtered
GainStop : float, optional
The dB of attenuation within the stopband (i.e. outside the passband)
GainPass : float, optional
The dB attenuation inside the passband (ideally close to 0 for a bandpass filter)
Returns
-------
b : ndarray
coefficients multiplying the current and past inputs (feedforward coefficients)
a : ndarray
coefficients multiplying the past outputs (feedback coefficients) |
def multi_subplots_time(DataArray, SubSampleN=1, units='s', xlim=None, ylim=None, LabelArray=[], show_fig=True):
unit_prefix = units[:-1] # removed the last char
NumDataSets = len(DataArray)
if LabelArray == []:
LabelArray = ["DataSet {}".format(i)
for i in _np.arange(0, len(DataArray), 1)]
fig, axs = _plt.subplots(NumDataSets, 1)
for i, data in enumerate(DataArray):
axs[i].plot(unit_conversion(data.time.get_array()[::SubSampleN], unit_prefix), data.voltage[::SubSampleN],
alpha=0.8, label=LabelArray[i])
axs[i].set_xlabel("time ({})".format(units))
axs[i].grid(which="major")
axs[i].legend(loc="best")
axs[i].set_ylabel("voltage (V)")
if xlim != None:
axs[i].set_xlim(xlim)
if ylim != None:
axs[i].set_ylim(ylim)
if show_fig == True:
_plt.show()
return fig, axs | plot the time trace on multiple axes
Parameters
----------
DataArray : array-like
array of DataObject instances for which to plot the PSDs
SubSampleN : int, optional
Number of intervals between points to remove (to sub-sample data so
that you effectively have lower sample rate to make plotting easier
and quicker).
xlim : array-like, optional
2 element array specifying the lower and upper x limit for which to
plot the time signal
LabelArray : array-like, optional
array of labels for each data-set to be plotted
show_fig : bool, optional
If True runs plt.show() before returning figure
if False it just returns the figure object.
(the default is True, it shows the figure)
Returns
-------
fig : matplotlib.figure.Figure object
The figure object created
axs : list of matplotlib.axes.Axes objects
The list of axes object created |
def calc_PSD(Signal, SampleFreq, NPerSegment=1000000, window="hann"):
freqs, PSD = scipy.signal.welch(Signal, SampleFreq,
window=window, nperseg=NPerSegment)
PSD = PSD[freqs.argsort()]
freqs.sort()
return freqs, PSD | Extracts the power spectral density (PSD) from the data.
Parameters
----------
Signal : array-like
Array containing the signal to have the PSD calculated for
SampleFreq : float
Sample frequency of the signal array
NPerSegment : int, optional
Length of each segment used in scipy.welch
default = 1000000
window : str or tuple or array_like, optional
Desired window to use. See get_window for a list of windows
and required parameters. If window is array_like it will be
used directly as the window and its length will be used for
nperseg.
default = "hann"
Returns
-------
freqs : ndarray
Array containing the frequencies at which the PSD has been
calculated
PSD : ndarray
Array containing the value of the PSD at the corresponding
frequency value in V**2/Hz |
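A usage sketch with a synthetic tone, assuming calc_PSD is imported from this module:
import numpy as np

fs = 1e5                                          # sample frequency in Hz
t = np.arange(0, 0.5, 1 / fs)
signal = np.sin(2 * np.pi * 5e3 * t) + 0.1 * np.random.randn(len(t))
freqs, PSD = calc_PSD(signal, fs, NPerSegment=10000)
print(freqs[np.argmax(PSD)])                      # ~5000 Hz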
def calc_autocorrelation(Signal, FFT=False, PyCUDA=False):
if FFT==True:
Signal_padded = scipy.fftpack.ifftshift((Signal-_np.average(Signal))/_np.std(Signal))
n, = Signal_padded.shape
Signal_padded = _np.r_[Signal_padded[:n//2], _np.zeros_like(Signal_padded), Signal_padded[n//2:]]
if PyCUDA==True:
f = calc_fft_with_PyCUDA(Signal_padded)
else:
f = scipy.fftpack.fft(Signal_padded)
p = _np.absolute(f)**2
if PyCUDA==True:
autocorr = calc_ifft_with_PyCUDA(p)
else:
autocorr = scipy.fftpack.ifft(p)
return _np.real(autocorr)[:n//2]/(_np.arange(n//2)[::-1]+n//2)
else:
Signal = Signal - _np.mean(Signal)
autocorr = scipy.signal.correlate(Signal, Signal, mode='full')
return autocorr[autocorr.size//2:]/autocorr[autocorr.size//2] | Calculates the autocorrelation of a given Signal, either directly or via the FFT.
Parameters
----------
Signal : array-like
Array containing the signal to have the autocorrelation calculated for
FFT : bool, optional
Uses FFT to accelerate the autocorrelation calculation, but assumes a
certain periodicity of the signal to autocorrelate. Zero-padding is added
to account for this periodicity assumption.
PyCUDA : bool, optional
If True, uses PyCUDA to accelerate the FFT and IFFT
via using your NVIDIA-GPU
If False, performs FFT and IFFT with conventional
scipy.fftpack
Returns
-------
Autocorrelation : ndarray
Array containing the value of the autocorrelation evaluated
at the corresponding amount of shifted array-index. |
def _GetRealImagArray(Array):
ImagArray = _np.array([num.imag for num in Array])
RealArray = _np.array([num.real for num in Array])
return RealArray, ImagArray | Splits an array into the real and imaginary components of each element, returning them as 2 separate arrays.
Parameters
----------
Array : ndarray
Input array
Returns
-------
RealArray : ndarray
The real components of the input array
ImagArray : ndarray
The imaginary components of the input array |
def _GetComplexConjugateArray(Array):
ConjArray = _np.array([num.conj() for num in Array])
return ConjArray | Calculates the complex conjugate of each element in an array and returns the resulting array.
Parameters
----------
Array : ndarray
Input array
Returns
-------
ConjArray : ndarray
The complex conjugate of the input array. |
def fm_discriminator(Signal):
S_analytic = _hilbert(Signal)
S_analytic_star = _GetComplexConjugateArray(S_analytic)
S_analytic_hat = S_analytic[1:] * S_analytic_star[:-1]
R, I = _GetRealImagArray(S_analytic_hat)
fmDiscriminator = _np.arctan2(I, R)
return fmDiscriminator | Calculates the digital FM discriminator from a real-valued time signal.
Parameters
----------
Signal : array-like
A real-valued time signal
Returns
-------
fmDiscriminator : array-like
The digital FM discriminator of the argument signal |
def _is_this_a_collision(ArgList):
value, mean_fmd, tolerance = ArgList
return not _approx_equal(mean_fmd, value, tolerance) | Detects if a particular point is during collision after effect (i.e. a phase shift) or not.
Parameters
----------
ArgList : array_like
Contains the following elements:
value : float
value of the FM discriminator
mean_fmd : float
the mean value of the FM discriminator
tolerance : float
The tolerance in percentage that it must be away from the mean value for it
to be counted as a collision event.
Returns
-------
is_this_a_collision : bool
True if this is a collision event, false if not. |
def find_collisions(Signal, tolerance=50):
fmd = fm_discriminator(Signal)
mean_fmd = _np.mean(fmd)
Collisions = [_is_this_a_collision(
[value, mean_fmd, tolerance]) for value in fmd]
return Collisions | Finds collision events in the signal from the shift in phase of the signal.
Parameters
----------
Signal : array_like
Array containing the values of the signal of interest containing a single frequency.
tolerance : float
Percentage tolerance, if the value of the FM Discriminator varies from the mean by this
percentage it is counted as being during a collision event (or the aftermath of an event).
Returns
-------
Collisions : ndarray
Array of booleans, true if during a collision event, false otherwise. |
def count_collisions(Collisions):
CollisionCount = 0
CollisionIndicies = []
lastval = True
for i, val in enumerate(Collisions):
if val == True and lastval == False:
CollisionIndicies.append(i)
CollisionCount += 1
lastval = val
return CollisionCount, CollisionIndicies | Counts the number of unique collisions and gets the collision index.
Parameters
----------
Collisions : array_like
Array of booleans, containing true if during a collision event, false otherwise.
Returns
-------
CollisionCount : int
Number of unique collisions
CollisionIndicies : list
Indices of collision occurrences |
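A worked example, assuming count_collisions is imported from this module (the boolean list is illustrative rather than derived from a real signal):
collisions = [False, True, True, False, True]
count, indices = count_collisions(collisions)
print(count, indices)   # 2 [1, 4]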
def parse_orgtable(lines):
def parseline(l):
w = l.split('|')[1:-1]
return [wi.strip() for wi in w]
columns = parseline(lines[0])
data = []
for line in lines[2:]:
data.append(list(map(str, parseline(line))))
dataframe = _pd.DataFrame(data=data, columns=columns)
dataframe.set_index("RunNo")
return dataframe | Parse an org-table (input as a list of strings split by newline)
into a Pandas data frame.
Parameters
----------
lines : list of string
an org-table input as a list of strings split by newline
Returns
-------
dataframe : pandas.DataFrame
A data frame containing the org-table's data |
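A small sketch with a two-row org-table (the column names are illustrative; RunNo is the column the rest of this module expects), assuming parse_orgtable is imported from this module:
lines = ['| RunNo | Pressure |',
         '|-------+----------|',
         '| 1     | 3.4      |',
         '| 2     | 1.2      |']
df = parse_orgtable(lines)
print(df.columns.tolist())   # ['RunNo', 'Pressure']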
def steady_state_potential(xdata,HistBins=100):
import numpy as _np
pops, bins = _np.histogram(xdata, HistBins)
bins=bins[0:-1]
bins=bins+_np.mean(_np.diff(bins))
#normalise pops
pops=pops/float(_np.sum(pops))
return bins,-_np.log(pops) | Calculates the steady state potential. Used in
fit_radius_from_potentials.
Parameters
----------
xdata : ndarray
Position data for a degree of freedom
HistBins : int
Number of bins to use for histogram
of xdata. Number of position points
at which the potential is calculated.
Returns
-------
position : ndarray
positions at which potential has been
calculated
potential : ndarray
value of potential at the positions above |
def dynamical_potential(xdata, dt, order=3):
import numpy as _np
adata = calc_acceleration(xdata, dt)
xdata = xdata[2:] # removes first 2 values as differentiating twice means
# we have acceleration[n] corresponds to position[n-2]
z=_np.polyfit(xdata,adata,order)
p=_np.poly1d(z)
spring_pot=_np.polyint(p)
return -spring_pot | Computes potential from spring function
Parameters
----------
xdata : ndarray
Position data for a degree of freedom,
at which to calculate potential
dt : float
time between measurements
order : int
order of polynomial to fit
Returns
-------
Potential : ndarray
valued of potential at positions in
xdata |
def calc_acceleration(xdata, dt):
acceleration = _np.diff(_np.diff(xdata))/dt**2
return acceleration | Calculates the acceleration from the position
Parameters
----------
xdata : ndarray
Position data
dt : float
time between measurements
Returns
-------
acceleration : ndarray
values of acceleration from position
2 to N. |
def make_dynamical_potential_func(kBT_Gamma, density, SpringPotnlFunc):
def PotentialFunc(xdata, Radius):
"""
calculates the potential given the position (in volts)
and the radius of the particle.
Parameters
----------
xdata : ndarray
Positon data (in volts)
Radius : float
Radius in units of nm
Returns
-------
Potential : ndarray
Dynamical Spring Potential at positions given by xdata
"""
mass = ((4/3)*pi*((Radius*10**-9)**3))*density
yfit=(kBT_Gamma/mass)
Y = yfit*SpringPotnlFunc(xdata)
return Y
return PotentialFunc | Creates the function that calculates the potential given
the position (in volts) and the radius of the particle.
Parameters
----------
kBT_Gamma : float
Value of kB*T/Gamma
density : float
density of the nanoparticle
SpringPotnlFunc : function
Function which takes the value of position (in volts)
and returns the spring potential
Returns
-------
PotentialFunc : function
function that calculates the potential given
the position (in volts) and the radius of the
particle. |
def calc_z0_and_conv_factor_from_ratio_of_harmonics(z, z2, NA=0.999):
V1 = calc_mean_amp(z)
V2 = calc_mean_amp(z2)
ratio = V2/V1
beta = 4*ratio
laserWavelength = 1550e-9 # in m
k0 = (2*pi)/(laserWavelength)
WaistSize = laserWavelength/(pi*NA)
Zr = pi*WaistSize**2/laserWavelength
z0 = beta/(k0 - 1/Zr)
ConvFactor = V1/z0
T0 = 300
return z0, ConvFactor | Calculates the Conversion Factor and physical amplitude of motion in nms
by comparison of the ratio of the heights of the z signal and
second harmonic of z.
Parameters
----------
z : ndarray
array containing z signal in volts
z2 : ndarray
array containing second harmonic of z signal in volts
NA : float
NA of mirror used in experiment
Returns
-------
z0 : float
Physical average amplitude of motion in nms
ConvFactor : float
Conversion Factor between volts and nms |
def calc_mass_from_z0(z0, w0):
T0 = 300
mFromEquipartition = Boltzmann*T0/(w0**2 * z0**2)
return mFromEquipartition | Calculates the mass of the particle using the equipartition
from the angular frequency of the z signal and the average
amplitude of the z signal in nms.
Parameters
----------
z0 : float
Physical average amplitude of motion in nms
w0 : float
Angular Frequency of z motion
Returns
-------
mass : float
mass in kgs |
def calc_mass_from_fit_and_conv_factor(A, Damping, ConvFactor):
T0 = 300
mFromA = 2*Boltzmann*T0/(pi*A) * ConvFactor**2 * Damping
return mFromA | Calculates mass from the A parameter from fitting, the damping from
fitting in angular units and the Conversion factor calculated from
comparing the ratio of the z signal and first harmonic of z.
Parameters
----------
A : float
A factor calculated from fitting
Damping : float
damping in radians/second calcualted from fitting
ConvFactor : float
conversion factor between volts and nms
Returns
-------
mass : float
mass in kgs |
def get_time_slice(time, z, zdot=None, timeStart=None, timeEnd=None):
if timeStart is None:
timeStart = time[0]
if timeEnd is None:
timeEnd = time[-1]
StartIndex = _np.where(time == take_closest(time, timeStart))[0][0]
EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0]
time_sliced = time[StartIndex:EndIndex]
z_sliced = z[StartIndex:EndIndex]
if zdot is not None:
zdot_sliced = zdot[StartIndex:EndIndex]
else:
zdot_sliced = None
return time_sliced, z_sliced, zdot_sliced | Get slice of time, z and (if provided) zdot from timeStart to timeEnd.
Parameters
----------
time : ndarray
array of time values
z : ndarray
array of z values
zdot : ndarray, optional
array of zdot (velocity) values.
timeStart : float, optional
time at which to start the slice.
Defaults to beginnging of time trace
timeEnd : float, optional
time at which to end the slide.
Defaults to end of time trace
Returns
-------
time_sliced : ndarray
array of time values from timeStart to timeEnd
z_sliced : ndarray
array of z values from timeStart to timeEnd
zdot_sliced : ndarray
array of zdot values from timeStart to timeEnd.
None if zdot not provided |
def unit_conversion(array, unit_prefix, current_prefix=""):
UnitDict = {
'E': 1e18,
'P': 1e15,
'T': 1e12,
'G': 1e9,
'M': 1e6,
'k': 1e3,
'': 1,
'm': 1e-3,
'u': 1e-6,
'n': 1e-9,
'p': 1e-12,
'f': 1e-15,
'a': 1e-18,
}
try:
Desired_units = UnitDict[unit_prefix]
except KeyError:
raise ValueError("You entered {} for the unit_prefix, this is not a valid prefix".format(unit_prefix))
try:
Current_units = UnitDict[current_prefix]
except KeyError:
raise ValueError("You entered {} for the current_prefix, this is not a valid prefix".format(current_prefix))
conversion_multiplication = Current_units/Desired_units
converted_array = array*conversion_multiplication
return converted_array | Converts an array or value to of a certain
unit scale to another unit scale.
Accepted units are:
E - exa - 1e18
P - peta - 1e15
T - tera - 1e12
G - giga - 1e9
M - mega - 1e6
k - kilo - 1e3
m - milli - 1e-3
u - micro - 1e-6
n - nano - 1e-9
p - pico - 1e-12
f - femto - 1e-15
a - atto - 1e-18
Parameters
----------
array : ndarray
Array to be converted
unit_prefix : string
desired unit (metric) prefix (e.g. nm would be n, ms would be m)
current_prefix : optional, string
current prefix of units of data (assumed to be in SI units
by default (e.g. m or s)
Returns
-------
converted_array : ndarray
Array multiplied such as to be in the units specified |
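Two worked examples, assuming unit_conversion is imported from this module:
print(unit_conversion(0.005, 'm'))                    # 5.0  (0.005 s -> 5 ms)
print(unit_conversion(2.0, 'k', current_prefix='G'))  # 2000000.0  (2 GHz -> 2e6 kHz)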
def histogram_phase(phase_slices, phase, histbins=200, show_plot=False):
counts_array = _np.zeros([len(phase), histbins])
histedges = [phase_slices.min(), phase_slices.max()]
for i, phase_slice in enumerate(phase_slices): # for each value of phase
counts, bin_edges = _np.histogram(phase_slice, bins=histbins, range=histedges) # histogram the position distribution at that phase
counts_array[i] = counts
counts_array = _np.array(counts_array)
counts_array_transposed = _np.transpose(counts_array).astype(float)
if show_plot == True:
fig = _plt.figure(figsize=(12, 6))
ax = fig.add_subplot(111)
ax.set_title('Phase Distribution')
ax.set_xlabel("phase (°)")
ax.set_ylabel("x")
_plt.imshow(counts_array_transposed, cmap='hot', interpolation='nearest', extent=[phase[0], phase[-1], histedges[0], histedges[1]])
ax.set_aspect('auto')
_plt.show()
return counts_array_transposed, bin_edges | histograms the phase slices such as to build a histogram of the position
distribution at each phase value.
Parameters
----------
phase_slices : ndarray
2d array containing slices from many oscillations at each phase
phase : ndarray
1d array of phases corresponding to slices
histbins : int, optional (default=200)
number of bins to use in histogramming data
show_plot : bool, optional (default=False)
if true plots and shows the heatmap of the
phase against the positon distribution
Returns
-------
counts_array : ndarray
2d array containing the number of counts varying with
phase and position.
bin_edges : ndarray
positions of bin edges |
def get_wigner(z, freq, sample_freq, histbins=200, show_plot=False):
phase, phase_slices = extract_slices(z, freq, sample_freq, show_plot=False)
counts_array, bin_edges = histogram_phase(phase_slices, phase, histbins, show_plot=show_plot)
diff = bin_edges[1] - bin_edges[0]
bin_centres = bin_edges[:-1] + diff / 2
iradon_output = _iradon_sart(counts_array, theta=phase)
#_plt.imshow(iradon_output, extent=[bin_centres[0], bin_centres[-1], bin_centres[0], bin_centres[-1]])
#_plt.show()
return iradon_output, bin_centres | Calculates an approximation to the wigner quasi-probability distribution
by splitting the z position array into slices of the length of one period
of the motion. This slice is then associated with phase from -180 to 180
degrees. These slices are then histogramed in order to get a distribution
of counts of where the particle is observed at each phase. The 2d array
containing the counts varying with position and phase is then passed through
the inverse radon transformation using the Simultaneous Algebraic
Reconstruction Technique approximation from the scikit-image package.
Parameters
----------
z : ndarray
trace of z motion
freq : float
frequency of motion
sample_freq : float
sample frequency of the z array
histbins : int, optional (default=200)
number of bins to use in histogramming data for each phase
show_plot : bool, optional (default=False)
Whether or not to plot the phase distribution
Returns
-------
iradon_output : ndarray
2d array of size (histbins x histbins)
bin_centres : ndarray
positions of the bin centres |
def calc_reduced_chi_squared(y_observed, y_model, observation_error, number_of_fitted_parameters):
observed = _np.array(y_observed)
expected = _np.array(y_model)
if observed.shape != expected.shape:
raise ValueError("y_observed should have same number of elements as y_model")
residuals = (observed - expected)
z = residuals / observation_error # residuals divided by known error in measurement
chi2 = _np.sum(z**2) # chi squared value
num_of_observations = len(observed)
v = num_of_observations - number_of_fitted_parameters # v = number of degrees of freedom
chi2_reduced = chi2/v
return chi2_reduced | Calculates the reduced chi-squared, used to compare a model to observations. For example can be used to calculate how good a fit is by using fitted y values for y_model along with observed y values and error in those y values. Reduced chi-squared should be close to 1 for a good fit, lower than 1 suggests you are overestimating the measurement error (observation_error you entered is higher than the true error in the measurement). A value higher than 1 suggests either your model is a bad fit OR you are underestimating the error in the measurement (observation_error you entered is lower than the true error in the measurement). See https://en.wikipedia.org/wiki/Reduced_chi-squared_statistic for more detail.
Parameters
----------
y_observed : ndarray
array of measured/observed values of some variable y which you are fitting to.
y_model : ndarray
array of y values predicted by your model/fit (predicted y values corresponding to y_observed)
observation_error : float
error in the measurements/observations of y
number_of_fitted_parameters : float
number of parameters in your model
Returns
-------
chi2_reduced : float
reduced chi-squared parameter |
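A worked numeric example, assuming calc_reduced_chi_squared is imported from this module:
observed = [1.0, 2.1, 2.9, 4.2]
model = [1.0, 2.0, 3.0, 4.0]
# residuals/error = [0, 1, -1, 2] -> chi2 = 6, degrees of freedom = 4 - 2 = 2
print(calc_reduced_chi_squared(observed, model, 0.1, 2))   # ~3.0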
def get_time_data(self, timeStart=None, timeEnd=None):
if timeStart == None:
timeStart = self.timeStart
if timeEnd == None:
timeEnd = self.timeEnd
time = self.time.get_array()
StartIndex = _np.where(time == take_closest(time, timeStart))[0][0]
EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0]
if EndIndex == len(time) - 1:
EndIndex = EndIndex + 1 # so that it does not remove the last element
return time[StartIndex:EndIndex], self.voltage[StartIndex:EndIndex] | Gets the time and voltage data.
Parameters
----------
timeStart : float, optional
The time get data from.
By default it uses the first time point
timeEnd : float, optional
The time to finish getting data from.
By default it uses the last time point
Returns
-------
time : ndarray
array containing the value of time (in seconds) at which the
voltage is sampled
voltage : ndarray
array containing the sampled voltages |
def plot_time_data(self, timeStart=None, timeEnd=None, units='s', show_fig=True):
unit_prefix = units[:-1] # removed the last char
if timeStart == None:
timeStart = self.timeStart
if timeEnd == None:
timeEnd = self.timeEnd
time = self.time.get_array()
StartIndex = _np.where(time == take_closest(time, timeStart))[0][0]
EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0]
fig = _plt.figure(figsize=properties['default_fig_size'])
ax = fig.add_subplot(111)
ax.plot(unit_conversion(time[StartIndex:EndIndex], unit_prefix),
self.voltage[StartIndex:EndIndex])
ax.set_xlabel("time ({})".format(units))
ax.set_ylabel("voltage (V)")
ax.set_xlim([unit_conversion(timeStart, unit_prefix), unit_conversion(timeEnd, unit_prefix)])
if show_fig == True:
_plt.show()
return fig, ax | plot time data against voltage data.
Parameters
----------
timeStart : float, optional
The time to start plotting from.
By default it uses the first time point
timeEnd : float, optional
The time to finish plotting at.
By default it uses the last time point
units : string, optional
units of time to plot on the x axis - defaults to s
show_fig : bool, optional
If True runs plt.show() before returning figure
if False it just returns the figure object.
(the default is True, it shows the figure)
Returns
-------
fig : matplotlib.figure.Figure object
The figure object created
ax : matplotlib.axes.Axes object
The subplot object created |
def get_PSD(self, NPerSegment=1000000, window="hann", timeStart=None, timeEnd=None, override=False):
if timeStart == None and timeEnd == None:
freqs, PSD = calc_PSD(self.voltage, self.SampleFreq, NPerSegment=NPerSegment, window=window)
self.PSD = PSD
self.freqs = freqs
else:
if timeStart == None:
timeStart = self.timeStart
if timeEnd == None:
timeEnd = self.timeEnd
time = self.time.get_array()
StartIndex = _np.where(time == take_closest(time, timeStart))[0][0]
EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0]
if EndIndex == len(time) - 1:
EndIndex = EndIndex + 1 # so that it does not remove the last element
freqs, PSD = calc_PSD(self.voltage[StartIndex:EndIndex], self.SampleFreq, NPerSegment=NPerSegment, window=window)
if override == True:
self.freqs = freqs
self.PSD = PSD
return freqs, PSD | Extracts the power spectral density (PSD) from the data.
Parameters
----------
NPerSegment : int, optional
Length of each segment used in scipy.welch
default = 1000000
window : str or tuple or array_like, optional
Desired window to use. See get_window for a list of windows
and required parameters. If window is array_like it will be
used directly as the window and its length will be used for
nperseg.
default = "hann"
Returns
-------
freqs : ndarray
Array containing the frequencies at which the PSD has been
calculated
PSD : ndarray
Array containing the value of the PSD at the corresponding
frequency value in V**2/Hz |
def plot_PSD(self, xlim=None, units="kHz", show_fig=True, timeStart=None, timeEnd=None, *args, **kwargs):
# self.get_PSD()
if timeStart == None and timeEnd == None:
freqs = self.freqs
PSD = self.PSD
else:
freqs, PSD = self.get_PSD(timeStart=timeStart, timeEnd=timeEnd)
unit_prefix = units[:-2]
if xlim == None:
xlim = [0, unit_conversion(self.SampleFreq/2, unit_prefix)]
fig = _plt.figure(figsize=properties['default_fig_size'])
ax = fig.add_subplot(111)
ax.semilogy(unit_conversion(freqs, unit_prefix), PSD, *args, **kwargs)
ax.set_xlabel("Frequency ({})".format(units))
ax.set_xlim(xlim)
ax.grid(which="major")
ax.set_ylabel("$S_{xx}$ ($V^2/Hz$)")
if show_fig == True:
_plt.show()
return fig, ax | plot the power spectral density.
Parameters
----------
xlim : array_like, optional
The x limits of the plotted PSD [LowerLimit, UpperLimit]
Default value is [0, SampleFreq/2]
units : string, optional
Units of frequency to plot on the x axis - defaults to kHz
show_fig : bool, optional
If True runs plt.show() before returning figure
if False it just returns the figure object.
(the default is True, it shows the figure)
Returns
-------
fig : matplotlib.figure.Figure object
The figure object created
ax : matplotlib.axes.Axes object
The subplot object created |
def calc_area_under_PSD(self, lowerFreq, upperFreq):
Freq_startAreaPSD = take_closest(self.freqs, lowerFreq)
index_startAreaPSD = int(_np.where(self.freqs == Freq_startAreaPSD)[0][0])
Freq_endAreaPSD = take_closest(self.freqs, upperFreq)
index_endAreaPSD = int(_np.where(self.freqs == Freq_endAreaPSD)[0][0])
AreaUnderPSD = sum(self.PSD[index_startAreaPSD: index_endAreaPSD])
return AreaUnderPSD | Sums the area under the PSD from lowerFreq to upperFreq.
Parameters
----------
lowerFreq : float
The lower limit of frequency to sum from
upperFreq : float
The upper limit of frequency to sum to
Returns
-------
AreaUnderPSD : float
The area under the PSD from lowerFreq to upperFreq |
def extract_parameters(self, P_mbar, P_Error, method="chang"):
[R, M, ConvFactor], [RErr, MErr, ConvFactorErr] = \
extract_parameters(P_mbar, P_Error,
self.A.n, self.A.std_dev,
self.Gamma.n, self.Gamma.std_dev,
method = method)
self.Radius = _uncertainties.ufloat(R, RErr)
self.Mass = _uncertainties.ufloat(M, MErr)
self.ConvFactor = _uncertainties.ufloat(ConvFactor, ConvFactorErr)
return self.Radius, self.Mass, self.ConvFactor | Extracts the Radius, mass and Conversion factor for a particle.
Parameters
----------
P_mbar : float
The pressure in mbar when the data was taken.
P_Error : float
    The error in the pressure value (as a decimal, e.g. 15% = 0.15)
method : string, optional
    The method used to extract the parameters,
    defaults to "chang"
Returns
-------
Radius : uncertainties.ufloat
The radius of the particle in m
Mass : uncertainties.ufloat
The mass of the particle in kg
ConvFactor : uncertainties.ufloat
    The conversion factor between volts and metres |
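A sketch of a typical call, assuming self.A and self.Gamma were set by an earlier fit and that the pressure reading was 3.2 mbar with a 15% error (both values hypothetical):

    Radius, Mass, ConvFactor = data.extract_parameters(3.2, 0.15, method="chang")
    print(Radius, Mass, ConvFactor)   # uncertainties.ufloat values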
def get_value(self, ColumnName, RunNo):
Value = float(self.ORGTableData[self.ORGTableData.RunNo == '{}'.format(
RunNo)][ColumnName])
        return Value | Retrieves the value of the column named ColumnName associated
with a particular run number.
Parameters
----------
ColumnName : string
    The name of the desired org-mode table's column
RunNo : int
    The run number for which to retrieve the value
Returns
-------
Value : float
The value for the column's name and associated run number |
def steady_state_potential(xdata,HistBins=100):
import numpy as np
    # histogram the position data once; np.histogram returns (counts, bin_edges)
    pops, bins = np.histogram(xdata, HistBins)
    # use the upper edge of each bin as the position coordinate
    bins = bins[0:-1] + np.mean(np.diff(bins))
    # normalise pops so they form a probability distribution
    pops = pops / float(np.sum(pops))
return bins,-np.log(pops) | Calculates the steady state potential.
Parameters
----------
xdata : ndarray
Position data for a degree of freedom
HistBins : int
Number of bins to use for histogram
of xdata. Number of position points
at which the potential is calculated.
Returns
-------
position : ndarray
positions at which potential has been
calculated
potential : ndarray
value of potential at the positions above |
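A self-contained sketch with synthetic data, just to show the returned shapes (values are made up):

    import numpy as np

    x = np.random.normal(0.0, 1e-3, 100000)              # synthetic position trace
    position, potential = steady_state_potential(x, HistBins=100)
    # both returned arrays have length HistBins (here 100);
    # potential is -log of the normalised histogram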
def dynamical_potential(xdata, dt, order=3):
import numpy as np
adata = CalcAcceleration(xdata, dt)
xdata = xdata[2:] # removes first 2 values as differentiating twice means
# we have acceleration[n] corresponds to position[n-2]
z=np.polyfit(xdata,adata,order)
p=np.poly1d(z)
spring_pot=np.polyint(p)
return -spring_pot | Computes potential from spring function
Parameters
----------
xdata : ndarray
Position data for a degree of freedom,
at which to calculate potential
dt : float
time between measurements
order : int
order of polynomial to fit
Returns
-------
Potential : numpy.poly1d
    Polynomial (the negative integral of the fitted spring
    function) that can be evaluated at a position to give the
    potential there |
def CalcAcceleration(xdata, dt):
    import numpy as np
    # double difference of position divided by dt**2 gives the acceleration
    acceleration = np.diff(np.diff(xdata)) / dt**2
return acceleration | Calculates the acceleration from the position
Parameters
----------
xdata : ndarray
Position data
dt : float
time between measurements
Returns
-------
acceleration : ndarray
values of acceleration from position
2 to N. |
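A tiny worked example: for the quadratic trajectory x[n] = n**2 sampled with dt = 1, the double difference recovers the constant acceleration of 2, and the output is two samples shorter than the input:

    import numpy as np

    x = np.arange(10.0) ** 2
    a = CalcAcceleration(x, 1.0)   # array of eight 2.0 values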
def MakeDynamicPotentialFunc(kBT_Gamma, density, SpringPotnlFunc):
    import numpy as np

    def PotentialFunc(xdata, Radius):
"""
calculates the potential given the position (in volts)
and the radius of the particle.
Parameters
----------
xdata : ndarray
            Position data (in volts)
Radius : float
Radius in units of nm
Returns
-------
Potential : ndarray
Dynamical Spring Potential at positions given by xdata
"""
mass = ((4/3)*np.pi*((Radius*10**-9)**3))*density
yfit=(kBT_Gamma/mass)
Y = yfit*SpringPotnlFunc(xdata)
return Y
return PotentialFunc | Creates the function that calculates the potential given
the position (in volts) and the radius of the particle.
Parameters
----------
kBT_Gamma : float
Value of kB*T/Gamma
density : float
density of the nanoparticle
SpringPotnlFunc : function
Function which takes the value of position (in volts)
and returns the spring potential
Returns
-------
PotentialFunc : function
function that calculates the potential given
the position (in volts) and the radius of the
particle. |
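One plausible way the pieces above fit together, sketched under assumptions: `xdata_volts`, `dt`, `kBT_Gamma` and `density` are taken to exist already, and fitting the dynamical potential to the steady-state one to extract a radius is only a guess at the intended workflow:

    from scipy.optimize import curve_fit

    SpringPotnlFunc = dynamical_potential(xdata_volts, dt)
    PotentialFunc = MakeDynamicPotentialFunc(kBT_Gamma, density, SpringPotnlFunc)
    positions, measured_potential = steady_state_potential(xdata_volts, HistBins=100)
    popt, pcov = curve_fit(PotentialFunc, positions, measured_potential, p0=[50.0])
    radius_nm = popt[0]   # fitted radius in nm (the 50 nm initial guess is arbitrary)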
def finished(finished_status,
update_interval,
status_key,
edit_at_key):
return {
status_key: {"$gte": finished_status},
edit_at_key: {
"$gte": x_seconds_before_now(update_interval),
},
    } | Create a pymongo filter dict that matches all finished tasks.
:param finished_status: int, status codes greater than or equal to this
    are considered finished.
:param update_interval: int, the record will be updated every x seconds.
:param status_key: status code field key; supports dot notation.
:param edit_at_key: edit_at time field key; supports dot notation.
:return: dict, a pymongo filter.
**Summary**
Matches documents whose status code is greater than or equal to ``finished_status`` and whose edit time falls within the last ``update_interval`` seconds. |
def unfinished(finished_status,
update_interval,
status_key,
edit_at_key):
return {
"$or": [
{status_key: {"$lt": finished_status}},
{edit_at_key: {"$lt": x_seconds_before_now(update_interval)}},
]
    } | Create a pymongo filter dict that matches all unfinished tasks.
:param finished_status: int, status codes less than this
    are considered unfinished.
:param update_interval: int, the record will be updated every x seconds.
:param status_key: status code field key; supports dot notation.
:param edit_at_key: edit_at time field key; supports dot notation.
:return: dict, a pymongo filter.
**Summary**
Matches documents whose status code is below ``finished_status``, or whose last edit time is more than ``update_interval`` seconds ago. |
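A short usage sketch against a hypothetical collection (database, collection, field names and thresholds are made up for illustration):

    import pymongo

    col = pymongo.MongoClient()["mydb"]["tasks"]
    done_filter = finished(finished_status=50, update_interval=3600,
                           status_key="status", edit_at_key="edit_at")
    todo_filter = unfinished(finished_status=50, update_interval=3600,
                             status_key="status", edit_at_key="edit_at")
    print(col.count_documents(done_filter), col.count_documents(todo_filter))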
def getCommandLine(self):
commandLine = self.precursor + self.sep if self.precursor else ''
commandLine += self.cd + ' ' + self.path + self.sep if self.path else ''
commandLine += PosixCommand.getCommandLine(self)
return commandLine | Insert the precursor and change directory commands |
def _policy_psets(policy_instances):
if len(policy_instances) == 0:
# Special case: find any permission sets that don't have
# associated policy instances.
return PermissionSet.objects.filter(policyinstance__isnull=True)
else:
return PermissionSet.objects.filter(
policyinstance__policy__in=policy_instances).distinct() | Find all permission sets making use of all of a list of policy_instances.
The input is an array of policy instances. |
def _get_permission_set_tree(user):
if hasattr(user, CACHED_PSET_PROPERTY_KEY):
return getattr(user, CACHED_PSET_PROPERTY_KEY)
if user.is_authenticated():
try:
return user.permissionset.first().tree()
except AttributeError:
raise ObjectDoesNotExist
return PermissionSet.objects.get(anonymous_user=True).tree() | Helper to return cached permission set tree from user instance if set, else
generates and returns analyzed permission set tree. Does not cache set
automatically; that must be done explicitly. |
def ensure_permission_set_tree_cached(user):
if hasattr(user, CACHED_PSET_PROPERTY_KEY):
return
try:
setattr(
user, CACHED_PSET_PROPERTY_KEY, _get_permission_set_tree(user))
except ObjectDoesNotExist: # No permission set
pass | Helper to cache permission set tree on user instance |
def clear_user_policies(user):
if user is None:
try:
pset = PermissionSet.objects.get(anonymous_user=True)
pset.anonymous_user = False
pset.save()
except ObjectDoesNotExist:
return
else:
pset = user.permissionset.first()
if pset:
pset.refresh()
if user is not None:
pset.users.remove(user)
if pset.users.count() == 0 and not pset.anonymous_user:
pset.delete() | Remove all policies assigned to a user (or the anonymous user if
``user`` is ``None``). |
def assign_user_policies(user, *policies_roles):
clear_user_policies(user)
pset = PermissionSet.objects.by_policies_and_roles(policies_roles)
pset.refresh()
if user is None:
pset.anonymous_user = True
else:
pset.users.add(user)
pset.save()
cache.set(user_cache_key(user), None) | Assign a sequence of policies to a user (or the anonymous user is
``user`` is ``None``). (Also installed as ``assign_policies``
method on ``User`` model. |
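A hedged example of assigning and clearing policies; `default_policy` and `anonymous_policy` stand for hypothetical Policy model instances:

    assign_user_policies(user, default_policy)      # give a user a single policy
    assign_user_policies(None, anonymous_policy)    # policies for the anonymous user
    clear_user_policies(user)                       # remove the user's policies again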
def user_assigned_policies(user):
key = user_cache_key(user)
cached = cache.get(key)
if cached is not None:
return cached
if user is None:
pset = PermissionSet.objects.filter(anonymous_user=True).first()
else:
pset = user.permissionset.first()
if pset is None:
return []
res = []
skip_role_policies = False
skip_role = None
skip_role_variables = None
for pi in pset.policyinstance_set.select_related('policy', 'role'):
if skip_role_policies:
if pi.role == skip_role and pi.variables == skip_role_variables:
continue
else:
skip_role_policies = False
if pi.role:
res.append(pi.role)
skip_role = pi.role
skip_role_variables = pi.variables
skip_role_policies = True
else:
if pi.variables != '{}':
res.append((pi.policy, json.loads(pi.variables)))
else:
res.append(pi.policy)
cache.set(key, res)
    return res | Return the sequence of policies assigned to a user (or to the anonymous
user if ``user`` is ``None``). (Also installed as the
``assigned_policies`` method on the ``User`` model.) |
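Reading an assignment back, sketched for a hypothetical Django view (request.user is assumed to exist):

    for item in user_assigned_policies(request.user):
        print(item)   # a role, a policy, or a (policy, variables) tuple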
def parsed(self):
if not self._parsed:
self._parsed = json.loads(self.content)
return self._parsed | Get the JSON dictionary object which represents the content.
This property is cached and only parses the content once. |
def cleanup_logger(self):
self.log_handler.close()
self.log.removeHandler(self.log_handler) | Clean up logger to close out file handles.
After this is called, messages written to self.log will be
discarded. |
def update_configs(self, release):
git_repo = release['git_repo']
git_cache = release['git_cache']
if not os.path.isdir(git_cache):
self.call(['git', 'clone', '--mirror', git_repo, git_cache])
else:
self.call(['git', 'fetch', '--all', '--prune'], cwd=git_cache)
git_dir = release['git_dir'] = os.path.join(release['tmp_dir'],
os.path.basename(git_repo))
self.call(['git', 'clone', '-b', release['git_branch'],
git_cache, git_dir])
if release['delete_repo_files']:
for repo_file in glob.glob(os.path.join(git_dir, '*.repo')):
self.log.info('Deleting %s' % repo_file)
os.unlink(repo_file) | Update the fedora-atomic.git repositories for a given release |
def mock_cmd(self, release, *cmd, **kwargs):
fmt = '{mock_cmd}'
if kwargs.get('new_chroot') is True:
            fmt += ' --new-chroot'
fmt += ' --configdir={mock_dir}'
return self.call(fmt.format(**release).split()
+ list(cmd)) | Run a mock command in the chroot for a given release |
def init_mock(self, release):
root = '/var/lib/mock/%s' % release['mock']
if not os.path.isdir(root):
self.mock_cmd(release, '--init')
self.log.info('mock chroot initialized')
else:
if release.get('mock_clean'):
self.mock_cmd(release, '--clean')
self.mock_cmd(release, '--init')
self.log.info('mock chroot cleaned & initialized')
else:
self.mock_cmd(release, '--update')
self.log.info('mock chroot updated') | Initialize/update our mock chroot |
def generate_mock_config(self, release):
mock_tmpl = pkg_resources.resource_string(__name__, 'templates/mock.mako')
mock_dir = release['mock_dir'] = os.path.join(release['tmp_dir'], 'mock')
mock_cfg = os.path.join(release['mock_dir'], release['mock'] + '.cfg')
os.mkdir(mock_dir)
for cfg in ('site-defaults.cfg', 'logging.ini'):
os.symlink('/etc/mock/%s' % cfg, os.path.join(mock_dir, cfg))
with file(mock_cfg, 'w') as cfg:
mock_out = Template(mock_tmpl).render(**release)
self.log.debug('Writing %s:\n%s', mock_cfg, mock_out)
cfg.write(mock_out) | Dynamically generate our mock configuration |
def mock_chroot(self, release, cmd, **kwargs):
        return self.mock_cmd(release, '--chroot', cmd, **kwargs) | Run a command in the mock container for a release
def generate_repo_files(self, release):
repo_tmpl = pkg_resources.resource_string(__name__, 'templates/repo.mako')
repo_file = os.path.join(release['git_dir'], '%s.repo' % release['repo'])
with file(repo_file, 'w') as repo:
repo_out = Template(repo_tmpl).render(**release)
self.log.debug('Writing repo file %s:\n%s', repo_file, repo_out)
repo.write(repo_out)
self.log.info('Wrote repo configuration to %s', repo_file) | Dynamically generate our yum repo configuration |
def ostree_init(self, release):
out = release['output_dir'].rstrip('/')
base = os.path.dirname(out)
if not os.path.isdir(base):
self.log.info('Creating %s', base)
os.makedirs(base, mode=0755)
if not os.path.isdir(out):
self.mock_chroot(release, release['ostree_init']) | Initialize the OSTree for a release |
def ostree_compose(self, release):
start = datetime.utcnow()
treefile = os.path.join(release['git_dir'], 'treefile.json')
cmd = release['ostree_compose'] % treefile
with file(treefile, 'w') as tree:
json.dump(release['treefile'], tree)
# Only use new_chroot for the invocation, as --clean and --new-chroot are buggy together right now
out, err, rcode = self.mock_chroot(release, cmd, new_chroot=True)
ref = None
commitid = None
for line in out.split('\n'):
if ' => ' in line:
# This line is the: ref => commitid line
line = line.replace('\n', '')
ref, _, commitid = line.partition(' => ')
self.log.info('rpm-ostree compose complete (%s), ref %s, commitid %s',
datetime.utcnow() - start, ref, commitid)
return ref, commitid | Compose the OSTree in the mock container |
def update_ostree_summary(self, release):
self.log.info('Updating the ostree summary for %s', release['name'])
self.mock_chroot(release, release['ostree_summary'])
return os.path.join(release['output_dir'], 'summary') | Update the ostree summary file and return a path to it |
def sync_in(self, release):
tree = release['canonical_dir']
if os.path.exists(tree) and release.get('rsync_in_objs'):
out = release['output_dir']
if not os.path.isdir(out):
self.log.info('Creating %s', out)
os.makedirs(out)
self.call(release['rsync_in_objs'])
self.call(release['rsync_in_rest']) | Sync the canonical repo to our local working directory |
def sync_out(self, release):
if release.get('rsync_out_objs'):
tree = release['canonical_dir']
if not os.path.isdir(tree):
self.log.info('Creating %s', tree)
os.makedirs(tree)
self.call(release['rsync_out_objs'])
self.call(release['rsync_out_rest']) | Sync our tree to the canonical location |