""""""Sets plot labels, according to predefined options
:param name: The type of plot to create labels for. Options: calibration, tuning, anything else labels for spike counts
:type name: str
""""""
if name == ""calibration"":
self.setWindowTitle(""Calibration Curve"")
self.setTitle(""Calibration Curve"")
self.setLabel('bottom', ""Frequency"", units='Hz')
self.setLabel('left', 'Recorded Intensity (dB SPL)')
elif name == ""tuning"":
self.setWindowTitle(""Tuning Curve"")
self.setTitle(""Tuning Curve"")
self.setLabel('bottom', ""Frequency"", units=""Hz"")
self.setLabel('left', ""Spike Count (mean)"")
else:
self.setWindowTitle(""Spike Counts"")
self.setTitle(""Spike Counts"")
self.setLabel('bottom', ""Test Number"", units='')
self.setLabel('left', ""Spike Count (mean)"", units='')"
42,"def loadCurve(data, groups, thresholds, absvals, fs, xlabels):
""""""Accepts a data set from a whole test, averages reps and re-creates the
progress plot as the same as it was during live plotting. Number of thresholds
must match the size of the channel dimension""""""
xlims = (xlabels[0], xlabels[-1])
pw = ProgressWidget(groups, xlims)
spike_counts = []
# skip control
for itrace in range(data.shape[0]):
count = 0
for ichan in range(data.shape[2]):
flat_reps = data[itrace,:,ichan,:].flatten()
count += len(spikestats.spike_times(flat_reps, thresholds[ichan], fs, absvals[ichan]))
spike_counts.append(count/(data.shape[1]*data.shape[2])) #mean spikes per rep
i = 0
for g in groups:
for x in xlabels:
pw.setPoint(x, g, spike_counts[i])
i +=1
return pw"
43,"def setBins(self, bins):
""""""Sets the bin centers (x values)
:param bins: time bin centers
:type bins: numpy.ndarray
""""""
self._bins = bins
self._counts = np.zeros_like(self._bins)
bar_width = bins[0]*1.5
self.histo.setOpts(x=bins, height=self._counts, width=bar_width)
self.setXlim((0, bins[-1]))"
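A short sketch of the bin layout setBins expects; the 5 ms bin size and 500 ms window are arbitrary example values, not defaults from the source.

import numpy as np

binsz = 0.005                               # assumed bin size (s)
window = 0.5                                # assumed response window (s)
bins = np.arange(binsz/2, window, binsz)    # bin centers, not edges
# setBins(bins) then allocates one zeroed count per center and draws bars of
# width bins[0]*1.5 (0.75*binsz when the first center sits at binsz/2).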
44,"def clearData(self):
""""""Clears all histograms (keeps bins)""""""
self._counts = np.zeros_like(self._bins)
self.histo.setOpts(height=self._counts)"
45,"def appendData(self, bins, repnum=None):
""""""Increases the values at bins (indexes)
:param bins: bin center values to increment counts for, to increment a time bin more than once include multiple items in list with that bin center value
:type bins: numpy.ndarray
""""""
# only if the last sample was above threshold, but last-1 one wasn't
bins[bins >= len(self._counts)] = len(self._counts) -1
bin_totals = np.bincount(bins)
self._counts[:len(bin_totals)] += bin_totals
self.histo.setOpts(height=np.array(self._counts))"
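A standalone illustration of the bincount update used above, with made-up indexes:

import numpy as np

counts = np.zeros(5)
hit_bins = np.array([1, 1, 3])                         # two spikes in bin 1, one in bin 3
hit_bins[hit_bins >= len(counts)] = len(counts) - 1    # clamp out-of-range indexes
totals = np.bincount(hit_bins)                         # -> array([0, 2, 0, 1])
counts[:len(totals)] += totals                         # counts is now [0., 2., 0., 1., 0.]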
46,"def processData(self, times, response, test_num, trace_num, rep_num):
""""""Calulate spike times from raw response data""""""
# invert polarity affects spike counting
response = response * self._polarity
if rep_num == 0:
# reset
self.spike_counts = []
self.spike_latencies = []
self.spike_rates = []
fs = 1./(times[1] - times[0])
# process response; calculate spike times
spike_times = spikestats.spike_times(response, self._threshold, fs)
self.spike_counts.append(len(spike_times))
if len(spike_times) > 0:
self.spike_latencies.append(spike_times[0])
else:
self.spike_latencies.append(np.nan)
self.spike_rates.append(spikestats.firing_rate(spike_times, times))
binsz = self._bins[1] - self._bins[0]
response_bins = spikestats.bin_spikes(spike_times, binsz)
# self.putnotify('spikes_found', (response_bins, rep_num))
self.appendData(response_bins, rep_num)"
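For orientation only, a rough threshold-crossing sketch of what spikestats.spike_times and spikestats.bin_spikes presumably compute; the real implementations live in the spikestats module and may differ in detail.

import numpy as np

def naive_spike_times(signal, threshold, fs):
    """Times (s) at which the signal first rises above threshold (assumed behavior)."""
    above = signal > threshold
    onsets = np.flatnonzero(above[1:] & ~above[:-1]) + 1    # upward crossings
    return onsets / float(fs)

def naive_bin_spikes(spike_times, binsz):
    """Index of the time bin each spike falls into (assumed behavior)."""
    return np.floor(np.asarray(spike_times) / binsz).astype(int)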
47,"def setSr(self, fs):
""""""Sets the samplerate of the input operation being plotted""""""
self.tracePlot.setSr(fs)
self.stimPlot.setSr(fs)"
48,"def setWindowSize(self, winsz):
""""""Sets the size of scroll window""""""