text_prompt: string (lengths 100 to 17.7k)
code_prompt: string (lengths 7 to 9.86k)
<SYSTEM_TASK:> Add related primary keys to a Question instance. <END_TASK> <USER_TASK:> Description: def question_default_add_related_pks(self, obj): """Add related primary keys to a Question instance."""
if not hasattr(obj, '_choice_pks'):
    obj._choice_pks = list(obj.choices.values_list('pk', flat=True))
<SYSTEM_TASK:> Convert a Choice to a cached instance representation. <END_TASK> <USER_TASK:> Description: def choice_default_serializer(self, obj): """Convert a Choice to a cached instance representation."""
if not obj:
    return None
self.choice_default_add_related_pks(obj)
return dict((
    ('id', obj.id),
    ('choice_text', obj.choice_text),
    self.field_to_json(
        'PK', 'question', model=Question, pk=obj.question_id),
    self.field_to_json(
        'PKList', 'voters', model=User, pks=obj._voter_pks)
))
<SYSTEM_TASK:> Load a Choice from the database. <END_TASK> <USER_TASK:> Description: def choice_default_loader(self, pk): """Load a Choice from the database."""
try:
    obj = Choice.objects.get(pk=pk)
except Choice.DoesNotExist:
    return None
else:
    self.choice_default_add_related_pks(obj)
    return obj
<SYSTEM_TASK:> Add related primary keys to a Choice instance. <END_TASK> <USER_TASK:> Description: def choice_default_add_related_pks(self, obj): """Add related primary keys to a Choice instance."""
if not hasattr(obj, '_voter_pks'):
    obj._voter_pks = obj.voters.values_list('pk', flat=True)
<SYSTEM_TASK:> Invalidate cached items when the Choice changes. <END_TASK> <USER_TASK:> Description: def choice_default_invalidator(self, obj): """Invalidate cached items when the Choice changes."""
invalid = [('Question', obj.question_id, True)]
for pk in obj.voters.values_list('pk', flat=True):
    invalid.append(('User', pk, False))
return invalid
<SYSTEM_TASK:> Get the Django cache interface. <END_TASK> <USER_TASK:> Description: def cache(self): """Get the Django cache interface. This allows disabling the cache with settings.USE_DRF_INSTANCE_CACHE=False. It also delays import so that Django Debug Toolbar will record cache requests. """
if not self._cache:
    use_cache = getattr(settings, 'USE_DRF_INSTANCE_CACHE', True)
    if use_cache:
        from django.core.cache import cache
        self._cache = cache
return self._cache
<SYSTEM_TASK:> Delete all versions of a cached instance. <END_TASK> <USER_TASK:> Description: def delete_all_versions(self, model_name, obj_pk): """Delete all versions of a cached instance."""
if self.cache:
    for version in self.versions:
        key = self.key_for(version, model_name, obj_pk)
        self.cache.delete(key)
<SYSTEM_TASK:> Convert a field to a JSON-serializable representation. <END_TASK> <USER_TASK:> Description: def field_to_json(self, type_code, key, *args, **kwargs): """Convert a field to a JSON-serializable representation."""
assert ':' not in key
to_json = self.field_function(type_code, 'to_json')
key_and_type = "%s:%s" % (key, type_code)
json_value = to_json(*args, **kwargs)
return key_and_type, json_value
<SYSTEM_TASK:> Convert a JSON-serializable representation back to a field. <END_TASK> <USER_TASK:> Description: def field_from_json(self, key_and_type, json_value): """Convert a JSON-serializable representation back to a field."""
assert ':' in key_and_type
key, type_code = key_and_type.split(':', 1)
from_json = self.field_function(type_code, 'from_json')
value = from_json(json_value)
return key, value
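A minimal standalone sketch of the "name:TYPE" key convention these two helpers implement. The converter table and demo_* names below are hypothetical, not part of the library, which dispatches through field_function() instead:

# Hypothetical sketch; the real methods look up converters via field_function().
converters = {
    'Int': (int, int),   # (to_json, from_json)
    'Str': (str, str),
}

def demo_field_to_json(type_code, key, value):
    assert ':' not in key
    return "%s:%s" % (key, type_code), converters[type_code][0](value)

def demo_field_from_json(key_and_type, json_value):
    key, type_code = key_and_type.split(':', 1)
    return key, converters[type_code][1](json_value)

assert demo_field_to_json('Int', 'votes', 3) == ('votes:Int', 3)
assert demo_field_from_json('votes:Int', 3) == ('votes', 3)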
<SYSTEM_TASK:> Get the cached native representation for one or more objects. <END_TASK> <USER_TASK:> Description: def get_instances(self, object_specs, version=None): """Get the cached native representation for one or more objects. Keyword arguments: object_specs - A sequence of triples (model name, pk, obj): - model name - the name of the model - pk - the primary key of the instance - obj - the instance, or None to load it version - The cache version to use, or None for default To get the 'new object' representation, set pk and obj to None Return is a dictionary: key - (model name, pk) value - (native representation, pk, object or None) """
ret = dict()
spec_keys = set()
cache_keys = []
version = version or self.default_version

# Construct all the cache keys to fetch
for model_name, obj_pk, obj in object_specs:
    assert model_name
    assert obj_pk

    # Get cache keys to fetch
    obj_key = self.key_for(version, model_name, obj_pk)
    spec_keys.add((model_name, obj_pk, obj, obj_key))
    cache_keys.append(obj_key)

# Fetch the cache keys
if cache_keys and self.cache:
    cache_vals = self.cache.get_many(cache_keys)
else:
    cache_vals = {}

# Use cached representations, or recreate
cache_to_set = {}
for model_name, obj_pk, obj, obj_key in spec_keys:

    # Load cached objects
    obj_val = cache_vals.get(obj_key)
    obj_native = json.loads(obj_val) if obj_val else None

    # Invalid or not set - load from database
    if not obj_native:
        if not obj:
            loader = self.model_function(model_name, version, 'loader')
            obj = loader(obj_pk)
        serializer = self.model_function(
            model_name, version, 'serializer')
        obj_native = serializer(obj) or {}
        if obj_native:
            cache_to_set[obj_key] = json.dumps(obj_native)

    # Get fields to convert
    keys = [key for key in obj_native.keys() if ':' in key]
    for key in keys:
        json_value = obj_native.pop(key)
        name, value = self.field_from_json(key, json_value)
        assert name not in obj_native
        obj_native[name] = value

    if obj_native:
        ret[(model_name, obj_pk)] = (obj_native, obj_key, obj)

# Save any new cached representations
if cache_to_set and self.cache:
    self.cache.set_many(cache_to_set)

return ret
<SYSTEM_TASK:> Create or update a cached instance. <END_TASK> <USER_TASK:> Description: def update_instance( self, model_name, pk, instance=None, version=None, update_only=False): """Create or update a cached instance. Keyword arguments are: model_name - The name of the model pk - The primary key of the instance instance - The Django model instance, or None to load it version - The version to update, or None for all versions update_only - If False (default), then missing cache entries will be populated and will cause follow-on invalidation. If True, then only entries already in the cache will be updated and cause follow-on invalidation. Return is a list of tuples (model name, pk, immediate) that also need to be updated. """
versions = [version] if version else self.versions
invalid = []
for version in versions:
    serializer = self.model_function(model_name, version, 'serializer')
    loader = self.model_function(model_name, version, 'loader')
    invalidator = self.model_function(
        model_name, version, 'invalidator')
    if serializer is None and loader is None and invalidator is None:
        continue

    if self.cache is None:
        continue

    # Try to load the instance
    if not instance:
        instance = loader(pk)

    if serializer:
        # Get current value, if in cache
        key = self.key_for(version, model_name, pk)
        current_raw = self.cache.get(key)
        current = json.loads(current_raw) if current_raw else None

        # Get new value
        if update_only and current_raw is None:
            new = None
        else:
            new = serializer(instance)
        deleted = not instance

        # If cache is invalid, update cache
        invalidate = (current != new) or deleted
        if invalidate:
            if deleted:
                self.cache.delete(key)
            else:
                self.cache.set(key, json.dumps(new))
    else:
        invalidate = True

    # Invalidate upstream caches
    if instance and invalidate:
        for upstream in invalidator(instance):
            if isinstance(upstream, str):
                self.cache.delete(upstream)
            else:
                m, i, immediate = upstream
                if immediate:
                    invalidate_key = self.key_for(version, m, i)
                    self.cache.delete(invalidate_key)
                invalid.append((m, i, version))
return invalid
<SYSTEM_TASK:> Convert json_val to a timedelta object. <END_TASK> <USER_TASK:> Description: def field_timedelta_from_json(self, json_val): """Convert json_val to a timedelta object. json_val contains total number of seconds in the timedelta. If json_val is a string it will be converted to a float. """
if isinstance(json_val, str):
    return timedelta(seconds=float(json_val))
elif json_val is None:
    return None
else:
    return timedelta(seconds=json_val)
<SYSTEM_TASK:> Convert timedelta to value containing total number of seconds. <END_TASK> <USER_TASK:> Description: def field_timedelta_to_json(self, td): """Convert timedelta to value containing total number of seconds. If there are fractions of a second the return value will be a string, otherwise it will be an int. """
if isinstance(td, six.string_types):
    td = parse_duration(td)
if not td:
    return None
if td.microseconds > 0:
    return str(td.total_seconds())
else:
    return int(td.total_seconds())
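A self-contained sketch of the round-trip these two timedelta helpers perform, written as plain functions rather than the cache-class methods above (the td_to_json / td_from_json names are illustrative only):

from datetime import timedelta

def td_to_json(td):
    if td is None:
        return None
    # Fractional seconds become a string so precision survives JSON.
    return str(td.total_seconds()) if td.microseconds else int(td.total_seconds())

def td_from_json(val):
    if val is None:
        return None
    return timedelta(seconds=float(val) if isinstance(val, str) else val)

assert td_from_json(td_to_json(timedelta(minutes=2))) == timedelta(minutes=2)
assert td_from_json(td_to_json(timedelta(seconds=1, microseconds=500))) == \
    timedelta(seconds=1, microseconds=500)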
<SYSTEM_TASK:> Load a PkOnlyQueryset from a JSON dict. <END_TASK> <USER_TASK:> Description: def field_pklist_from_json(self, data): """Load a PkOnlyQueryset from a JSON dict. This uses the same format as cached_queryset_from_json """
model = get_model(data['app'], data['model'])
return PkOnlyQueryset(self, model, data['pks'])
<SYSTEM_TASK:> Convert a list of primary keys to a JSON dict. <END_TASK> <USER_TASK:> Description: def field_pklist_to_json(self, model, pks): """Convert a list of primary keys to a JSON dict. This uses the same format as cached_queryset_to_json """
app_label = model._meta.app_label
model_name = model._meta.model_name
return {
    'app': app_label,
    'model': model_name,
    'pks': list(pks),
}
<SYSTEM_TASK:> Update the cache when an instance is deleted. <END_TASK> <USER_TASK:> Description: def post_delete_update_cache(sender, instance, **kwargs): """Update the cache when an instance is deleted."""
name = sender.__name__
if name in cached_model_names:
    from .tasks import update_cache_for_instance
    update_cache_for_instance(name, instance.pk, instance)
<SYSTEM_TASK:> Update the cache when an instance is created or modified. <END_TASK> <USER_TASK:> Description: def post_save_update_cache(sender, instance, created, raw, **kwargs): """Update the cache when an instance is created or modified."""
if raw:
    return
name = sender.__name__
if name in cached_model_names:
    delay_cache = getattr(instance, '_delay_cache', False)
    if not delay_cache:
        from .tasks import update_cache_for_instance
        update_cache_for_instance(name, instance.pk, instance)
<SYSTEM_TASK:> Get the queryset for the action. <END_TASK> <USER_TASK:> Description: def get_queryset(self): """Get the queryset for the action. If the action is a read action, return a CachedQueryset. Otherwise, return a plain Django queryset. """
queryset = super(CachedViewMixin, self).get_queryset()
if self.action in ('list', 'retrieve'):
    return CachedQueryset(self.get_queryset_cache(), queryset=queryset)
else:
    return queryset
<SYSTEM_TASK:> Return the object the view is displaying. <END_TASK> <USER_TASK:> Description: def get_object(self, queryset=None): """ Return the object the view is displaying. Same as rest_framework.generics.GenericAPIView, but: - Failed assertions instead of deprecations """
# Determine the base queryset to use.
assert queryset is None, "Passing a queryset is disabled"
queryset = self.filter_queryset(self.get_queryset())

# Perform the lookup filtering.
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
lookup = self.kwargs.get(lookup_url_kwarg, None)
assert lookup is not None, "Other lookup methods are disabled"
filter_kwargs = {self.lookup_field: lookup}
obj = self.get_object_or_404(queryset, **filter_kwargs)

# May raise a permission denied
self.check_object_permissions(self.request, obj)

return obj
<SYSTEM_TASK:> Return an object or raise a 404. <END_TASK> <USER_TASK:> Description: def get_object_or_404(self, queryset, *filter_args, **filter_kwargs): """Return an object or raise a 404. Same as Django's standard shortcut, but make sure to raise 404 if the filter_kwargs don't match the required types. """
if isinstance(queryset, CachedQueryset):
    try:
        return queryset.get(*filter_args, **filter_kwargs)
    except queryset.model.DoesNotExist:
        raise Http404(
            'No %s matches the given query.' % queryset.model)
else:
    return get_object_or_404(queryset, *filter_args, **filter_kwargs)
<SYSTEM_TASK:> Return the primary keys as a list. <END_TASK> <USER_TASK:> Description: def values_list(self, *args, **kwargs): """Return the primary keys as a list. The only valid call is values_list('pk', flat=True) """
flat = kwargs.pop('flat', False)
assert flat is True
assert len(args) == 1
assert args[0] == self.model._meta.pk.name
return self.pks
<SYSTEM_TASK:> Return a count of instances. <END_TASK> <USER_TASK:> Description: def count(self): """Return a count of instances."""
if self._primary_keys is None:
    return self.queryset.count()
else:
    return len(self.pks)
<SYSTEM_TASK:> Return the single item from the filtered queryset. <END_TASK> <USER_TASK:> Description: def get(self, *args, **kwargs): """Return the single item from the filtered queryset."""
assert not args
assert list(kwargs.keys()) == ['pk']
pk = kwargs['pk']
model_name = self.model.__name__

object_spec = (model_name, pk, None)
instances = self.cache.get_instances((object_spec,))
try:
    model_data = instances[(model_name, pk)][0]
except KeyError:
    raise self.model.DoesNotExist(
        "No match for %r with args %r, kwargs %r" %
        (self.model, args, kwargs))
else:
    return CachedModel(self.model, model_data)
<SYSTEM_TASK:> Create list of matching packages for translation engine. <END_TASK> <USER_TASK:> Description: def get_paths(cls, packages): """Create list of matching packages for translation engine."""
allowable_packages = dict(
    (app_config.name, app_config) for app_config in apps.get_app_configs())
app_configs = [allowable_packages[p] for p in packages
               if p in allowable_packages]
# paths of requested packages
return [os.path.join(app.path, 'locale') for app in app_configs]
<SYSTEM_TASK:> Return the number of plurals for this catalog language, or 2 if no <END_TASK> <USER_TASK:> Description: def _num_plurals(self, catalogue): """ Return the number of plurals for this catalog language, or 2 if no plural string is available. """
match = re.search(r'nplurals=\s*(\d+)', self.get_plural(catalogue) or '')
if match:
    return int(match.groups()[0])
return 2
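For illustration, here is how that regex behaves on a typical gettext Plural-Forms header (the header string below is made up):

import re

header = "nplurals=3; plural=(n==1 ? 0 : n>=2 && n<=4 ? 1 : 2);"
match = re.search(r'nplurals=\s*(\d+)', header)
print(int(match.groups()[0]) if match else 2)   # prints 3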
<SYSTEM_TASK:> Populate header with correct data from top-most locale file. <END_TASK> <USER_TASK:> Description: def make_header(self, locale, catalog): """Populate header with correct data from top-most locale file."""
return { "po-revision-date": self.get_catalogue_header_value(catalog, 'PO-Revision-Date'), "mime-version": self.get_catalogue_header_value(catalog, 'MIME-Version'), "last-translator": 'Automatic <[email protected]>', "x-generator": "Python", "language": self.get_catalogue_header_value(catalog, 'Language') or locale, "lang": locale, "content-transfer-encoding": self.get_catalogue_header_value(catalog, 'Content-Transfer-Encoding'), "project-id-version": self.get_catalogue_header_value(catalog, 'Project-Id-Version'), "pot-creation-date": self.get_catalogue_header_value(catalog, 'POT-Creation-Date'), "domain": self.domain, "report-msgid-bugs-to": self.get_catalogue_header_value(catalog, 'Report-Msgid-Bugs-To'), "content-type": self.get_catalogue_header_value(catalog, 'Content-Type'), "plural-forms": self.get_plural(catalog), "language-team": self.get_catalogue_header_value(catalog, 'Language-Team') }
<SYSTEM_TASK:> Decorator for adding wait animation to long running <END_TASK> <USER_TASK:> Description: def wait(animation='elipses', text='', speed=0.2): """ Decorator for adding wait animation to long running functions. Args: animation (str, tuple): String reference to animation or tuple with custom animation. speed (float): Number of seconds each cycle of animation. Examples: >>> @animation.wait('bar') >>> def long_running_function(): >>> ... 5 seconds later ... >>> return """
def decorator(func):
    func.animation = animation
    func.speed = speed
    func.text = text

    @wraps(func)
    def wrapper(*args, **kwargs):
        animation = func.animation
        text = func.text
        if not isinstance(animation, (list, tuple)) and \
                not hasattr(animations, animation):
            text = animation if text == '' else text
            animation = 'elipses'
        wait = Wait(animation=animation, text=text, speed=func.speed)
        wait.start()
        try:
            ret = func(*args, **kwargs)
        finally:
            wait.stop()
            sys.stdout.write('\n')
        return ret
    return wrapper
return decorator
<SYSTEM_TASK:> Decorator for adding simple text wait animation to <END_TASK> <USER_TASK:> Description: def simple_wait(func): """ Decorator for adding simple text wait animation to long running functions. Examples: >>> @animation.simple_wait >>> def long_running_function(): >>> ... 5 seconds later ... >>> return """
@wraps(func)
def wrapper(*args, **kwargs):
    wait = Wait()
    wait.start()
    try:
        ret = func(*args, **kwargs)
    finally:
        wait.stop()
        sys.stdout.write('\n')
    return ret
return wrapper
<SYSTEM_TASK:> Return the angle of the complex argument. <END_TASK> <USER_TASK:> Description: def angle(self, deg=False): """Return the angle of the complex argument. Args: deg (bool, optional): Return angle in degrees if True, radians if False (default). Returns: angle (Timeseries): The counterclockwise angle from the positive real axis on the complex plane, with dtype as numpy.float64. """
if self.dtype.str[1] != 'c':
    warnings.warn('angle() is intended for complex-valued timeseries',
                  RuntimeWarning, 1)
return Timeseries(np.angle(self, deg=deg), self.tspan, self.labels)
<SYSTEM_TASK:> Interchange two axes of a Timeseries. <END_TASK> <USER_TASK:> Description: def swapaxes(self, axis1, axis2): """Interchange two axes of a Timeseries."""
if self.ndim <= 1 or axis1 == axis2:
    return self
ar = np.asarray(self).swapaxes(axis1, axis2)
if axis1 != 0 and axis2 != 0:
    # then axis 0 is unaffected by the swap
    labels = self.labels[:]
    labels[axis1], labels[axis2] = labels[axis2], labels[axis1]
    return Timeseries(ar, self.tspan, labels)
return ar
<SYSTEM_TASK:> Permute the dimensions of a Timeseries. <END_TASK> <USER_TASK:> Description: def transpose(self, *axes): """Permute the dimensions of a Timeseries."""
if self.ndim <= 1:
    return self
ar = np.asarray(self).transpose(*axes)
if axes[0] == 0:
    # axis 0 (time) is unaffected by the transposition, so the result
    # is still a valid Timeseries with permuted labels.
    newlabels = [self.labels[ax] for ax in axes]
    return Timeseries(ar, self.tspan, newlabels)
else:
    return ar
<SYSTEM_TASK:> Split a timeseries into multiple sub-timeseries <END_TASK> <USER_TASK:> Description: def split(self, indices_or_sections, axis=0): """Split a timeseries into multiple sub-timeseries"""
if not isinstance(indices_or_sections, numbers.Integral):
    raise Error('splitting by array of indices is not yet implemented')
n = indices_or_sections
if self.shape[axis] % n != 0:
    raise ValueError("Array split doesn't result in an equal division")
step = self.shape[axis] // n  # integer division so slice bounds stay ints
pieces = []
start = 0
while start < self.shape[axis]:
    stop = start + step
    ix = [slice(None)] * self.ndim
    ix[axis] = slice(start, stop)
    ix = tuple(ix)
    pieces.append(self[ix])
    start += step
return pieces
<SYSTEM_TASK:> plot Welch estimate of power spectral density, using nperseg samples per <END_TASK> <USER_TASK:> Description: def psd(ts, nperseg=1500, noverlap=1200, plot=True): """plot Welch estimate of power spectral density, using nperseg samples per segment, with noverlap samples overlap and Hamming window."""
ts = ts.squeeze()
if ts.ndim == 1:
    ts = ts.reshape((-1, 1))
fs = (len(ts) - 1.0) / (ts.tspan[-1] - ts.tspan[0])
window = signal.hamming(nperseg, sym=False)
nfft = max(256, 2**int(np.log2(nperseg) + 1))
freqs, pxx = signal.welch(ts, fs, window, nperseg, noverlap, nfft,
                          detrend='linear', axis=0)
# Discard estimates for freq bins that are too low for the window size.
# (require two full cycles to fit within the window)
index = np.nonzero(freqs >= 2.0*fs/nperseg)[0][0]
if index > 0:
    freqs = freqs[index:]
    pxx = pxx[index:]
# Discard estimate for last freq bin as too high for Nyquist frequency:
freqs = freqs[:-1]
pxx = pxx[:-1]
if plot is True:
    _plot_psd(ts, freqs, pxx)
return freqs, pxx
<SYSTEM_TASK:> forward-backward butterworth low-pass filter <END_TASK> <USER_TASK:> Description: def lowpass(ts, cutoff_hz, order=3): """forward-backward butterworth low-pass filter"""
orig_ndim = ts.ndim
if ts.ndim == 1:
    ts = ts[:, np.newaxis]
channels = ts.shape[1]
fs = (len(ts) - 1.0) / (ts.tspan[-1] - ts.tspan[0])
nyq = 0.5 * fs
cutoff = cutoff_hz / nyq
b, a = signal.butter(order, cutoff, btype='low')
if not np.all(np.abs(np.roots(a)) < 1.0):
    raise ValueError('Filter will not be stable with these values.')
dtype = ts.dtype
output = np.zeros((len(ts), channels), dtype)
for i in range(channels):
    output[:, i] = signal.filtfilt(b, a, ts[:, i])
if orig_ndim == 1:
    output = output[:, 0]
return Timeseries(output, ts.tspan, labels=ts.labels)
<SYSTEM_TASK:> forward-backward butterworth band-pass filter <END_TASK> <USER_TASK:> Description: def bandpass(ts, low_hz, high_hz, order=3): """forward-backward butterworth band-pass filter"""
orig_ndim = ts.ndim
if ts.ndim == 1:
    ts = ts[:, np.newaxis]
channels = ts.shape[1]
fs = (len(ts) - 1.0) / (ts.tspan[-1] - ts.tspan[0])
nyq = 0.5 * fs
low = low_hz / nyq
high = high_hz / nyq
b, a = signal.butter(order, [low, high], btype='band')
if not np.all(np.abs(np.roots(a)) < 1.0):
    raise ValueError('Filter will not be stable with these values.')
dtype = ts.dtype
output = np.zeros((len(ts), channels), dtype)
for i in range(channels):
    output[:, i] = signal.filtfilt(b, a, ts[:, i])
if orig_ndim == 1:
    output = output[:, 0]
return Timeseries(output, ts.tspan, labels=ts.labels)
<SYSTEM_TASK:> notch filter to remove a particular frequency <END_TASK> <USER_TASK:> Description: def notch(ts, freq_hz, bandwidth_hz=1.0): """notch filter to remove a particular frequency Adapted from code by Sturla Molden """
orig_ndim = ts.ndim
if ts.ndim == 1:
    ts = ts[:, np.newaxis]
channels = ts.shape[1]
fs = (len(ts) - 1.0) / (ts.tspan[-1] - ts.tspan[0])
nyq = 0.5 * fs
freq = freq_hz / nyq
bandwidth = bandwidth_hz / nyq
R = 1.0 - 3.0*(bandwidth/2.0)
K = ((1.0 - 2.0*R*np.cos(np.pi*freq) + R**2) /
     (2.0 - 2.0*np.cos(np.pi*freq)))
b, a = np.zeros(3), np.zeros(3)
a[0] = 1.0
a[1] = -2.0*R*np.cos(np.pi*freq)
a[2] = R**2
b[0] = K
b[1] = -2*K*np.cos(np.pi*freq)
b[2] = K
if not np.all(np.abs(np.roots(a)) < 1.0):
    raise ValueError('Filter will not be stable with these values.')
dtype = ts.dtype
output = np.zeros((len(ts), channels), dtype)
for i in range(channels):
    output[:, i] = signal.filtfilt(b, a, ts[:, i])
if orig_ndim == 1:
    output = output[:, 0]
return Timeseries(output, ts.tspan, labels=ts.labels)
<SYSTEM_TASK:> Analytic signal, using the Hilbert transform <END_TASK> <USER_TASK:> Description: def hilbert(ts): """Analytic signal, using the Hilbert transform"""
output = signal.hilbert(signal.detrend(ts, axis=0), axis=0)
return Timeseries(output, ts.tspan, labels=ts.labels)
<SYSTEM_TASK:> Amplitude of the analytic signal, using the Hilbert transform <END_TASK> <USER_TASK:> Description: def hilbert_amplitude(ts): """Amplitude of the analytic signal, using the Hilbert transform"""
output = np.abs(signal.hilbert(signal.detrend(ts, axis=0), axis=0))
return Timeseries(output, ts.tspan, labels=ts.labels)
<SYSTEM_TASK:> Phase of the analytic signal, using the Hilbert transform <END_TASK> <USER_TASK:> Description: def hilbert_phase(ts): """Phase of the analytic signal, using the Hilbert transform"""
output = np.angle(signal.hilbert(signal.detrend(ts, axis=0), axis=0))
return Timeseries(output, ts.tspan, labels=ts.labels)
<SYSTEM_TASK:> Continuous wavelet transform <END_TASK> <USER_TASK:> Description: def cwt(ts, freqs=np.logspace(0, 2), wavelet=cwtmorlet, plot=True): """Continuous wavelet transform Note the full results can use a huge amount of memory at 64-bit precision Args: ts: Timeseries of m variables, shape (n, m). Assumed constant timestep. freqs: list of frequencies (in Hz) to use for the transform. (default is 50 frequency bins logarithmic from 1Hz to 100Hz) wavelet: the wavelet to use. may be complex. see scipy.signal.wavelets plot: whether to plot time-resolved power spectrum Returns: coefs: Continuous wavelet transform output array, shape (n,len(freqs),m) """
orig_ndim = ts.ndim
if ts.ndim == 1:
    ts = ts[:, np.newaxis]
channels = ts.shape[1]
fs = (len(ts) - 1.0) / (1.0*ts.tspan[-1] - ts.tspan[0])
x = signal.detrend(ts, axis=0)
dtype = wavelet(fs/freqs[0], fs/freqs[0]).dtype
coefs = np.zeros((len(ts), len(freqs), channels), dtype)
for i in range(channels):
    coefs[:, :, i] = roughcwt(x[:, i], cwtmorlet, fs/freqs).T
if plot:
    _plot_cwt(ts, coefs, freqs)
if orig_ndim == 1:
    coefs = coefs[:, :, 0]
return coefs
<SYSTEM_TASK:> For an ensemble of time series, return the set of all time intervals <END_TASK> <USER_TASK:> Description: def first_return_times(dts, c=None, d=0.0): """For an ensemble of time series, return the set of all time intervals between successive returns to value c for all instances in the ensemble. If c is not given, the default is the mean across all times and across all time series in the ensemble. Args: dts (DistTimeseries) c (float): Optional target value (default is the ensemble mean value) d (float): Optional min distance from c to be attained between returns Returns: array of time intervals (Can take the mean of these to estimate the expected first return time for the whole ensemble) """
if c is None:
    c = dts.mean()
vmrt = distob.vectorize(analyses1.first_return_times)
all_intervals = vmrt(dts, c, d)
if hasattr(type(all_intervals), '__array_interface__'):
    return np.ravel(all_intervals)
else:
    return np.hstack([distob.gather(ilist) for ilist in all_intervals])
<SYSTEM_TASK:> For an ensemble of oscillators, return the set of period lengths of <END_TASK> <USER_TASK:> Description: def periods(dts, phi=0.0): """For an ensemble of oscillators, return the set of period lengths of all successive oscillations of all oscillators. An individual oscillation is defined to start and end when the phase passes phi (by default zero) after completing a full cycle. If the timeseries of an oscillator phase begins (or ends) exactly at phi, then the first (or last) oscillation will be included. Arguments: dts (DistTimeseries): where dts.shape[1] is 1 (single output variable representing phase) and axis 2 ranges over multiple realizations of the oscillator. phi=0.0: float A single oscillation starts and ends at phase phi (by default zero). """
vperiods = distob.vectorize(analyses1.periods)
all_periods = vperiods(dts, phi)
if hasattr(type(all_periods), '__array_interface__'):
    return np.ravel(all_periods)
else:
    return np.hstack([distob.gather(plist) for plist in all_periods])
<SYSTEM_TASK:> Compute the Hurst exponent of X. If the output H=0.5, the behavior <END_TASK> <USER_TASK:> Description: def hurst(X): """ Compute the Hurst exponent of X. If the output H=0.5, the behavior of the time-series is similar to a random walk. If H<0.5, the time-series covers less "distance" than a random walk, and vice versa. Parameters ---------- X list a time series Returns ------- H float Hurst exponent Notes -------- Author of this function is Xin Liu Examples -------- >>> import pyeeg >>> from numpy.random import randn >>> a = randn(4096) >>> pyeeg.hurst(a) 0.5057444 """
X = numpy.array(X)
N = X.size
T = numpy.arange(1, N + 1)
Y = numpy.cumsum(X)
Ave_T = Y / T

S_T = numpy.zeros(N)
R_T = numpy.zeros(N)

for i in range(N):
    S_T[i] = numpy.std(X[:i + 1])
    X_T = Y - T * Ave_T[i]
    R_T[i] = numpy.ptp(X_T[:i + 1])

R_S = R_T / S_T
R_S = numpy.log(R_S)[1:]
n = numpy.log(T)[1:]
A = numpy.column_stack((n, numpy.ones(n.size)))
[m, c] = numpy.linalg.lstsq(A, R_S)[0]
H = m
return H
<SYSTEM_TASK:> Compute power in each frequency bin specified by Band from FFT result of <END_TASK> <USER_TASK:> Description: def bin_power(X, Band, Fs): """Compute power in each frequency bin specified by Band from FFT result of X. By default, X is a real signal. Note ----- A real signal can be synthesized, thus not real. Parameters ----------- Band list boundary frequencies (in Hz) of bins. They can be unequal bins, e.g. [0.5,4,7,12,30] which are delta, theta, alpha and beta respectively. You can also use range() function of Python to generate equal bins and pass the generated list to this function. Each element of Band is a physical frequency and shall not exceed the Nyquist frequency, i.e., half of sampling frequency. X list a 1-D real time series. Fs integer the sampling rate in physical frequency Returns ------- Power list spectral power in each frequency bin. Power_ratio list spectral power in each frequency bin normalized by total power in ALL frequency bins. """
C = numpy.fft.fft(X)
C = abs(C)
Power = numpy.zeros(len(Band) - 1)
for Freq_Index in range(0, len(Band) - 1):
    Freq = float(Band[Freq_Index])
    Next_Freq = float(Band[Freq_Index + 1])
    # slice bounds must be ints, so cast the floor values
    Power[Freq_Index] = sum(
        C[int(numpy.floor(Freq / Fs * len(X))):
          int(numpy.floor(Next_Freq / Fs * len(X)))]
    )
Power_Ratio = Power / sum(Power)
return Power, Power_Ratio
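A hedged usage sketch for bin_power; the synthetic signal, sampling rate, and band edges below are invented for illustration:

import numpy

fs = 256                               # assumed sampling rate in Hz
t = numpy.arange(0, 4, 1.0 / fs)
x = numpy.sin(2 * numpy.pi * 10 * t)   # 10 Hz sine, i.e. alpha-band activity
power, ratio = bin_power(x, [0.5, 4, 7, 12, 30], fs)
print(ratio)                           # most power should land in the 7-12 Hz bin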
<SYSTEM_TASK:> Compute Hjorth Fractal Dimension of a time series X, kmax <END_TASK> <USER_TASK:> Description: def hfd(X, Kmax): """ Compute Hjorth Fractal Dimension of a time series X, kmax is an HFD parameter """
L = []
x = []
N = len(X)
for k in range(1, Kmax):
    Lk = []
    for m in range(0, k):
        Lmk = 0
        for i in range(1, int(numpy.floor((N - m) / k))):
            Lmk += abs(X[m + i * k] - X[m + i * k - k])
        Lmk = Lmk * (N - 1) / numpy.floor((N - m) / float(k)) / k
        Lk.append(Lmk)
    L.append(numpy.log(numpy.mean(Lk)))
    x.append([numpy.log(float(1) / k), 1])

(p, r1, r2, s) = numpy.linalg.lstsq(x, L)
return p[0]
<SYSTEM_TASK:> Compute Detrended Fluctuation Analysis from a time series X and length of <END_TASK> <USER_TASK:> Description: def dfa(X, Ave=None, L=None): """Compute Detrended Fluctuation Analysis from a time series X and length of boxes L. The first step to compute DFA is to integrate the signal. Let original series be X= [x(1), x(2), ..., x(N)]. The integrated signal Y = [y(1), y(2), ..., y(N)] is obtained as follows y(k) = \sum_{i=1}^{k}{x(i)-Ave} where Ave is the mean of X. The second step is to partition/slice/segment the integrated sequence Y into boxes. At least two boxes are needed for computing DFA. Box sizes are specified by the L argument of this function. By default, it is from 1/5 of signal length to one (x-5)-th of the signal length, where x is the nearest power of 2 from the length of the signal, i.e., 1/16, 1/32, 1/64, 1/128, ... In each box, a linear least square fitting is employed on data in the box. Denote the series on fitted line as Yn. Its k-th elements, yn(k), corresponds to y(k). For fitting in each box, there is a residue, the sum of squares of all offsets, difference between actual points and points on fitted line. F(n) denotes the square root of average total residue in all boxes when box length is n, thus Total_Residue = \sum_{k=1}^{N}{(y(k)-yn(k))} F(n) = \sqrt(Total_Residue/N) The computing to F(n) is carried out for every box length n. Therefore, a relationship between n and F(n) can be obtained. In general, F(n) increases when n increases. Finally, the relationship between F(n) and n is analyzed. A least square fitting is performed between log(F(n)) and log(n). The slope of the fitting line is the DFA value, denoted as Alpha. To white noise, Alpha should be 0.5. Higher level of signal complexity is related to higher Alpha. Parameters ---------- X: 1-D Python list or numpy array a time series Ave: integer, optional The average value of the time series L: 1-D Python list of integers A list of box size, integers in ascending order Returns ------- Alpha: integer the result of DFA analysis, thus the slope of fitting line of log(F(n)) vs. log(n). where n is the Examples -------- >>> import pyeeg >>> from numpy.random import randn >>> print(pyeeg.dfa(randn(4096))) 0.490035110345 Reference --------- Peng C-K, Havlin S, Stanley HE, Goldberger AL. Quantification of scaling exponents and crossover phenomena in nonstationary heartbeat time series. _Chaos_ 1995;5:82-87 Notes ----- This value depends on the box sizes very much. When the input is a white noise, this value should be 0.5. But, some choices on box sizes can lead to the value lower or higher than 0.5, e.g. 0.38 or 0.58. Based on many test, I set the box sizes from 1/5 of signal length to one (x-5)-th of the signal length, where x is the nearest power of 2 from the length of the signal, i.e., 1/16, 1/32, 1/64, 1/128, ... You may generate a list of box sizes and pass in such a list as a parameter. """
X = numpy.array(X)

if Ave is None:
    Ave = numpy.mean(X)

Y = numpy.cumsum(X)
Y -= Ave

if L is None:
    L = numpy.floor(len(X) * 1 / (
        2 ** numpy.array(list(range(4, int(numpy.log2(len(X))) - 4)))))

F = numpy.zeros(len(L))  # F(n) of different given box length n

for i in range(0, len(L)):
    n = int(L[i])  # for each box length L[i]
    if n == 0:
        print("time series is too short while the box length is too big")
        print("abort")
        exit()
    for j in range(0, len(X), n):  # for each box
        if j + n < len(X):
            c = list(range(j, j + n))  # coordinates of time in the box
            c = numpy.vstack([c, numpy.ones(n)]).T
            y = Y[j:j + n]  # the value of data in the box
            # add residue in this box
            F[i] += numpy.linalg.lstsq(c, y)[1]
    F[i] /= ((len(X) / n) * n)
F = numpy.sqrt(F)

Alpha = numpy.linalg.lstsq(numpy.vstack(
    [numpy.log(L), numpy.ones(len(L))]
).T, numpy.log(F))[0][0]

return Alpha
<SYSTEM_TASK:> Compute Permutation Entropy of a given time series x, specified by <END_TASK> <USER_TASK:> Description: def permutation_entropy(x, n, tau): """Compute Permutation Entropy of a given time series x, specified by permutation order n and embedding lag tau. Parameters ---------- x list a time series n integer Permutation order tau integer Embedding lag Returns ---------- PE float permutation entropy Notes ---------- Suppose the given time series is X =[x(1),x(2),x(3),...,x(N)]. We first build embedding matrix Em, of dimension(n*N-n+1), such that the ith row of Em is x(i),x(i+1),..x(i+n-1). Hence the embedding lag and the embedding dimension are 1 and n respectively. We build this matrix from a given time series, X, by calling pyEEg function embed_seq(x,1,n). We then transform each row of the embedding matrix into a new sequence, comprising a set of integers in range of 0,..,n-1. The order in which the integers are placed within a row is the same as those of the original elements:0 is placed where the smallest element of the row was and n-1 replaces the largest element of the row. To calculate the Permutation entropy, we calculate the entropy of PeSeq. In doing so, we count the number of occurrences of each permutation in PeSeq and write it in a sequence, RankMat. We then use this sequence to calculate entropy by using Shannon's entropy formula. Permutation entropy is usually calculated with n in range of 3 and 7. References ---------- Bandt, Christoph, and Bernd Pompe. "Permutation entropy: a natural complexity measure for time series." Physical Review Letters 88.17 (2002): 174102. Examples ---------- >>> import pyeeg >>> x = [1,2,4,5,12,3,4,5] >>> pyeeg.permutation_entropy(x,5,1) 2.0 """
PeSeq = []
Em = embed_seq(x, tau, n)

for i in range(0, len(Em)):
    r = []
    z = []

    for j in range(0, len(Em[i])):
        z.append(Em[i][j])

    for j in range(0, len(Em[i])):
        z.sort()
        r.append(z.index(Em[i][j]))
        z[z.index(Em[i][j])] = -1

    PeSeq.append(r)

RankMat = []

while len(PeSeq) > 0:
    RankMat.append(PeSeq.count(PeSeq[0]))
    x = PeSeq[0]
    for j in range(0, PeSeq.count(PeSeq[0])):
        PeSeq.pop(PeSeq.index(x))

RankMat = numpy.array(RankMat)
RankMat = numpy.true_divide(RankMat, RankMat.sum())
EntropyMat = numpy.multiply(numpy.log2(RankMat), RankMat)
PE = -1 * EntropyMat.sum()

return PE
<SYSTEM_TASK:> Calculate largest Lyauponov exponent of a given time series x using <END_TASK> <USER_TASK:> Description: def LLE(x, tau, n, T, fs): """Calculate largest Lyauponov exponent of a given time series x using Rosenstein algorithm. Parameters ---------- x list a time series n integer embedding dimension tau integer Embedding lag fs integer Sampling frequency T integer Mean period Returns ---------- Lexp float Largest Lyapunov Exponent Notes ---------- A n-dimensional trajectory is first reconstructed from the observed data by use of embedding delay of tau, using pyeeg function, embed_seq(x, tau, n). Algorithm then searches for nearest neighbour of each point on the reconstructed trajectory; temporal separation of nearest neighbours must be greater than mean period of the time series: the mean period can be estimated as the reciprocal of the mean frequency in power spectrum Each pair of nearest neighbours is assumed to diverge exponentially at a rate given by largest Lyapunov exponent. Now having a collection of neighbours, a least square fit to the average exponential divergence is calculated. The slope of this line gives an accurate estimate of the largest Lyapunov exponent. References ---------- Rosenstein, Michael T., James J. Collins, and Carlo J. De Luca. "A practical method for calculating largest Lyapunov exponents from small data sets." Physica D: Nonlinear Phenomena 65.1 (1993): 117-134. Examples ---------- >>> import pyeeg >>> X = numpy.array([3,4,1,2,4,51,4,32,24,12,3,45]) >>> pyeeg.LLE(X,2,4,1,1) >>> 0.18771136179353307 """
Em = embed_seq(x, tau, n)
M = len(Em)
A = numpy.tile(Em, (len(Em), 1, 1))
B = numpy.transpose(A, [1, 0, 2])
square_dists = (A - B) ** 2  # square_dists[i,j,k] = (Em[i][k]-Em[j][k])^2
D = numpy.sqrt(square_dists[:, :, :].sum(axis=2))  # D[i,j] = ||Em[i]-Em[j]||_2

# Exclude elements within T of the diagonal
band = numpy.tri(D.shape[0], k=T) - numpy.tri(D.shape[0], k=-T - 1)
band[band == 1] = numpy.inf
neighbors = (D + band).argmin(axis=0)  # nearest neighbors more than T steps away

# in_bounds[i,j] = (i+j <= M-1 and i+neighbors[j] <= M-1)
inc = numpy.tile(numpy.arange(M), (M, 1))
row_inds = (numpy.tile(numpy.arange(M), (M, 1)).T + inc)
col_inds = (numpy.tile(neighbors, (M, 1)) + inc.T)
in_bounds = numpy.logical_and(row_inds <= M - 1, col_inds <= M - 1)
# Uncomment for old (miscounted) version
# in_bounds = numpy.logical_and(row_inds < M - 1, col_inds < M - 1)
row_inds[~in_bounds] = 0
col_inds[~in_bounds] = 0

# neighbor_dists[i,j] = ||Em[i+j]-Em[i+neighbors[j]]||_2
neighbor_dists = numpy.ma.MaskedArray(D[row_inds, col_inds], ~in_bounds)
J = (~neighbor_dists.mask).sum(axis=1)  # number of in-bounds indices by row
# Set invalid (zero) values to 1; log(1) = 0 so sum is unchanged
neighbor_dists[neighbor_dists == 0] = 1

d_ij = numpy.sum(numpy.log(neighbor_dists.data), axis=1)
mean_d = d_ij[J > 0] / J[J > 0]

x = numpy.arange(len(mean_d))
X = numpy.vstack((x, numpy.ones(len(mean_d)))).T
[m, c] = numpy.linalg.lstsq(X, mean_d)[0]
Lexp = fs * m
return Lexp
<SYSTEM_TASK:> For a single variable timeseries representing the phase of an oscillator, <END_TASK> <USER_TASK:> Description: def phase_crossings(ts, phi=0.0): """For a single variable timeseries representing the phase of an oscillator, find the times at which the phase crosses angle phi, with the condition that the phase must visit phi+pi between crossings. (Thus if noise causes the phase to wander back and forth across angle phi without the oscillator doing a full revolution, then this is recorded as a single crossing event, giving the time of the earliest arrival.) If the timeseries begins (or ends) exactly at phi, then time zero (or the ending time) is also included as a crossing event, so that the boundaries of the first and last oscillations are included. If the actual crossing time falls between two time steps, linear interpolation is used to estimate the crossing time. Arguments: ts: Timeseries (single variable) The timeseries of an angle variable (radians) phi (float): Critical phase angle (radians) at which to report crossings. Returns: array of float """
# TODO support multivariate time series
ts = ts.squeeze()
if ts.ndim != 1:
    raise ValueError('Currently can only use on single variable timeseries')

# Interpret the timeseries as belonging to a phase variable.
# Map its range to the interval (-pi, pi] with critical angle at zero:
ts = mod2pi(ts - phi)

tsa = ts[0:-1]
tsb = ts[1:]
p2 = np.pi / 2
# Time indices where phase crosses or reaches zero from below or above
zc = np.nonzero((tsa > -p2) & (tsa < 0) & (tsb >= 0) & (tsb < p2) |
                (tsa < p2) & (tsa > 0) & (tsb <= 0) & (tsb > -p2))[0] + 1
# Estimate crossing time interpolated linearly within a single time step
va = ts[zc - 1]
vb = ts[zc]
ct = (np.abs(vb) * ts.tspan[zc - 1] +
      np.abs(va) * ts.tspan[zc]) / np.abs(vb - va)  # denominator always != 0
# Also include starting time if we started exactly at zero
if ts[0] == 0.0:
    zc = np.r_[np.array([0]), zc]
    ct = np.r_[np.array([ts.tspan[0]]), ct]

# Time indices where phase crosses pi
pc = np.nonzero((tsa > p2) & (tsb < -p2) |
                (tsa < -p2) & (tsb > p2))[0] + 1

# Select those zero-crossings separated by at least one pi-crossing
splice = np.searchsorted(pc, zc)
which_zc = np.r_[np.array([0]),
                 np.nonzero(splice[0:-1] - splice[1:])[0] + 1]
if ct.shape[0] == 0:
    return ct
else:
    return ct[which_zc]
<SYSTEM_TASK:> For a single variable timeseries representing the phase of an oscillator, <END_TASK> <USER_TASK:> Description: def periods(ts, phi=0.0): """For a single variable timeseries representing the phase of an oscillator, measure the period of each successive oscillation. An individual oscillation is defined to start and end when the phase passes phi (by default zero) after completing a full cycle. If the timeseries begins (or ends) exactly at phi, then the first (or last) oscillation will be included. Arguments: ts: Timeseries (single variable) The timeseries of an angle variable (radians) phi (float): A single oscillation starts and ends at phase phi (by default zero). """
ts = np.squeeze(ts)
if ts.ndim <= 1:
    return np.diff(phase_crossings(ts, phi))
else:
    return np.hstack([ts[..., i].periods(phi) for i in range(ts.shape[-1])])
<SYSTEM_TASK:> Continuous wavelet transform. <END_TASK> <USER_TASK:> Description: def roughcwt(data, wavelet, widths): """ Continuous wavelet transform. Performs a continuous wavelet transform on `data`, using the `wavelet` function. A CWT performs a convolution with `data` using the `wavelet` function, which is characterized by a width parameter and length parameter. Parameters ---------- data : (N,) ndarray data on which to perform the transform. wavelet : function Wavelet function, which should take 2 arguments. The first argument is the number of points that the returned vector will have (len(wavelet(width,length)) == length). The second is a width parameter, defining the size of the wavelet (e.g. standard deviation of a gaussian). See `ricker`, which satisfies these requirements. widths : (M,) sequence Widths to use for transform. Returns ------- cwt: (M, N) ndarray Will have shape of (len(data), len(widths)). Notes ----- >>> length = min(10 * width[ii], len(data)) >>> cwt[ii,:] = scipy.signal.convolve(data, wavelet(length, ... width[ii]), mode='same') Examples -------- >>> from scipy import signal >>> sig = np.random.rand(20) - 0.5 >>> wavelet = signal.ricker >>> widths = np.arange(1, 11) >>> cwtmatr = signal.cwt(sig, wavelet, widths) """
out_dtype = wavelet(widths[0], widths[0]).dtype
output = np.zeros([len(widths), len(data)], dtype=out_dtype)
for ind, width in enumerate(widths):
    wavelet_data = wavelet(min(3 * width, len(data)), width)
    output[ind, :] = convolve(data, wavelet_data, mode='same')
return output
<SYSTEM_TASK:> Get cycle of colors in a way compatible with all matplotlib versions <END_TASK> <USER_TASK:> Description: def _get_color_list(): """Get cycle of colors in a way compatible with all matplotlib versions"""
if 'axes.prop_cycle' in plt.rcParams:
    return [p['color'] for p in list(plt.rcParams['axes.prop_cycle'])]
else:
    return plt.rcParams['axes.color_cycle']
<SYSTEM_TASK:> For each variable in the Timeseries, checks whether it represents <END_TASK> <USER_TASK:> Description: def _remove_pi_crossings(ts): """For each variable in the Timeseries, checks whether it represents a phase variable ranging from -pi to pi. If so, set all points where the phase crosses pi to 'nan' so that spurious lines will not be plotted. If ts does not need adjustment, then return ts. Otherwise return a modified copy. """
orig_ts = ts
if ts.ndim == 1:
    ts = ts[:, np.newaxis, np.newaxis]
elif ts.ndim == 2:
    ts = ts[:, np.newaxis]
# Get the indices of those variables that have range of approx -pi to pi
tsmax = ts.max(axis=0)
tsmin = ts.min(axis=0)
phase_vars = np.transpose(np.nonzero((np.abs(tsmax - np.pi) < 0.01) &
                                     (np.abs(tsmin + np.pi) < 0.01)))
if len(phase_vars) == 0:
    return orig_ts
else:
    ts = ts.copy()
    for v in phase_vars:
        ts1 = np.asarray(ts[:, v[0], v[1]])  # time series of single variable
        ts1a = ts1[0:-1]
        ts1b = ts1[1:]
        p2 = np.pi / 2
        # Find time indices where phase crosses pi. Set those values to nan.
        pc = np.nonzero((ts1a > p2) & (ts1b < -p2) |
                        (ts1a < -p2) & (ts1b > p2))[0] + 1
        ts1[pc] = np.nan
        ts[:, v[0], v[1]] = ts1
    return ts
<SYSTEM_TASK:> load a multi-channel Timeseries from a MATLAB .mat file <END_TASK> <USER_TASK:> Description: def timeseries_from_mat(filename, varname=None, fs=1.0): """load a multi-channel Timeseries from a MATLAB .mat file Args: filename (str): .mat file to load varname (str): variable name. only needed if there is more than one variable saved in the .mat file fs (scalar): sample rate of timeseries in Hz. (constant timestep assumed) Returns: Timeseries """
import scipy.io as sio
if varname is None:
    mat_dict = sio.loadmat(filename)
    if len(mat_dict) > 1:
        raise ValueError('Must specify varname: file contains '
                         'more than one variable. ')
else:
    mat_dict = sio.loadmat(filename, variable_names=(varname,))
array = mat_dict.popitem()[1]
return Timeseries(array, fs=fs)
<SYSTEM_TASK:> Get a list of event annotations from an EDF (European Data Format file <END_TASK> <USER_TASK:> Description: def annotations_from_file(filename): """Get a list of event annotations from an EDF (European Data Format file or EDF+ file, using edflib. Args: filename: EDF+ file Returns: list: annotation events, each in the form [start_time, duration, text] """
import edflib
e = edflib.EdfReader(filename, annotations_mode='all')
return e.read_annotations()
<SYSTEM_TASK:> construct a RemoteTimeseries from a RemoteArray <END_TASK> <USER_TASK:> Description: def _rts_from_ra(ra, tspan, labels, block=True): """construct a RemoteTimeseries from a RemoteArray"""
def _convert(a, tspan, labels):
    from nsim import Timeseries
    return Timeseries(a, tspan, labels)
return distob.call(
    _convert, ra, tspan, labels, prefer_local=False, block=block)
<SYSTEM_TASK:> construct a DistTimeseries from a DistArray <END_TASK> <USER_TASK:> Description: def _dts_from_da(da, tspan, labels): """construct a DistTimeseries from a DistArray"""
sublabels = labels[:]
new_subarrays = []
for i, ra in enumerate(da._subarrays):
    if isinstance(ra, RemoteTimeseries):
        new_subarrays.append(ra)
    else:
        if labels[da._distaxis]:
            sublabels[da._distaxis] = labels[da._distaxis][
                da._si[i]:da._si[i + 1]]
        new_subarrays.append(_rts_from_ra(ra, tspan, sublabels, False))
new_subarrays = [distob.convert_result(ar) for ar in new_subarrays]
da._subarrays = new_subarrays
da.__class__ = DistTimeseries
da.tspan = tspan
da.labels = labels
da.t = _Timeslice(da)
return da
<SYSTEM_TASK:> Make a simulation of the system defined by functions f and G. <END_TASK> <USER_TASK:> Description: def newsim(f, G, y0, name='NewModel', modelType=ItoModel, T=60.0, dt=0.005, repeat=1, identical=True): """Make a simulation of the system defined by functions f and G. dy = f(y,t)dt + G(y,t).dW with initial condition y0 This helper function is for convenience, making it easy to define one-off simulations interactively in ipython. Args: f: callable(y, t) (defined in global scope) returning (n,) array Vector-valued function to define the deterministic part of the system G: callable(y, t) (defined in global scope) returning (n,m) array Optional matrix-valued function to define noise coefficients of an Ito SDE system. y0 (array): Initial condition name (str): Optional class name for the new model modelType (type): The type of model to simulate. Must be a subclass of nsim.Model, for example nsim.ODEModel, nsim.ItoModel or nsim.StratonovichModel. The default is nsim.ItoModel. T: Total length of time to simulate, in seconds. dt: Timestep for numerical integration. repeat (int, optional) identical (bool, optional) Returns: Simulation Raises: SimValueError, SimTypeError """
NewModel = newmodel(f, G, y0, name, modelType)
if repeat == 1:
    return Simulation(NewModel(), T, dt)
else:
    return RepeatedSim(NewModel, T, dt, repeat, identical)
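A hedged usage sketch for newsim; the Ornstein-Uhlenbeck-style drift f and noise G below are invented for illustration and are not part of the library:

import numpy as np

def f(y, t):
    return -1.0 * y            # deterministic drift back toward zero

def G(y, t):
    return np.array([[0.2]])   # constant noise coefficient

sim = newsim(f, G, y0=np.array([1.0]), name='OU', T=10.0, dt=0.005)
ts = sim.timeseries            # the simulated Timeseries (see the timeseries property below)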
<SYSTEM_TASK:> Use the functions f and G to define a new Model class for simulations. <END_TASK> <USER_TASK:> Description: def newmodel(f, G, y0, name='NewModel', modelType=ItoModel): """Use the functions f and G to define a new Model class for simulations. It will take functions f and G from global scope and make a new Model class out of them. It will automatically gather any globals used in the definition of f and G and turn them into attributes of the new Model. Args: f: callable(y, t) (defined in global scope) returning (n,) array Scalar or vector-valued function to define the deterministic part G: callable(y, t) (defined in global scope) returning (n,m) array Optional scalar or matrix-valued function to define noise coefficients of a stochastic system. This should be ``None`` for an ODE system. y0 (Number or array): Initial condition name (str): Optional class name for the new model modelType (type): The type of model to simulate. Must be a subclass of nsim.Model, for example nsim.ODEModel, nsim.ItoModel or nsim.StratonovichModel. The default is nsim.ItoModel. Returns: new class (subclass of Model) Raises: SimValueError, SimTypeError """
if not issubclass(modelType, Model):
    raise SimTypeError('modelType must be a subclass of nsim.Model')
if not callable(f) or (G is not None and not callable(G)):
    raise SimTypeError('f and G must be functions of y and t.')
if G is not None and f.__globals__ is not G.__globals__:
    raise SimValueError('f and G must be defined in the same place')
# TODO: validate that f and G are defined at global scope.
# TODO: Handle nonlocals used in f,G so that we can lift this restriction.
if modelType is ODEModel and G is not None and not np.all(G == 0.0):
    raise SimValueError('For an ODEModel, noise matrix G should be None')
if G is None or modelType is ODEModel:
    newclass = type(name, (ODEModel,), dict())
    setattr(newclass, 'f', staticmethod(__clone_function(f, 'f')))
else:
    newclass = type(name, (modelType,), dict())
    setattr(newclass, 'f', staticmethod(__clone_function(f, 'f')))
    setattr(newclass, 'G', staticmethod(__clone_function(G, 'G')))
setattr(newclass, 'y0', copy.deepcopy(y0))
# For any global that is used by the functions f or G, create a
# corresponding attribute in our new class.
globals_used = [x for x in f.__globals__ if (
    x in f.__code__.co_names or
    G is not None and x in G.__code__.co_names)]
for x in globals_used:
    if G is None:
        setattr(newclass, x, __AccessDict(x, newclass.f.__globals__))
    else:
        setattr(newclass, x, __AccessDicts(x, newclass.f.__globals__,
                                           newclass.G.__globals__))
# Put the new class into namespace __main__ (to cause dill to pickle it)
newclass.__module__ = '__main__'
import __main__
__main__.__dict__[name] = newclass
return newclass
<SYSTEM_TASK:> Make a new version of a function that has its own independent copy <END_TASK> <USER_TASK:> Description: def __clone_function(f, name=None): """Make a new version of a function that has its own independent copy of any globals that it uses directly, and has its own name. All other attributes are assigned from the original function. Args: f: the function to clone name (str): the name for the new function (if None, keep the same name) Returns: A copy of the function f, having its own copy of any globals used Raises: SimValueError """
if not isinstance(f, types.FunctionType):
    raise SimTypeError('Given parameter is not a function.')
if name is None:
    name = f.__name__
newglobals = f.__globals__.copy()
globals_used = [x for x in f.__globals__ if x in f.__code__.co_names]
for x in globals_used:
    gv = f.__globals__[x]
    if isinstance(gv, types.FunctionType):
        # Recursively clone any global functions used by this function.
        newglobals[x] = __clone_function(gv)
    elif isinstance(gv, types.ModuleType):
        newglobals[x] = gv
    else:
        # If it is something else, deep copy it.
        newglobals[x] = copy.deepcopy(gv)
newfunc = types.FunctionType(
    f.__code__, newglobals, name, f.__defaults__, f.__closure__)
return newfunc
<SYSTEM_TASK:> Return the angle of a complex Timeseries <END_TASK> <USER_TASK:> Description: def angle(self, deg=False): """Return the angle of a complex Timeseries Args: deg (bool, optional): Return angle in degrees if True, radians if False (default). Returns: angle (Timeseries): The counterclockwise angle from the positive real axis on the complex plane, with dtype as numpy.float64. """
if self.dtype.str[1] != 'c':
    warnings.warn('angle() is intended for complex-valued timeseries',
                  RuntimeWarning, 1)
da = distob.vectorize(np.angle)(self, deg)
return _dts_from_da(da, self.tspan, self.labels)
<SYSTEM_TASK:> How to couple the output of one subsystem to the input of another. <END_TASK> <USER_TASK:> Description: def coupling(self, source_y, target_y, weight): """How to couple the output of one subsystem to the input of another. This is a fallback default coupling function that should usually be replaced with your own. This example coupling function takes the mean of all variables of the source subsystem and uses that value weighted by the connection strength to drive all variables of the target subsystem. Arguments: source_y (array of shape (d,)): State of the source subsystem. target_y (array of shape (d,)): State of target subsystem. weight (float): the connection strength for this connection. Returns: input (array of shape (d,)): Values to drive each variable of the target system. """
return np.ones_like(target_y)*np.mean(source_y)*weight
<SYSTEM_TASK:> Simulated time series <END_TASK> <USER_TASK:> Description: def timeseries(self): """Simulated time series"""
if self._timeseries is None:
    self.compute()
if isinstance(self.system, NetworkModel):
    return self.system._reshape_timeseries(self._timeseries)
else:
    return self._timeseries
<SYSTEM_TASK:> For a single variable timeseries, find the times at which the <END_TASK> <USER_TASK:> Description: def crossing_times(ts, c=0.0, d=0.0): """For a single variable timeseries, find the times at which the value crosses ``c`` from above or below. Can optionally set a non-zero ``d`` to impose the condition that the value must wander at least ``d`` units away from ``c`` between crossings. If the timeseries begins (or ends) exactly at ``c``, then time zero (or the ending time) is also included as a crossing event, so that the boundaries of the first and last excursions are included. If the actual crossing time falls between two time steps, linear interpolation is used to estimate the crossing time. Args: ts: Timeseries (single variable) c (float): Critical value at which to report crossings. d (float): Optional min distance from c to be attained between crossings. Returns: array of float """
# TODO support multivariate time series
ts = ts.squeeze()
if ts.ndim != 1:
    raise ValueError('Currently can only use on single variable timeseries')

# Translate to put the critical value at zero:
ts = ts - c

tsa = ts[0:-1]
tsb = ts[1:]
# Time indices where the value crosses or reaches zero from below or above
zc = np.nonzero((tsa < 0) & (tsb >= 0) | (tsa > 0) & (tsb <= 0))[0] + 1
# Estimate crossing time interpolated linearly within a single time step
va = ts[zc - 1]
vb = ts[zc]
ct = (np.abs(vb) * ts.tspan[zc - 1] +
      np.abs(va) * ts.tspan[zc]) / np.abs(vb - va)  # denominator always != 0
# Also include starting time if we started exactly at zero
if ts[0] == 0.0:
    zc = np.r_[np.array([0]), zc]
    ct = np.r_[np.array([ts.tspan[0]]), ct]

if d == 0.0 or ct.shape[0] == 0:
    return ct

# Time indices where value crosses c+d or c-d:
dc = np.nonzero((tsa < d) & (tsb >= d) | (tsa > -d) & (tsb <= -d))[0] + 1

# Select those zero-crossings separated by at least one d-crossing
splice = np.searchsorted(dc, zc)
which_zc = np.r_[np.array([0]),
                 np.nonzero(splice[0:-1] - splice[1:])[0] + 1]
return ct[which_zc]
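A brief usage sketch, assuming a Timeseries wrapping a sampled sine wave (the reported crossing times are approximate, up to the interpolation error of the sampling grid):

import numpy as np

tspan = np.linspace(0, 4 * np.pi, 2000)
ts = Timeseries(np.sin(tspan), tspan)
print(crossing_times(ts))          # ~ [0, pi, 2*pi, 3*pi, 4*pi]
print(crossing_times(ts, c=0.5))   # times where the value crosses 0.5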
<SYSTEM_TASK:> Returns the discrete, linear convolution of a time series with itself, <END_TASK> <USER_TASK:> Description: def autocorrelation(ts, normalized=False, unbiased=False): """ Returns the discrete, linear convolution of a time series with itself, optionally using unbiased normalization. N.B. Autocorrelation estimates are necessarily inaccurate for longer lags, as there are less pairs of points to convolve separated by that lag. Therefore best to throw out the results except for shorter lags, e.g. keep lags from tau=0 up to one quarter of the total time series length. Args: normalized (boolean): If True, the time series will first be normalized to a mean of 0 and variance of 1. This gives autocorrelation 1 at zero lag. unbiased (boolean): If True, the result at each lag m will be scaled by 1/(N-m). This gives an unbiased estimation of the autocorrelation of a stationary process from a finite length sample. Ref: S. J. Orfanidis (1996) "Optimum Signal Processing", 2nd Ed. """
ts = np.squeeze(ts)
if ts.ndim <= 1:
    if normalized:
        ts = (ts - ts.mean()) / ts.std()
    N = ts.shape[0]
    ar = np.asarray(ts)
    acf = np.correlate(ar, ar, mode='full')
    outlen = (acf.shape[0] + 1) // 2  # integer division: outlen is used as an index
    acf = acf[(outlen - 1):]
    if unbiased:
        factor = np.array([1.0 / (N - m) for m in range(0, outlen)])
        acf = acf * factor
    dt = (ts.tspan[-1] - ts.tspan[0]) / (len(ts) - 1.0)
    lags = np.arange(outlen) * dt
    return Timeseries(acf, tspan=lags, labels=ts.labels)
else:
    # recursively handle arrays of dimension > 1
    lastaxis = ts.ndim - 1
    m = ts.shape[lastaxis]
    acfs = [ts[..., i].autocorrelation(normalized, unbiased)[..., np.newaxis]
            for i in range(m)]
    res = distob.concatenate(acfs, axis=lastaxis)
    res.labels[lastaxis] = ts.labels[lastaxis]
    return res
<SYSTEM_TASK:> Verifies the value is between 1 and 9 inclusively. <END_TASK> <USER_TASK:> Description: def fan_speed(self, value): """Verifies the value is between 1 and 9 inclusively."""
if value not in range(1, 10): raise exceptions.RoasterValueError self._fan_speed.value = value
<SYSTEM_TASK:> Verifies that the heat setting is between 0 and 3. <END_TASK> <USER_TASK:> Description: def heat_setting(self, value): """Verifies that the heat setting is between 0 and 3."""
if value not in range(0, 4): raise exceptions.RoasterValueError self._heat_setting.value = value
<SYSTEM_TASK:> Verifies that the heater_level is between 0 and heater_segments. <END_TASK> <USER_TASK:> Description: def heater_level(self, value): """Verifies that the heater_level is between 0 and heater_segments. Can only be called when the freshroastsr700 object is initialized with ext_sw_heater_drive=True. Will throw RoasterValueError otherwise."""
if self._ext_sw_heater_drive: if value not in range(0, self._heater_bangbang_segments+1): raise exceptions.RoasterValueError self._heater_level.value = value else: raise exceptions.RoasterValueError
<SYSTEM_TASK:> This is the thread that listens to an event from <END_TASK> <USER_TASK:> Description: def update_data_run(self, event_to_wait_on): """This is the thread that listens to an event from the comm process to execute the update_data_func callback in the context of the main process. """
# with the daemon=True setting, this thread should
# quit 'automatically'
while event_to_wait_on.wait():
    event_to_wait_on.clear()
    if self.update_data_callback_kill_event.is_set():
        return
    self.update_data_func()
<SYSTEM_TASK:> This is the thread that listens to an event from <END_TASK> <USER_TASK:> Description: def state_transition_run(self, event_to_wait_on): """This is the thread that listens to an event from the timer process to execute the state_transition_func callback in the context of the main process. """
# with the daemon=True setting, this thread should
# quit 'automatically'
while event_to_wait_on.wait():
    event_to_wait_on.clear()
    if self.state_transition_callback_kill_event.is_set():
        return
    self.state_transition_func()
<SYSTEM_TASK:> Sends the initialization packet to the roaster. <END_TASK> <USER_TASK:> Description: def _initialize(self): """Sends the initialization packet to the roaster."""
self._header.value = b'\xAA\x55' self._current_state.value = b'\x00\x00' s = self._generate_packet() self._ser.write(s) self._header.value = b'\xAA\xAA' self._current_state.value = b'\x02\x01' return self._read_existing_recipe()
<SYSTEM_TASK:> Attempts to connect to the roaster every quarter of a second. <END_TASK> <USER_TASK:> Description: def _auto_connect(self): """Attempts to connect to the roaster every quarter of a second."""
while not self._teardown.value: try: self._connect() return True except exceptions.RoasterLookupError: time.sleep(.25) return False
<SYSTEM_TASK:> Timer loop used to keep track of the time while roasting or <END_TASK> <USER_TASK:> Description: def _timer(self, state_transition_event=None): """Timer loop used to keep track of the time while roasting or cooling. If the time remaining reaches zero, the roaster will call the supplied state transition function, or the roaster will be set to the idle state."""
while not self._teardown.value: state = self.get_roaster_state() if(state == 'roasting' or state == 'cooling'): time.sleep(1) self.total_time += 1 if(self.time_remaining > 0): self.time_remaining -= 1 else: if(state_transition_event is not None): state_transition_event.set() else: self.idle() else: time.sleep(0.01)
<SYSTEM_TASK:> Returns a string based upon the current state of the roaster. Will <END_TASK> <USER_TASK:> Description: def get_roaster_state(self): """Returns a string based upon the current state of the roaster. Will raise an exception if the state is unknown. Returns: 'idle' if idle, 'sleeping' if sleeping, 'cooling' if cooling, 'roasting' if roasting, 'connecting' if in hardware connection phase, 'unknown' otherwise """
value = self._current_state.value if(value == b'\x02\x01'): return 'idle' elif(value == b'\x04\x04'): return 'cooling' elif(value == b'\x08\x01'): return 'sleeping' # handle null bytes as empty strings elif(value == b'\x00\x00' or value == b''): return 'connecting' elif(value == b'\x04\x02'): return 'roasting' else: return 'unknown'
<SYSTEM_TASK:> Generates a packet based upon the current class variables. Note that <END_TASK> <USER_TASK:> Description: def _generate_packet(self): """Generates a packet based upon the current class variables. Note that current temperature is not sent, as the original application sent zeros to the roaster for the current temperature."""
roaster_time = utils.seconds_to_float(self._time_remaining.value) packet = ( self._header.value + self._temp_unit.value + self._flags.value + self._current_state.value + struct.pack(">B", self._fan_speed.value) + struct.pack(">B", int(round(roaster_time * 10.0))) + struct.pack(">B", self._heat_setting.value) + b'\x00\x00' + self._footer) return packet
<SYSTEM_TASK:> Set the desired output level. Must be between 0 and <END_TASK> <USER_TASK:> Description: def heat_level(self, value): """Set the desired output level. Must be between 0 and number_of_segments inclusive."""
if value < 0: self._heat_level = 0 elif round(value) > self._num_segments: self._heat_level = self._num_segments else: self._heat_level = int(round(value))
<SYSTEM_TASK:> This is a method that will be called every time a packet is opened <END_TASK> <USER_TASK:> Description: def update_data(self): """This is a method that will be called every time a packet is opened from the roaster."""
time_elapsed = datetime.datetime.now() - self.start_time crntTemp = self.roaster.current_temp targetTemp = self.roaster.target_temp heaterLevel = self.roaster.heater_level # print( # "Time: %4.6f, crntTemp: %d, targetTemp: %d, heaterLevel: %d" % # (time_elapsed.total_seconds(), crntTemp, targetTemp, heaterLevel)) self.file.write( "%4.6f,%d,%d,%d\n" % (time_elapsed.total_seconds(), crntTemp, targetTemp, heaterLevel))
<SYSTEM_TASK:> Process results by providers <END_TASK> <USER_TASK:> Description: def process_results(self): """ Process results by providers """
for result in self._results: provider = result.provider self.providers.append(provider) if result.error: self.failed_providers.append(provider) continue if not result.response: continue # set blacklisted to True if ip is detected with at least one dnsbl self.blacklisted = True provider_categories = provider.process_response(result.response) assert provider_categories.issubset(DNSBL_CATEGORIES) self.categories = self.categories.union(provider_categories) self.detected_by[provider.host] = list(provider_categories)
<SYSTEM_TASK:> A generator that will generate a range of floats. <END_TASK> <USER_TASK:> Description: def frange(start, stop, step, precision): """A generator that will generate a range of floats."""
value = start while round(value, precision) < stop: yield round(value, precision) value += step
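For example, stepping from 0.0 to 0.5 in increments of 0.1 with one decimal place of precision:

print(list(frange(0.0, 0.5, 0.1, 1)))
# [0.0, 0.1, 0.2, 0.3, 0.4]

Rounding both the yielded value and the loop test keeps accumulated floating-point drift (e.g. 0.5000000000000001) from producing an extra step.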
<SYSTEM_TASK:> Calculate PID output value for given reference input and feedback. <END_TASK> <USER_TASK:> Description: def update(self, currentTemp, targetTemp): """Calculate PID output value for given reference input and feedback."""
# in this implementation, ki includes the dt multiplier term, # and kd includes the dt divisor term. This is typical practice in # industry. self.targetTemp = targetTemp self.error = targetTemp - currentTemp self.P_value = self.Kp * self.error # it is common practice to compute derivative term against PV, # instead of de/dt. This is because de/dt spikes # when the set point changes. # PV version with no dPV/dt filter - note 'previous'-'current', # that's desired, how the math works out self.D_value = self.Kd * (self.Derivator - currentTemp) self.Derivator = currentTemp self.Integrator = self.Integrator + self.error if self.Integrator > self.Integrator_max: self.Integrator = self.Integrator_max elif self.Integrator < self.Integrator_min: self.Integrator = self.Integrator_min self.I_value = self.Integrator * self.Ki output = self.P_value + self.I_value + self.D_value if output > self.Output_max: output = self.Output_max if output < self.Output_min: output = self.Output_min return(output)
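The class constructor is not shown in this excerpt, so the following standalone sketch re-implements the same control law (P on the error, D on the measurement, clamped integrator and output) as a free function and drives a crude first-order heater model; all gains, limits and plant constants are illustrative assumptions, not values taken from the library:

def pid_step(state, current, target):
    # same structure as update(): P on error, D on measurement, clamped I
    error = target - current
    p = state['Kp'] * error
    d = state['Kd'] * (state['deriv'] - current)   # derivative on the PV
    state['deriv'] = current
    state['integ'] = min(max(state['integ'] + error,
                             state['imin']), state['imax'])
    i = state['integ'] * state['Ki']
    return min(max(p + i + d, state['omin']), state['omax'])

state = dict(Kp=6.0, Ki=0.05, Kd=2.0, deriv=25.0, integ=0.0,
             imin=-50.0, imax=50.0, omin=0.0, omax=100.0)
temp = 25.0                                        # start at room temperature
for _ in range(120):
    output = pid_step(state, temp, target=200.0)
    temp += 0.02 * output - 0.01 * (temp - 25.0)   # toy plant response
print(round(temp, 1))                              # temperature rises well above ambient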
<SYSTEM_TASK:> Load dict from file for random words. <END_TASK> <USER_TASK:> Description: def load_nouns(self, file): """ Load dict from file for random words. :param str file: filename """
with open(os.path.join(main_dir, file + '.dat'), 'r') as f: self.nouns = json.load(f)
<SYSTEM_TASK:> Load list from file for random mails <END_TASK> <USER_TASK:> Description: def load_dmails(self, file): """ Load list from file for random mails :param str file: filename """
with open(os.path.join(main_dir, file + '.dat'), 'r') as f: self.dmails = frozenset(json.load(f))
<SYSTEM_TASK:> Load dict from file for random nicknames. <END_TASK> <USER_TASK:> Description: def load_nicknames(self, file): """ Load dict from file for random nicknames. :param str file: filename """
with open(os.path.join(main_dir, file + '.dat'), 'r') as f: self.nicknames = json.load(f)
<SYSTEM_TASK:> Returns list of random words. <END_TASK> <USER_TASK:> Description: def random_words(self, letter=None, count=1): """ Returns list of random words. :param str letter: letter :param int count: how many words :rtype: list :returns: list of random words :raises: ValueError """
self.check_count(count)
words = []
if letter is None:
    all_words = list(
        chain.from_iterable(self.nouns.values()))
    try:
        words = sample(all_words, count)
    except ValueError:
        len_sample = len(all_words)
        raise ValueError(
            'Param "count" must be less than {0}. '
            '(There are only {1} words.)'.format(len_sample + 1, len_sample))
elif type(letter) is not str:
    raise ValueError('Param "letter" must be string.')
elif letter not in self.available_letters:
    raise ValueError(
        'Param "letter" must be in {0}.'.format(
            self.available_letters))
elif letter in self.available_letters:
    try:
        words = sample(self.nouns[letter], count)
    except ValueError:
        len_sample = len(self.nouns[letter])
        raise ValueError(
            'Param "count" must be less than {0}. '
            '(There are only {1} words for letter "{2}".)'.format(
                len_sample + 1, len_sample, letter))
return words
<SYSTEM_TASK:> Return list of random nicks. <END_TASK> <USER_TASK:> Description: def random_nicks(self, letter=None, gender='u', count=1): """ Return list of random nicks. :param str letter: letter :param str gender: ``'f'`` for female, ``'m'`` for male and ``'u'`` for both :param int count: how many nicks :rtype: list :returns: list of random nicks :raises: ValueError """
self.check_count(count)
nicks = []
if gender not in ('f', 'm', 'u'):
    raise ValueError('Param "gender" must be in (f, m, u)')
if letter is None:
    all_nicks = list(
        chain.from_iterable(self.nicknames[gender].values()))
    try:
        nicks = sample(all_nicks, count)
    except ValueError:
        len_sample = len(all_nicks)
        raise ValueError(
            'Param "count" must be less than {0}. '
            '(There are only {1} nicks.)'.format(len_sample + 1, len_sample))
elif type(letter) is not str:
    raise ValueError('Param "letter" must be string.')
elif letter not in self.available_letters:
    raise ValueError(
        'Param "letter" must be in "{0}".'.format(
            self.available_letters))
elif letter in self.available_letters:
    try:
        nicks = sample(self.nicknames[gender][letter], count)
    except ValueError:
        len_sample = len(self.nicknames[gender][letter])
        raise ValueError(
            'Param "count" must be less than {0}. '
            '(There are only {1} nicks for letter "{2}".)'.format(
                len_sample + 1, len_sample, letter))
return nicks
<SYSTEM_TASK:> Return sentences in list. <END_TASK> <USER_TASK:> Description: def get_sentences_list(self, sentences=1): """ Return sentences in list. :param int sentences: how many sentences :returns: list of strings with sentence :rtype: list """
if sentences < 1: raise ValueError('Param "sentences" must be greater than 0.') sentences_list = [] while sentences: num_rand_words = random.randint(self.MIN_WORDS, self.MAX_WORDS) random_sentence = self.make_sentence( random.sample(self.words, num_rand_words)) sentences_list.append(random_sentence) sentences -= 1 return sentences_list
<SYSTEM_TASK:> Return a sentence from list of words. <END_TASK> <USER_TASK:> Description: def make_sentence(list_words): """ Return a sentence from list of words. :param list list_words: list of words :returns: sentence :rtype: str """
lw_len = len(list_words) if lw_len > 6: list_words.insert(lw_len // 2 + random.choice(range(-2, 2)), ',') sentence = ' '.join(list_words).replace(' ,', ',') return sentence.capitalize() + '.'
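A quick usage sketch, assuming make_sentence is callable as a plain function; note that the insert() mutates the list passed in, and the comma position is chosen at random, so the exact output varies:

words = ['the', 'quick', 'brown', 'fox', 'jumps', 'over', 'dogs']
print(make_sentence(list(words)))   # pass a copy so the original list is untouched
# e.g. 'The quick brown, fox jumps over dogs.'  (comma placement is random)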
<SYSTEM_TASK:> Expand a reference expression to individual spans. <END_TASK> <USER_TASK:> Description: def expand(expression): """ Expand a reference expression to individual spans. Also works on space-separated ID lists, although a sequence of space characters will be considered a delimiter. >>> expand('a1') 'a1' >>> expand('a1[3:5]') 'a1[3:5]' >>> expand('a1[3:5+6:7]') 'a1[3:5]+a1[6:7]' >>> expand('a1 a2 a3') 'a1 a2 a3' """
tokens = [] for (pre, _id, _range) in robust_ref_re.findall(expression): if not _range: tokens.append('{}{}'.format(pre, _id)) else: tokens.append(pre) tokens.extend( '{}{}[{}:{}]'.format(delim, _id, start, end) for delim, start, end in span_re.findall(_range) ) return ''.join(tokens)
<SYSTEM_TASK:> Compress a reference expression to group spans on the same id. <END_TASK> <USER_TASK:> Description: def compress(expression): """ Compress a reference expression to group spans on the same id. Also works on space-separated ID lists, although a sequence of space characters will be considered a delimiter. >>> compress('a1') 'a1' >>> compress('a1[3:5]') 'a1[3:5]' >>> compress('a1[3:5+6:7]') 'a1[3:5+6:7]' >>> compress('a1[3:5]+a1[6:7]') 'a1[3:5+6:7]' >>> compress('a1 a2 a3') 'a1 a2 a3' """
tokens = [] selection = [] last_id = None for (pre, _id, _range) in robust_ref_re.findall(expression): if _range and _id == last_id: selection.extend([pre, _range]) continue if selection: tokens.extend(selection + [']']) selection = [] tokens.extend([pre, _id]) if _range: selection = ['[', _range] last_id = _id else: last_id = None if selection: tokens.extend(selection + [']']) return ''.join(tokens)
<SYSTEM_TASK:> Split the expression into individual selection expressions. The <END_TASK> <USER_TASK:> Description: def selections(expression, keep_delimiters=True): """ Split the expression into individual selection expressions. The delimiters will be kept as separate items if keep_delimiters=True. Also works on space-separated ID lists, although a sequence of space characters will be considered a delimiter. >>> selections('a1') ['a1'] >>> selections('a1[3:5]') ['a1[3:5]'] >>> selections('a1[3:5+6:7]') ['a1[3:5+6:7]'] >>> selections('a1[3:5+6:7]+a2[1:4]') ['a1[3:5+6:7]', '+', 'a2[1:4]'] >>> selections('a1[3:5+6:7]+a2[1:4]', keep_delimiters=False) ['a1[3:5+6:7]', 'a2[1:4]'] >>> selections('a1 a2 a3') ['a1', ' ', 'a2', ' ', 'a3'] """
tokens = [] for (pre, _id, _range) in robust_ref_re.findall(expression): if keep_delimiters and pre: tokens.append(pre) if _id: if _range: tokens.append('{}[{}]'.format(_id, _range)) else: tokens.append(_id) return tokens
<SYSTEM_TASK:> Return the string that is the resolution of the alignment expression <END_TASK> <USER_TASK:> Description: def resolve(container, expression): """ Return the string that is the resolution of the alignment expression `expression`, which selects ids from `container`. """
itemgetter = getattr(container, 'get_item', container.get) tokens = [] expression = expression.strip() for sel_delim, _id, _range in selection_re.findall(expression): tokens.append(delimiters.get(sel_delim, '')) item = itemgetter(_id) if item is None: raise XigtStructureError( 'Referred Item (id: {}) from reference "{}" does not ' 'exist in the given container.' .format(_id, expression) ) # treat None values as empty strings for resolution value = item.value() or '' if _range: for spn_delim, start, end in span_re.findall(_range): start = int(start) if start else None end = int(end) if end else None tokens.extend([ delimiters.get(spn_delim, ''), value[start:end] ]) else: tokens.append(value) return ''.join(tokens)
<SYSTEM_TASK:> Return a tuple, each value being a line of the source file. <END_TASK> <USER_TASK:> Description: def _get_file_content(source): """Return a tuple, each value being a line of the source file. Remove empty lines and comments (lines starting with a '#'). """
filepath = os.path.join('siglists', source + '.txt') lines = [] with resource_stream(__name__, filepath) as f: for i, line in enumerate(f): line = line.decode('utf-8', 'strict').strip() if not line or line.startswith('#'): continue try: re.compile(line) except Exception as ex: raise BadRegularExpressionLineError( 'Regex error: {} in file {} at line {}'.format( str(ex), filepath, i ) ) lines.append(line) if source in _SPECIAL_EXTENDED_VALUES: lines = lines + _SPECIAL_EXTENDED_VALUES[source] return tuple(lines)
<SYSTEM_TASK:> Check quickly whether file is rar archive. <END_TASK> <USER_TASK:> Description: def _get_rar_version(xfile): """Check quickly whether file is rar archive. """
buf = xfile.read(len(RAR5_ID)) if buf.startswith(RAR_ID): return 3 elif buf.startswith(RAR5_ID): xfile.read(1) return 5 return 0
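A usage sketch with in-memory file objects; the RAR_ID and RAR5_ID module constants are not shown in this excerpt, so the byte values below are the standard RAR signatures and should be treated as assumptions:

import io

RAR_ID = b"Rar!\x1a\x07\x00"        # assumed classic (RAR 1.5-4.x) signature
RAR5_ID = b"Rar!\x1a\x07\x01\x00"   # assumed RAR5 signature

print(_get_rar_version(io.BytesIO(RAR_ID + b"...")))   # 3
print(_get_rar_version(io.BytesIO(RAR5_ID + b"...")))  # 5
print(_get_rar_version(io.BytesIO(b"PK\x03\x04")))     # 0 (not a RAR file)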
<SYSTEM_TASK:> rescales a numpy array, so that min is 0 and max is 255 <END_TASK> <USER_TASK:> Description: def _normalize(mat: np.ndarray): """rescales a numpy array, so that min is 0 and max is 255"""
# scale by the full range (max - min) so the minimum maps to 0 and the
# maximum maps to 255
return ((mat - mat.min()) * (255 / (mat.max() - mat.min()))).astype(np.uint8)
<SYSTEM_TASK:> returns a matrix that contains RGB channels, and colors scaled <END_TASK> <USER_TASK:> Description: def to_24bit_gray(mat: np.ndarray): """returns a matrix that contains RGB channels, and colors scaled from 0 to 255"""
return np.repeat(np.expand_dims(_normalize(mat), axis=2), 3, axis=2)
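A quick check on a small array (this also exercises _normalize): values are rescaled to the 0-255 range and repeated across three identical channels:

import numpy as np

mat = np.array([[0.0, 0.5, 1.0],
                [1.5, 2.0, 2.5]])
rgb = to_24bit_gray(mat)
print(rgb.shape)              # (2, 3, 3)
print(rgb[0, 0], rgb[1, 2])   # [0 0 0] [255 255 255]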
<SYSTEM_TASK:> returns an RGB matrix scaled by a matplotlib color map <END_TASK> <USER_TASK:> Description: def apply_color_map(name: str, mat: np.ndarray = None): """returns an RGB matrix scaled by a matplotlib color map"""
def apply_map(mat): return (cm.get_cmap(name)(_normalize(mat))[:, :, :3] * 255).astype(np.uint8) return apply_map if mat is None else apply_map(mat)
<SYSTEM_TASK:> Can be used to create a pygame.Surface from a 2d numpy array. <END_TASK> <USER_TASK:> Description: def mat_to_surface(mat: np.ndarray, transformer=to_24bit_gray): """Can be used to create a pygame.Surface from a 2d numpy array. By default a grey image with scaled colors is returned, but using the transformer argument any transformation can be used. :param mat: the matrix to create the surface of. :type mat: np.ndarray :param transformer: function that transforms the matrix to a valid color matrix, i.e. it must have 3 dimensions, where the 3rd dimension holds the color channels. For each channel a value between 0 and 255 is allowed :type transformer: Callable[np.ndarray[np.ndarray]]"""
return pygame.pixelcopy.make_surface(transformer(mat.transpose()) if transformer is not None else mat.transpose())
<SYSTEM_TASK:> Merge any number of dictionaries <END_TASK> <USER_TASK:> Description: def merge_dict(data, *args): """Merge any number of dictionaries """
results = {} for current in (data,) + args: results.update(current) return results
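Later dictionaries win on key collisions, and the inputs themselves are left untouched:

defaults = {'host': 'localhost', 'port': 80}
overrides = {'port': 8080, 'debug': True}
print(merge_dict(defaults, overrides))
# {'host': 'localhost', 'port': 8080, 'debug': True}
print(defaults)
# {'host': 'localhost', 'port': 80}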
<SYSTEM_TASK:> Joins individual URL strings together, and returns a single string. <END_TASK> <USER_TASK:> Description: def make_url(url, *paths): """Joins individual URL strings together, and returns a single string. """
for path in paths: url = re.sub(r'/?$', re.sub(r'^/?', '/', path), url) return url
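For example:

print(make_url('http://example.com', 'api', 'v1', 'users'))
# http://example.com/api/v1/users
print(make_url('http://example.com/api', 'v1/users'))
# http://example.com/api/v1/users

Note that inputs already ending in '/' may behave differently across Python versions, because re.sub's treatment of empty matches adjacent to a non-empty match changed in Python 3.7.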
<SYSTEM_TASK:> returns a handler that listens for unicode characters <END_TASK> <USER_TASK:> Description: def unicode_char(ignored_chars=None): """returns a handler that listens for unicode characters"""
return lambda e: e.unicode if e.type == pygame.KEYDOWN \ and ((ignored_chars is None) or (e.unicode not in ignored_chars))\ else EventConsumerInfo.DONT_CARE