'Helper method that generates a unique label for a :class:`MetricSpec` / :class:`~nupic.frameworks.opf.opf_utils.InferenceType` pair. The label is formatted as follows: <predictionKind>:<metric type>:(paramName=value)*:field=<fieldname> For example: classification:aae:paramA=10.2:paramB=20:window=100:field=pounds :returns: (string) label for the metric spec / inference type pair'
def getLabel(self, inferenceType=None):
result = []
if inferenceType is not None:
  result.append(InferenceType.getLabel(inferenceType))
result.append(self.inferenceElement)
result.append(self.metric)

params = self.params
if params is not None:
  sortedParams = params.keys()
  sortedParams.sort()
  for param in sortedParams:
    if param in ('customFuncSource', 'customFuncDef', 'customExpr'):
      continue
    value = params[param]
    if isinstance(value, str):
      result.extend(["%s='%s'" % (param, value)])
    else:
      result.extend(['%s=%s' % (param, value)])

if self.field:
  result.append('field=%s' % self.field)

return self._LABEL_SEPARATOR.join(result)
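As a quick illustration, here is a self-contained sketch that reproduces the label format shown in the docstring above (the ':' separator stands in for _LABEL_SEPARATOR, which is an assumption here):

parts = ['classification', 'aae']
params = {'paramA': 10.2, 'paramB': 20, 'window': 100}
parts.extend('%s=%s' % (name, params[name]) for name in sorted(params))
parts.append('field=pounds')
print(':'.join(parts))
# -> classification:aae:paramA=10.2:paramB=20:window=100:field=pounds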
'Extracts the PredictionKind (temporal vs. nontemporal) from the given metric label. :param label: (string) label for a metric spec generated by :meth:`getLabel` :returns: (:class:`~nupic.frameworks.opf.opf_utils.InferenceType`)'
@classmethod def getInferenceTypeFromLabel(cls, label):
(infType, _, _) = label.partition(cls._LABEL_SEPARATOR)
if not InferenceType.validate(infType):
  return None
return infType
':param windowSize: The number of values that are used to compute the moving average'
def __init__(self, windowSize=None):
self._windowSize = windowSize
self._countDict = dict()
self._history = deque([])
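For context, the windowed-average idea these fields support, as a self-contained sketch (not the class's actual update logic):

from collections import deque

def windowedMean(values, windowSize):
  # Average over at most the `windowSize` most recent values.
  history = deque(maxlen=windowSize)
  means = []
  for v in values:
    history.append(v)
    means.append(float(sum(history)) / len(history))
  return means

print(windowedMean([2, 4, 6, 8], windowSize=2))  # -> [2.0, 3.0, 5.0, 7.0]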
'Initialize this metric. If the params dict contains the key \'errorMetric\', then that is the name of another metric to which we will pass a modified groundTruth and prediction from our addInstance() method. For example, we may compute a moving mean on the groundTruth and then pass that to the AbsoluteAveError metric.'
def __init__(self, metricSpec):
self.id = None
self.verbosity = 0
self.window = -1
self.history = None
self.accumulatedError = 0
self.aggregateError = None
self.steps = 0
self.spec = metricSpec
self.disabled = False

self._predictionSteps = [0]
self._groundTruthHistory = deque([])
self._subErrorMetrics = None
self._maxRecords = None

if metricSpec is not None and metricSpec.params is not None:
  self.id = metricSpec.params.get('id', None)
  self._predictionSteps = metricSpec.params.get('steps', [0])
  # Make sure _predictionSteps is a list
  if not hasattr(self._predictionSteps, '__iter__'):
    self._predictionSteps = [self._predictionSteps]

  self.verbosity = metricSpec.params.get('verbosity', 0)
  self._maxRecords = metricSpec.params.get('maxRecords', None)

  # Get the metric window size, if windowing is requested
  if 'window' in metricSpec.params:
    assert metricSpec.params['window'] >= 1
    self.history = deque([])
    self.window = metricSpec.params['window']

  # Build the chained sub-metrics named by 'errorMetric', one per step
  if 'errorMetric' in metricSpec.params:
    self._subErrorMetrics = []
    for step in self._predictionSteps:
      subSpec = copy.deepcopy(metricSpec)
      subSpec.params.pop('steps', None)
      subSpec.params.pop('errorMetric')
      subSpec.metric = metricSpec.params['errorMetric']
      self._subErrorMetrics.append(getModule(subSpec))
'Utility function that saves the passed in groundTruth into a local history buffer, and returns the groundTruth from self._predictionSteps ago, where self._predictionSteps is defined by the \'steps\' parameter. This can be called from the beginning of a derived class\'s addInstance() before it passes groundTruth and prediction onto accumulate().'
def _getShiftedGroundTruth(self, groundTruth):
self._groundTruthHistory.append(groundTruth)
assert len(self._predictionSteps) == 1
if len(self._groundTruthHistory) > self._predictionSteps[0]:
  return self._groundTruthHistory.popleft()
elif hasattr(groundTruth, '__iter__'):
  return [None] * len(groundTruth)
else:
  return None
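A self-contained sketch of the shifting behaviour described above, with a hypothetical step count of 2:

from collections import deque

history = deque()
steps = 2  # stands in for self._predictionSteps == [2]

def shifted(groundTruth):
  history.append(groundTruth)
  if len(history) > steps:
    return history.popleft()
  return None

print([shifted(v) for v in [10, 20, 30, 40]])  # -> [None, None, 10, 20]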
'Compute and store metric value'
def addInstance(self, groundTruth, prediction, record=None, result=None):
self.value = self.avg(prediction)
'Return the metric value'
def getMetric(self):
return {'value': self.value}
'Helper function to return a scalar value representing the most likely outcome given a probability distribution'
def mostLikely(self, pred):
if len(pred) == 1:
  return pred.keys()[0]

mostLikelyOutcome = None
maxProbability = 0

for prediction, probability in pred.items():
  if probability > maxProbability:
    mostLikelyOutcome = prediction
    maxProbability = probability

return mostLikelyOutcome
'Helper function to return a scalar value representing the expected value of a probability distribution'
def expValue(self, pred):
if len(pred) == 1:
  return pred.keys()[0]
return sum([x * p for x, p in pred.items()])
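For example, on the distribution {0: 0.6, 1: 0.4} the two helpers reduce to the following (plain expressions, not the class methods):

pred = {0: 0.6, 1: 0.4}

mostLikelyOutcome = max(pred, key=pred.get)          # -> 0
expectedValue = sum(x * p for x, p in pred.items())  # -> 0.4
print(mostLikelyOutcome)
print(expectedValue)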
'Accumulate history of groundTruth and "prediction" values. For this metric, groundTruth is the actual category and "prediction" is a dict containing one top-level item with a key of 0 (meaning this is the 0-step classification) and a value which is another dict, which contains the probability for each category as output from the classifier. For example, if the classifier said that category 0 had a 0.6 probability and category 1 had a 0.4 probability, "prediction" would be: {0: {0: 0.6, 1: 0.4}}'
def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result=None):
if self.disabled:
  return 0

if historyBuffer is not None:
  historyBuffer.append((groundTruth, prediction[0]))
  if len(historyBuffer) > self.spec.params['window']:
    historyBuffer.popleft()

return 0
'MetricMulti constructor using metricSpec is not allowed.'
def __init__(self, metricSpec):
raise ValueError('MetricMulti cannot be constructed from metricSpec string! Use MetricMulti(weights,metrics) constructor instead.')
'MetricMulti @param weights - [list of floats] used as weights @param metrics - [list of submetrics] @param window - (opt) window size for moving average, or None when disabled'
def __init__(self, weights, metrics, window=None):
if (weights is None or
    not isinstance(weights, list) or
    len(weights) == 0 or
    not isinstance(weights[0], float)):
  raise ValueError("MetricMulti requires 'weights' parameter as a [list of floats]")
self.weights = weights

if (metrics is None or
    not isinstance(metrics, list) or
    len(metrics) == 0 or
    not isinstance(metrics[0], MetricsIface)):
  raise ValueError("MetricMulti requires 'metrics' parameter as a [list of Metrics]")
self.metrics = metrics

if window is not None:
  self.movingAvg = MovingAverage(windowSize=window)
else:
  self.movingAvg = None
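The weights are presumably applied as a linear combination of the sub-metric values; a tiny sketch of that idea (plain numbers, not MetricsIface instances, and not the class's actual update code):

weights = [0.2, 0.8]
subMetricValues = [1.5, 0.25]   # hypothetical values from two sub-metrics

combined = sum(w * v for w, v in zip(weights, subMetricValues))
print(combined)  # -> 0.5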
'Constructor Args: model: The OPF Model instance against which to run the task task: A dictionary conforming to opfTaskSchema.json cmdOptions: ParseCommandLineOptionsResult namedtuple'
def __init__(self, model, task, cmdOptions):
validateOpfJsonValue(task, 'opfTaskSchema.json')

self.__logger = logging.getLogger('.'.join(
    ['com.numenta', self.__class__.__module__, self.__class__.__name__]))
self.__logger.debug('Instantiated %s(model=%r, task=%r, cmdOptions=%r)' %
                    (self.__class__.__name__, model, task, cmdOptions))

streamDef = task['dataset']
datasetReader = opf_basic_environment.BasicDatasetReader(streamDef)

self.__model = model
self.__datasetReader = datasetReader
self.__task = task
self.__cmdOptions = cmdOptions

self.__predictionLogger = opf_basic_environment.BasicPredictionLogger(
    fields=model.getFieldInfo(),
    experimentDir=cmdOptions.experimentDir,
    label=task['taskLabel'],
    inferenceType=self.__model.getInferenceType())

taskControl = task['taskControl']
self.__taskDriver = OPFTaskDriver(taskControl=taskControl, model=model)

loggedMetricPatterns = taskControl.get('loggedMetrics', None)
loggedMetricLabels = matchPatterns(loggedMetricPatterns,
                                   self.__taskDriver.getMetricLabels())
self.__predictionLogger.setLoggedMetrics(loggedMetricLabels)

self.__metricsLogger = opf_basic_environment.BasicPredictionMetricsLogger(
    experimentDir=cmdOptions.experimentDir,
    label=task['taskLabel'])
'Runs a single experiment task'
def run(self):
self.__logger.debug('run(): Starting task <%s>', self.__task['taskLabel'])

if self.__cmdOptions.privateOptions['testMode']:
  numIters = 10
else:
  numIters = self.__task['iterationCount']

if numIters >= 0:
  iterTracker = iter(xrange(numIters))
else:
  iterTracker = iter(itertools.count())

periodic = PeriodicActivityMgr(
    requestedActivities=self._createPeriodicActivities())

self.__model.resetSequenceStates()
self.__taskDriver.setup()

while True:
  try:
    next(iterTracker)
  except StopIteration:
    break

  try:
    inputRecord = self.__datasetReader.next()
  except StopIteration:
    break

  result = self.__taskDriver.handleInputRecord(inputRecord=inputRecord)

  if InferenceElement.encodings in result.inferences:
    result.inferences.pop(InferenceElement.encodings)
  self.__predictionLogger.writeRecord(result)

  periodic.tick()

self._getAndEmitExperimentMetrics(final=True)
self.__taskDriver.finalize()
self.__model.resetSequenceStates()
'Creates and returns a list of activities for this TaskRunner instance. Returns: a list of PeriodicActivityRequest elements'
def _createPeriodicActivities(self):
periodicActivities = []

class MetricsReportCb(object):
  def __init__(self, taskRunner):
    self.__taskRunner = taskRunner
    return

  def __call__(self):
    self.__taskRunner._getAndEmitExperimentMetrics()

reportMetrics = PeriodicActivityRequest(
    repeating=True, period=1000, cb=MetricsReportCb(self))
periodicActivities.append(reportMetrics)

class IterationProgressCb(object):
  PROGRESS_UPDATE_PERIOD_TICKS = 1000

  def __init__(self, taskLabel, requestedIterationCount, logger):
    self.__taskLabel = taskLabel
    self.__requestedIterationCount = requestedIterationCount
    self.__logger = logger
    self.__numIterationsSoFar = 0

  def __call__(self):
    self.__numIterationsSoFar += self.PROGRESS_UPDATE_PERIOD_TICKS
    self.__logger.debug('%s: ITERATION PROGRESS: %s of %s' % (
        self.__taskLabel, self.__numIterationsSoFar,
        self.__requestedIterationCount))

iterationProgressCb = IterationProgressCb(
    taskLabel=self.__task['taskLabel'],
    requestedIterationCount=self.__task['iterationCount'],
    logger=self.__logger)
iterationProgressReporter = PeriodicActivityRequest(
    repeating=True,
    period=IterationProgressCb.PROGRESS_UPDATE_PERIOD_TICKS,
    cb=iterationProgressCb)
periodicActivities.append(iterationProgressReporter)

return periodicActivities
'requestedActivities: a sequence of PeriodicActivityRequest elements'
def __init__(self, requestedActivities):
self.__activities = []

for req in requestedActivities:
  act = self.Activity(repeating=req.repeating,
                      period=req.period,
                      cb=req.cb,
                      iteratorHolder=[iter(xrange(req.period - 1))])
  self.__activities.append(act)
'Activity tick handler; services all activities Returns: True if controlling iterator says it\'s okay to keep going; False to stop'
def tick(self):
for act in self.__activities:
  if not act.iteratorHolder[0]:
    continue

  try:
    next(act.iteratorHolder[0])
  except StopIteration:
    act.cb()
    if act.repeating:
      act.iteratorHolder[0] = iter(xrange(act.period - 1))
    else:
      act.iteratorHolder[0] = None

return True
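The tick/period bookkeeping above amounts to a per-activity countdown; a compact self-contained sketch of the same idea (hypothetical class, not PeriodicActivityMgr):

class Periodic(object):
  def __init__(self, period, cb, repeating=True):
    self.period = period
    self.cb = cb
    self.repeating = repeating
    self._countdown = period

  def tick(self):
    self._countdown -= 1
    if self._countdown == 0:
      self.cb()
      self._countdown = self.period if self.repeating else float('inf')

fired = []
p = Periodic(period=3, cb=lambda: fired.append('fire'))
for _ in range(7):
  p.tick()
print(fired)  # -> ['fire', 'fire']  (fired on ticks 3 and 6)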
'Translates the given metrics value to a JSON string. metrics: A list of dictionaries per OPFTaskDriver.getMetrics() Returns: JSON string representing the given metrics object.'
def _translateMetricsToJSON(self, metrics, label):
metricsDict = metrics

def _mapNumpyValues(obj):
  """json.dumps() default handler that converts numpy scalar/array values."""
  import numpy

  if isinstance(obj, numpy.float32):
    return float(obj)
  elif isinstance(obj, numpy.bool_):
    return bool(obj)
  elif isinstance(obj, numpy.ndarray):
    return obj.tolist()
  else:
    raise TypeError('UNEXPECTED OBJ: %s; class=%s' % (obj, obj.__class__))

jsonString = json.dumps(metricsDict, indent=4, default=_mapNumpyValues)

return jsonString
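The same default-encoder pattern as a standalone snippet (requires numpy; the field names are made up):

import json
import numpy

def mapNumpyValues(obj):
  if isinstance(obj, numpy.floating):
    return float(obj)
  if isinstance(obj, numpy.ndarray):
    return obj.tolist()
  raise TypeError('Cannot serialize %r' % (obj,))

print(json.dumps({'aae': numpy.float32(0.25), 'hist': numpy.arange(3)},
                 default=mapNumpyValues))
# -> {"aae": 0.25, "hist": [0, 1, 2]}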
'Constructor experimentDir: experiment directory path that contains description.py label: A label string to incorporate into the filename. inferenceType: A constant from opf_utils.InferenceType for the requested prediction writer fields: a non-empty sequence of nupic.data.fieldmeta.FieldMetaInfo representing fields that will be emitted to this prediction writer metricNames: OPTIONAL - A list of metric names that will be emitted by this prediction writer checkpointSource: If not None, a File-like object containing the previously-checkpointed predictions for setting the initial contents of this PredictionOutputStream. Will be copied before returning, if needed.'
def __init__(self, experimentDir, label, inferenceType, fields, metricNames=None, checkpointSource=None):
self.__experimentDir = experimentDir
self.__inferenceType = inferenceType
self.__inputFieldsMeta = tuple(copy.deepcopy(fields))
self.__numInputFields = len(self.__inputFieldsMeta)
self.__label = label
if metricNames is not None:
  metricNames.sort()
self.__metricNames = metricNames

self.__outputFieldsMeta = []
self._rawInputNames = []

self.__datasetPath = None
self.__dataset = None

self.__checkpointCache = None
if checkpointSource is not None:
  checkpointSource.seek(0)
  self.__checkpointCache = StringIO.StringIO()
  shutil.copyfileobj(checkpointSource, self.__checkpointCache)

return
'Open the data file and write the header row'
def __openDatafile(self, modelResult):
# Build the output field metadata, starting with the reset field
resetFieldMeta = FieldMetaInfo(
    name='reset',
    type=FieldMetaType.integer,
    special=FieldMetaSpecial.reset)
self.__outputFieldsMeta.append(resetFieldMeta)

# Add raw input fields (skip "private" fields and the reset field)
rawInput = modelResult.rawInput
rawFields = rawInput.keys()
rawFields.sort()
for field in rawFields:
  if field.startswith('_') or field == 'reset':
    continue
  value = rawInput[field]
  meta = FieldMetaInfo(name=field, type=FieldMetaType.string,
                       special=FieldMetaSpecial.none)
  self.__outputFieldsMeta.append(meta)
  self._rawInputNames.append(field)

# Add inference fields
for inferenceElement, value in modelResult.inferences.iteritems():
  inferenceLabel = InferenceElement.getLabel(inferenceElement)

  if type(value) in (list, tuple):
    self.__outputFieldsMeta.extend(self.__getListMetaInfo(inferenceElement))
  elif isinstance(value, dict):
    self.__outputFieldsMeta.extend(self.__getDictMetaInfo(inferenceElement,
                                                          value))
  else:
    if InferenceElement.getInputElement(inferenceElement):
      self.__outputFieldsMeta.append(FieldMetaInfo(
          name=inferenceLabel + '.actual',
          type=FieldMetaType.string, special=''))
    self.__outputFieldsMeta.append(FieldMetaInfo(
        name=inferenceLabel, type=FieldMetaType.string, special=''))

# Add metric fields
if self.__metricNames:
  for metricName in self.__metricNames:
    metricField = FieldMetaInfo(
        name=metricName, type=FieldMetaType.float,
        special=FieldMetaSpecial.none)
    self.__outputFieldsMeta.append(metricField)

# Open the output dataset file
inferenceDir = _FileUtils.createExperimentInferenceDir(self.__experimentDir)
filename = (self.__label + '.' +
            opf_utils.InferenceType.getLabel(self.__inferenceType) +
            '.predictionLog.csv')
self.__datasetPath = os.path.join(inferenceDir, filename)

print('OPENING OUTPUT FOR PREDICTION WRITER AT: %r' % self.__datasetPath)
print('Prediction field-meta: %r' % ([tuple(i) for i in self.__outputFieldsMeta],))
self.__dataset = FileRecordStream(streamID=self.__datasetPath, write=True,
                                  fields=self.__outputFieldsMeta)

# Copy data from the checkpoint cache, if any
if self.__checkpointCache is not None:
  self.__checkpointCache.seek(0)
  reader = csv.reader(self.__checkpointCache, dialect='excel')

  try:
    header = reader.next()
  except StopIteration:
    print('Empty record checkpoint initializer for %r' % (self.__datasetPath,))
  else:
    assert tuple(self.__dataset.getFieldNames()) == tuple(header), (
        'dataset.getFieldNames(): %r; predictionCheckpointFieldNames: %r' % (
            tuple(self.__dataset.getFieldNames()), tuple(header)))

    numRowsCopied = 0
    while True:
      try:
        row = reader.next()
      except StopIteration:
        break

      self.__dataset.appendRecord(row)
      numRowsCopied += 1

    self.__dataset.flush()
    print('Restored %d rows from checkpoint for %r' % (
        numRowsCopied, self.__datasetPath))

  # Dispose of the checkpoint cache
  self.__checkpointCache.close()
  self.__checkpointCache = None

return
'Tell the writer which metrics should be written. Parameters: metricNames: A list of metric labels to be written'
def setLoggedMetrics(self, metricNames):
if metricNames is None:
  self.__metricNames = set([])
else:
  self.__metricNames = set(metricNames)
'[virtual method override] Closes the writer (e.g., close the underlying file)'
def close(self):
if self.__dataset:
  self.__dataset.close()
self.__dataset = None

return
'Get field metadata information for inferences that are of list type. TODO: Right now we assume list inferences are associated with the input field metadata'
def __getListMetaInfo(self, inferenceElement):
fieldMetaInfo = []
inferenceLabel = InferenceElement.getLabel(inferenceElement)

for inputFieldMeta in self.__inputFieldsMeta:
  if InferenceElement.getInputElement(inferenceElement):
    outputFieldMeta = FieldMetaInfo(name=inputFieldMeta.name + '.actual',
                                    type=inputFieldMeta.type,
                                    special=inputFieldMeta.special)

  predictionField = FieldMetaInfo(name=inputFieldMeta.name + '.' + inferenceLabel,
                                  type=inputFieldMeta.type,
                                  special=inputFieldMeta.special)

  fieldMetaInfo.append(outputFieldMeta)
  fieldMetaInfo.append(predictionField)

return fieldMetaInfo
'Get field metadata information for inferences that are of dict type'
def __getDictMetaInfo(self, inferenceElement, inferenceDict):
fieldMetaInfo = []
inferenceLabel = InferenceElement.getLabel(inferenceElement)

if InferenceElement.getInputElement(inferenceElement):
  fieldMetaInfo.append(FieldMetaInfo(name=inferenceLabel + '.actual',
                                     type=FieldMetaType.string,
                                     special=''))

keys = sorted(inferenceDict.keys())
for key in keys:
  fieldMetaInfo.append(FieldMetaInfo(name=inferenceLabel + '.' + str(key),
                                     type=FieldMetaType.string,
                                     special=''))

return fieldMetaInfo
'[virtual method override] Emits a single prediction as input versus predicted. modelResult: An opf_utils.ModelResult object that contains the model input and output for the current timestep.'
def append(self, modelResult):
inferences = modelResult.inferences
hasInferences = False
if inferences is not None:
  for value in inferences.itervalues():
    hasInferences = hasInferences or (value is not None)

if not hasInferences:
  return

if self.__dataset is None:
  self.__openDatafile(modelResult)

inputData = modelResult.sensorInput

sequenceReset = int(bool(inputData.sequenceReset))
outputRow = [sequenceReset]

# Write out the raw input values
rawInput = modelResult.rawInput
for field in self._rawInputNames:
  outputRow.append(str(rawInput[field]))

# Write out the inference values
for inferenceElement, outputVal in inferences.iteritems():
  inputElement = InferenceElement.getInputElement(inferenceElement)
  if inputElement:
    inputVal = getattr(inputData, inputElement)
  else:
    inputVal = None

  if type(outputVal) in (list, tuple):
    assert type(inputVal) in (list, tuple, None)
    for iv, ov in zip(inputVal, outputVal):
      outputRow.append(str(iv))
      outputRow.append(str(ov))
  elif isinstance(outputVal, dict):
    if inputVal is not None:
      if modelResult.predictedFieldName is not None:
        outputRow.append(str(inputVal[modelResult.predictedFieldName]))
      else:
        outputRow.append(str(inputVal))
    for key in sorted(outputVal.keys()):
      outputRow.append(str(outputVal[key]))
  else:
    if inputVal is not None:
      outputRow.append(str(inputVal))
    outputRow.append(str(outputVal))

# Write out the metric values
metrics = modelResult.metrics
for metricName in self.__metricNames:
  outputRow.append(metrics.get(metricName, 0.0))

self.__dataset.appendRecord(outputRow)
self.__dataset.flush()

return
'[virtual method override] Save a checkpoint of the prediction output stream. The checkpoint comprises up to maxRows of the most recent inference records. Parameters: checkpointSink: A File-like object where predictions checkpoint data, if any, will be stored. maxRows: Maximum number of most recent inference rows to checkpoint.'
def checkpoint(self, checkpointSink, maxRows):
checkpointSink.truncate()

if self.__dataset is None:
  if self.__checkpointCache is not None:
    self.__checkpointCache.seek(0)
    shutil.copyfileobj(self.__checkpointCache, checkpointSink)
    checkpointSink.flush()
    return
  else:
    # Nothing to checkpoint
    return

self.__dataset.flush()
totalDataRows = self.__dataset.getDataRowCount()
if totalDataRows == 0:
  # Nothing to checkpoint
  return

reader = FileRecordStream(self.__datasetPath, missingValues=[])
writer = csv.writer(checkpointSink)

# Write the header row, then copy the most recent rows into the checkpoint
writer.writerow(reader.getFieldNames())

numToWrite = min(maxRows, totalDataRows)
numRowsToSkip = totalDataRows - numToWrite
for i in xrange(numRowsToSkip):
  reader.next()

numWritten = 0
while True:
  row = reader.getNextRecord()
  if row is None:
    break
  row = [str(element) for element in row]
  writer.writerow(row)
  numWritten += 1

assert numWritten == numToWrite, 'numWritten (%s) != numToWrite (%s)' % (
    numWritten, numToWrite)

checkpointSink.flush()

return
'Emit an input/prediction pair, if possible. modelResult: An opf_utils.ModelResult object that contains the model input and output for the current timestep.'
def update(self, modelResult):
self.__writer.append(modelResult)
return
'writer: Non-temporal prediction log writer conforming to PredictionWriterIface interface.'
def __init__(self, writer):
self.__logger = logging.getLogger('.'.join(
    ['com.numenta', self.__class__.__module__, self.__class__.__name__]))
self.__writer = writer
self.__inferenceShifter = InferenceShifter()
return
'Queue up the T(i+1) prediction value and emit a T(i) input/prediction pair, if possible. E.g., if the previous T(i-1) iteration was learn-only, then we would not have a T(i) prediction in our FIFO and would not be able to emit a meaningful input/prediction pair. modelResult: An opf_utils.ModelResult object that contains the model input and output for the current timestep.'
def update(self, modelResult):
self.__writer.append(self.__inferenceShifter.shift(modelResult))
'experimentDir: experiment directory path that contains description.py Returns: experiment inference directory path string (the path may not yet exist - see createExperimentInferenceDir())'
@staticmethod def getExperimentInferenceDirPath(experimentDir):
return os.path.abspath(os.path.join(experimentDir, 'inference'))
'Creates the inference output directory for the given experiment experimentDir: experiment directory path that contains description.py Returns: path of the inference output directory'
@classmethod def createExperimentInferenceDir(cls, experimentDir):
path = cls.getExperimentInferenceDirPath(experimentDir)
cls.makeDirectory(path)
return path
'Makes directory for the given directory path if it doesn\'t already exist in the filesystem. Creates all requested directory segments as needed. path: path of the directory to create. Returns: nothing'
@staticmethod def makeDirectory(path):
try:
  os.makedirs(path)
except OSError as e:
  if e.errno == os.errno.EEXIST:
    pass
  else:
    raise

return
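The same EEXIST-tolerant mkdir pattern as a portable standalone helper (uses the errno module instead of the os.errno alias):

import errno
import os

def ensureDirectory(path):
  # Create `path`, tolerating the case where it already exists.
  try:
    os.makedirs(path)
  except OSError as e:
    if e.errno != errno.EEXIST:
      raise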
'Get the sensor input element that corresponds to the given inference element. This is mainly used for metrics and prediction logging :param inferenceElement: (:class:`.InferenceElement`) :return: (string) name of sensor input element'
@staticmethod def getInputElement(inferenceElement):
return InferenceElement.__inferenceInputMap.get(inferenceElement, None)
'.. note:: This should only be checked IF THE MODEL\'S INFERENCE TYPE IS ALSO TEMPORAL. That is, a temporal model CAN have non-temporal inference elements, but a non-temporal model CANNOT have temporal inference elements. :param inferenceElement: (:class:`.InferenceElement`) :return: (bool) ``True`` if the inference from this time step predicts the input for the NEXT time step.'
@staticmethod def isTemporal(inferenceElement):
if InferenceElement.__temporalInferenceElements is None:
  InferenceElement.__temporalInferenceElements = set(
      [InferenceElement.prediction])

return inferenceElement in InferenceElement.__temporalInferenceElements
':param inferenceElement: (:class:`.InferenceElement`) value being delayed :param key: (string) If the inference is a dictionary type, this specifies key for the sub-inference that is being delayed. :return: (int) the number of records that elapse between when an inference is made and when the corresponding input record will appear. For example, a multistep prediction for 3 timesteps out will have a delay of 3.'
@staticmethod def getTemporalDelay(inferenceElement, key=None):
if inferenceElement in (InferenceElement.prediction,
                        InferenceElement.encodings):
  return 1

if inferenceElement in (InferenceElement.anomalyScore,
                        InferenceElement.anomalyLabel,
                        InferenceElement.classification,
                        InferenceElement.classConfidences):
  return 0

if inferenceElement in (InferenceElement.multiStepPredictions,
                        InferenceElement.multiStepBestPredictions,
                        InferenceElement.multiStepBucketLikelihoods):
  return int(key)

return 0
':param inferences: (dict) where the keys are :class:`.InferenceElement` objects. :return: (int) the maximum delay for the :class:`.InferenceElement` objects in the inference dictionary.'
@staticmethod def getMaxDelay(inferences):
maxDelay = 0
for inferenceElement, inference in inferences.iteritems():
  if isinstance(inference, dict):
    for key in inference.iterkeys():
      maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement, key),
                     maxDelay)
  else:
    maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement),
                   maxDelay)

return maxDelay
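A self-contained sketch of the delay rules above (string element names stand in for the InferenceElement constants):

def temporalDelay(element, key=None):
  if element in ('prediction', 'encodings'):
    return 1
  if element in ('multiStepPredictions', 'multiStepBestPredictions',
                 'multiStepBucketLikelihoods'):
    return int(key)
  return 0

def maxDelay(inferences):
  delays = [0]
  for element, inference in inferences.items():
    if isinstance(inference, dict):
      delays.extend(temporalDelay(element, k) for k in inference)
    else:
      delays.append(temporalDelay(element))
  return max(delays)

print(maxDelay({'multiStepBestPredictions': {1: 10.2, 3: 11.0},
                'anomalyScore': 0.07}))  # -> 3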
':param inferenceType: (:class:`.InferenceType`) :return: (bool) `True` if the inference type is \'temporal\', i.e. requires a temporal memory in the network.'
@staticmethod def isTemporal(inferenceType):
if InferenceType.__temporalInferenceTypes is None:
  InferenceType.__temporalInferenceTypes = set(
      [InferenceType.TemporalNextStep,
       InferenceType.TemporalClassification,
       InferenceType.TemporalAnomaly,
       InferenceType.TemporalMultiStep,
       InferenceType.NontemporalMultiStep])

return inferenceType in InferenceType.__temporalInferenceTypes
':param opf_utils.InferenceType inferenceType: mutually-exclusive with proto arg :param proto: capnp ModelProto message reader for deserializing; mutually-exclusive with the other constructor args.'
def __init__(self, inferenceType=None, proto=None):
assert ((inferenceType is not None and proto is None) or
        (inferenceType is None and proto is not None)), (
            'proto and other constructor args are mutually exclusive')

if proto is None:
  self._numPredictions = 0
  self.__inferenceType = inferenceType
  self.__learningEnabled = True
  self.__inferenceEnabled = True
  self.__inferenceArgs = {}
else:
  self._numPredictions = proto.numPredictions
  inferenceType = str(proto.inferenceType)
  inferenceType = inferenceType[:1].upper() + inferenceType[1:]
  self.__inferenceType = InferenceType.getValue(inferenceType)
  self.__learningEnabled = proto.learningEnabled
  self.__inferenceEnabled = proto.inferenceEnabled
  self.__inferenceArgs = json.loads(proto.inferenceArgs)
'Run one iteration of this model. :param inputRecord: (object) A record object formatted according to :meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecord` or :meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecordDict` result format. :returns: (:class:`~nupic.frameworks.opf.opf_utils.ModelResult`) A ModelResult namedtuple. The contents of ModelResult.inferences depends on the specific inference type of this model, which can be queried by :meth:`.getInferenceType`.'
def run(self, inputRecord):
predictionNumber = self._numPredictions
self._numPredictions += 1
result = opf_utils.ModelResult(predictionNumber=predictionNumber,
                               rawInput=inputRecord)
return result
'Return the InferenceType of this model. This is immutable. :returns: :class:`~nupic.frameworks.opf.opf_utils.InferenceType`'
def getInferenceType(self):
return self.__inferenceType
'Turn Learning on for the current model.'
def enableLearning(self):
self.__learningEnabled = True
return
'Turn Learning off for the current model.'
def disableLearning(self):
self.__learningEnabled = False
return
'Return the Learning state of the current model. :returns: (bool) The learning state'
def isLearningEnabled(self):
return self.__learningEnabled
'Enable inference for this model. :param inferenceArgs: (dict) A dictionary of arguments required for inference. These depend on the InferenceType of the current model'
def enableInference(self, inferenceArgs=None):
self.__inferenceEnabled = True
self.__inferenceArgs = inferenceArgs
'Return the dict of arguments for the current inference mode. :returns: (dict) The arguments of the inference mode'
def getInferenceArgs(self):
return self.__inferenceArgs
'Turn Inference off for the current model.'
def disableInference(self):
self.__inferenceEnabled = False
'Return the inference state of the current model. :returns: (bool) The inference state'
def isInferenceEnabled(self):
return self.__inferenceEnabled
'Return the pycapnp proto type that the class uses for serialization. This is used to convert the proto into the proper type before passing it into the read or write method of the subclass.'
@staticmethod def getSchema():
raise NotImplementedError()
'Return the absolute path of the model\'s checkpoint file. :param checkpointDir: (string) Directory of where the experiment is to be or was saved :returns: (string) An absolute path.'
@staticmethod def _getModelCheckpointFilePath(checkpointDir):
path = os.path.join(checkpointDir, 'model.data')
path = os.path.abspath(path)
return path
'Serializes model using capnproto and writes data to ``checkpointDir``'
def writeToCheckpoint(self, checkpointDir):
proto = self.getSchema().new_message()
self.write(proto)

checkpointPath = self._getModelCheckpointFilePath(checkpointDir)

# Clean up old saved state, if any
if os.path.exists(checkpointDir):
  if not os.path.isdir(checkpointDir):
    raise Exception(('Existing filesystem entry <%s> is not a model'
                     ' checkpoint -- refusing to delete (not a directory)') %
                    checkpointDir)
  if not os.path.isfile(checkpointPath):
    raise Exception(('Existing filesystem entry <%s> is not a model'
                     ' checkpoint -- refusing to delete (%s missing or not a'
                     ' file)') % (checkpointDir, checkpointPath))

  shutil.rmtree(checkpointDir)

# Create a new directory for saving state
self.__makeDirectoryFromAbsolutePath(checkpointDir)

with open(checkpointPath, 'wb') as f:
  proto.write(f)
'Deserializes model from checkpointDir using capnproto'
@classmethod def readFromCheckpoint(cls, checkpointDir):
checkpointPath = cls._getModelCheckpointFilePath(checkpointDir)

with open(checkpointPath, 'r') as f:
  proto = cls.getSchema().read(f)

model = cls.read(proto)
return model
'Save the state maintained by the Model base class :param proto: capnp ModelProto message builder'
def writeBaseToProto(self, proto):
inferenceType = self.getInferenceType()
# Lower-case the first character to match the capnp enum value
# (the read path upper-cases it again before calling InferenceType.getValue)
inferenceType = inferenceType[:1].lower() + inferenceType[1:]
proto.inferenceType = inferenceType

proto.numPredictions = self._numPredictions
proto.learningEnabled = self.__learningEnabled
proto.inferenceEnabled = self.__inferenceEnabled
proto.inferenceArgs = json.dumps(self.__inferenceArgs)
'Write state to proto object. The type of proto is determined by :meth:`getSchema`.'
def write(self, proto):
raise NotImplementedError()
'Read state from proto object. The type of proto is determined by :meth:`getSchema`.'
@classmethod def read(cls, proto):
raise NotImplementedError()
'Save the model in the given directory. :param saveModelDir: (string) Absolute directory path for saving the model. This directory should only be used to store a saved model. If the directory does not exist, it will be created automatically and populated with model data. A pre-existing directory will only be accepted if it contains previously saved model data. If such a directory is given, the full contents of the directory will be deleted and replaced with current model data.'
def save(self, saveModelDir):
logger = self._getLogger()
logger.debug('(%s) Creating local checkpoint in %r...', self, saveModelDir)

modelPickleFilePath = self._getModelPickleFilePath(saveModelDir)

# Clean up old saved state, if any
if os.path.exists(saveModelDir):
  if not os.path.isdir(saveModelDir):
    raise Exception(('Existing filesystem entry <%s> is not a model'
                     ' checkpoint -- refusing to delete (not a directory)') %
                    saveModelDir)
  if not os.path.isfile(modelPickleFilePath):
    raise Exception(('Existing filesystem entry <%s> is not a model'
                     ' checkpoint -- refusing to delete (%s missing or not a'
                     ' file)') % (saveModelDir, modelPickleFilePath))

  shutil.rmtree(saveModelDir)

# Create a new directory for saving state
self.__makeDirectoryFromAbsolutePath(saveModelDir)

with open(modelPickleFilePath, 'wb') as modelPickleFile:
  logger.debug('(%s) Pickling Model instance...', self)
  pickle.dump(self, modelPickleFile)
  logger.debug('(%s) Finished pickling Model instance', self)

# Let the model save extra data, if any, that is too big for pickling
self._serializeExtraData(extraDataDir=self._getModelExtraDataDir(saveModelDir))

logger.debug('(%s) Finished creating local checkpoint', self)

return
'Protected method that is called during serialization with an external directory path. It can be overridden by subclasses to bypass pickle for saving large binary states. This is called by ModelBase only. :param extraDataDir: (string) Model\'s extra data directory path'
def _serializeExtraData(self, extraDataDir):
pass
'Load saved model. :param savedModelDir: (string) Directory of where the experiment is to be or was saved :returns: (:class:`Model`) The loaded model instance'
@classmethod def load(cls, savedModelDir):
logger = opf_utils.initLogger(cls)
logger.debug('Loading model from local checkpoint at %r...', savedModelDir)

# Load the model
modelPickleFilePath = Model._getModelPickleFilePath(savedModelDir)

with open(modelPickleFilePath, 'r') as modelPickleFile:
  logger.debug('Unpickling Model instance...')
  model = pickle.load(modelPickleFile)
  logger.debug('Finished unpickling Model instance')

# Let the model load extra data, if any, that is too big for pickling
model._deSerializeExtraData(
    extraDataDir=Model._getModelExtraDataDir(savedModelDir))

logger.debug('Finished Loading model from local checkpoint')

return model
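A minimal sketch of the pickle round-trip that save()/load() build on (a plain toy object and a temporary directory, not the Model class itself):

import os
import pickle
import shutil
import tempfile

class Toy(object):
  def __init__(self, value):
    self.value = value

saveDir = tempfile.mkdtemp()
picklePath = os.path.join(saveDir, 'model.pkl')

with open(picklePath, 'wb') as f:
  pickle.dump(Toy(42), f)
with open(picklePath, 'rb') as f:
  restored = pickle.load(f)

print(restored.value)  # -> 42
shutil.rmtree(saveDir)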
'Protected method that is called during deserialization (after __setstate__) with an external directory path. It can be overridden by subclasses to bypass pickle for loading large binary states. This is called by ModelBase only. :param extraDataDir: (string) Model\'s extra data directory path'
def _deSerializeExtraData(self, extraDataDir):
pass
'Return the absolute path of the model\'s pickle file. :param saveModelDir: (string) Directory of where the experiment is to be or was saved :returns: (string) An absolute path.'
@staticmethod def _getModelPickleFilePath(saveModelDir):
path = os.path.join(saveModelDir, 'model.pkl')
path = os.path.abspath(path)
return path
'Return the absolute path to the directory where the model\'s own "extra data" are stored (i.e., data that\'s too big for pickling). :param saveModelDir: (string) Directory of where the experiment is to be or was saved :returns: (string) An absolute path.'
@staticmethod def _getModelExtraDataDir(saveModelDir):
path = os.path.join(saveModelDir, 'modelextradata')
path = os.path.abspath(path)
return path
'Make directory for the given directory path if it doesn\'t already exist in the filesystem. :param absDirPath: (string) Absolute path of the directory to create @exception (Exception) OSError if directory creation fails'
@staticmethod def __makeDirectoryFromAbsolutePath(absDirPath):
assert os.path.isabs(absDirPath)

try:
  os.makedirs(absDirPath)
except OSError as e:
  if e.errno != os.errno.EEXIST:
    raise

return
'Resolves the referenced value. If the result is already cached, returns it to caller. Otherwise, invokes the pure virtual method handleGetValue. If handleGetValue() returns another value-getter, calls that value-getter to resolve the value. This may result in a chain of calls that terminates once the value is fully resolved to a non-value-getter value. Upon return, the value is fully resolved and cached, so subsequent calls will always return the cached value reference. topContainer: The top-level container (dict, tuple, or list [sub-]instance) within whose context the value-getter is applied. Returns: The fully-resolved value that was referenced by the value-getter instance'
def __call__(self, topContainer):
assert not self.__inLookup

if self.__cachedResult is not self.__NoResult:
  return self.__cachedResult

self.__cachedResult = self.handleGetValue(topContainer)

if isinstance(self.__cachedResult, ValueGetterBase):
  valueGetter = self.__cachedResult
  self.__inLookup = True
  self.__cachedResult = valueGetter(topContainer)
  self.__inLookup = False

assert self.__cachedResult is not self.__NoResult
assert not isinstance(self.__cachedResult, ValueGetterBase)

return self.__cachedResult
'A "pure virtual" method. The derived class MUST override this method and return the referenced value. The derived class is NOT responsible for fully resolving the reference\'d value in the event the value resolves to another ValueGetterBase-based instance -- this is handled automatically within ValueGetterBase implementation. topContainer: The top-level container (dict, tuple, or list [sub-]instance) within whose context the value-getter is applied. Returns: The value referenced by this instance (which may be another value-getter instance)'
def handleGetValue(self, topContainer):
raise NotImplementedError('ERROR: ValueGetterBase is an abstract class; '
                          'the derived class MUST override handleGetValue()')
'referenceDict: Explicit reference dictionary that contains the field corresponding to the first key name in dictKeyChain. This may be the result returned by the built-in globals() function, when we desire to look up a dictionary value from a dictionary referenced by a global variable within the calling module. If None is passed for referenceDict, then the topContainer parameter supplied to handleGetValue() will be used as the reference dictionary instead (this allows the desired module to designate the appropriate reference dictionary for the value-getters when it calls applyValueGettersToContainer()) dictKeyChain: One or more strings; the first string is a key (that will eventually be defined) in the reference dictionary. If additional strings are supplied, then the values corresponding to prior key strings must be dictionaries, and each additional string references a sub-dictionary of the former. The final string is the key of the field whose value will be returned by handleGetValue(). NOTE: It\'s possible that the referenced value does not yet exist at the time of instantiation of this class. It will be resolved when the base description.py calls applyValueGettersToContainer(). Example: config = dict( _dsEncoderFieldName2_N = 70, _dsEncoderFieldName2_W = 5, dsEncoderSchema = [ dict( base=dict( fieldname=\'Name2\', type=\'ScalarEncoder\', name=\'Name2\', minval=0, maxval=270, clipInput=True, n=DictValueGetter(None, \'_dsEncoderFieldName2_N\'), w=DictValueGetter(None, \'_dsEncoderFieldName2_W\')), updateConfigFromSubConfig(config) applyValueGettersToContainer(config)'
def __init__(self, referenceDict, *dictKeyChain):
ValueGetterBase.__init__(self)

assert referenceDict is None or isinstance(referenceDict, dict)
assert len(dictKeyChain) >= 1

self.__referenceDict = referenceDict
self.__dictKeyChain = dictKeyChain
'This method overrides ValueGetterBase\'s "pure virtual" method. It returns the referenced value. The derived class is NOT responsible for fully resolving the reference\'d value in the event the value resolves to another ValueGetterBase-based instance -- this is handled automatically within ValueGetterBase implementation. topContainer: The top-level container (dict, tuple, or list [sub-]instance) within whose context the value-getter is applied. If self.__referenceDict is None, then topContainer will be used as the reference dictionary for resolving our dictionary key chain. Returns: The value referenced by this instance (which may be another value-getter instance)'
def handleGetValue(self, topContainer):
value = self.__referenceDict if self.__referenceDict is not None else topContainer

for key in self.__dictKeyChain:
  value = value[key]

return value
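The key-chain walk in handleGetValue() as a standalone sketch (plain function, hypothetical config):

def resolve(container, *dictKeyChain):
  # resolve(cfg, 'a', 'b') is equivalent to cfg['a']['b']
  value = container
  for key in dictKeyChain:
    value = value[key]
  return value

config = {'encoders': {'Name2': {'n': 70, 'w': 5}}}
print(resolve(config, 'encoders', 'Name2', 'n'))  # -> 70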
'dictKeyChain: One or more strings; the first string is a key (that will eventually be defined) in the dictionary that will be passed to applyValueGettersToContainer(). If additional strings are supplied, then the values corresponding to prior key strings must be dictionaries, and each additional string references a sub-dictionary of the former. The final string is the key of the field whose value will be returned by this value-getter NOTE: it\'s possible that the referenced value does not yet exist at the time of instantiation of this class. It will be resolved when the base description.py calls applyValueGettersToContainer(). Example: config = dict( _dsEncoderFieldName2_N = 70, _dsEncoderFieldName2_W = 5, dsEncoderSchema = [ dict( base=dict( fieldname=\'Name2\', type=\'ScalarEncoder\', name=\'Name2\', minval=0, maxval=270, clipInput=True, n=DeferredDictLookup(\'_dsEncoderFieldName2_N\'), w=DeferredDictLookup(\'_dsEncoderFieldName2_W\')), updateConfigFromSubConfig(config) applyValueGettersToContainer(config)'
def __init__(self, *dictKeyChain):
DictValueGetter.__init__(self, None, *dictKeyChain)
'Creates and returns the _IterationPhase-based instance corresponding to this phase specification model: Model instance'
def _getImpl(self, model):
impl = _IterationPhaseLearnOnly(model=model, nIters=self.__nIters)
return impl
'Creates and returns the _IterationPhase-based instance corresponding to this phase specification model: Model instance'
def _getImpl(self, model):
impl = _IterationPhaseInferOnly(model=model, nIters=self.__nIters,
                                inferenceArgs=self.__inferenceArgs)
return impl
'Creates and returns the _IterationPhase-based instance corresponding to this phase specification model: Model instance'
def _getImpl(self, model):
impl = _IterationPhaseLearnAndInfer(model=model, nIters=self.__nIters,
                                    inferenceArgs=self.__inferenceArgs)
return impl
'Replaces the Iteration Cycle phases :param phaseSpecs: Iteration cycle description consisting of a sequence of IterationPhaseSpecXXXXX elements that are performed in the given order'
def replaceIterationCycle(self, phaseSpecs):
self.__phaseManager = _PhaseManager(model=self.__model, phaseSpecs=phaseSpecs)
return
'Performs initial setup activities, including \'setup\' callbacks. This method MUST be called once before the first call to :meth:`handleInputRecord`.'
def setup(self):
for cb in self.__userCallbacks['setup']:
  cb(self.__model)
return
'Perform final activities, including \'finish\' callbacks. This method MUST be called once after the last call to :meth:`handleInputRecord`.'
def finalize(self):
for cb in self.__userCallbacks['finish']:
  cb(self.__model)
return
'Processes the given record according to the current iteration cycle phase :param inputRecord: (object) record expected to be returned from :meth:`nupic.data.record_stream.RecordStreamIface.getNextRecord`. :returns: :class:`nupic.frameworks.opf.opf_utils.ModelResult`'
def handleInputRecord(self, inputRecord):
assert inputRecord, 'Invalid inputRecord: %r' % inputRecord

results = self.__phaseManager.handleInputRecord(inputRecord)
metrics = self.__metricsMgr.update(results)

for cb in self.__userCallbacks['postIter']:
  cb(self.__model)

results.metrics = metrics
return results
'Gets the current metric values :returns: A dictionary of metric values. The key for each entry is the label for the metric spec, as generated by :meth:`nupic.frameworks.opf.metrics.MetricSpec.getLabel`. The value for each entry is a dictionary containing the value of the metric as returned by :meth:`nupic.frameworks.opf.metrics.MetricsIface.getMetric`.'
def getMetrics(self):
return self.__metricsMgr.getMetrics()
':returns: (list) labels for the metrics that are being calculated'
def getMetricLabels(self):
return self.__metricsMgr.getMetricLabels()
'model: Model instance phaseSpecs: Iteration period description consisting of a sequence of IterationPhaseSpecXXXXX elements that are performed in the given order'
def __init__(self, model, phaseSpecs):
self.__model = model

self.__phases = tuple(map(lambda x: x._getImpl(model=model), phaseSpecs))

if self.__phases:
  self.__phaseCycler = itertools.cycle(self.__phases)
  self.__advancePhase()

return
'Advance to the next iteration cycle phase'
def __advancePhase(self):
self.__currentPhase = self.__phaseCycler.next()
self.__currentPhase.enterPhase()
return
'Processes the given record according to the current phase inputRecord: record object formatted according to nupic.data.FileSource.getNext() result format. Returns: An opf_utils.ModelResult object with the inputs and inferences after the current record is processed by the model'
def handleInputRecord(self, inputRecord):
results = self.__model.run(inputRecord)

shouldContinue = self.__currentPhase.advance()
if not shouldContinue:
  self.__advancePhase()

return results
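The phase cycling performed here reduces to cycling through per-phase iteration budgets; a self-contained sketch (hypothetical phase names and counts, not the _PhaseManager class):

import itertools

counts = {'learnOnly': 2, 'inferOnly': 1}
phases = itertools.cycle(['learnOnly', 'inferOnly'])

current = next(phases)
remaining = counts[current]
schedule = []
for _ in range(6):          # six input records
  schedule.append(current)
  remaining -= 1
  if remaining == 0:        # phase exhausted; advance to the next one
    current = next(phases)
    remaining = counts[current]

print(schedule)
# -> ['learnOnly', 'learnOnly', 'inferOnly', 'learnOnly', 'learnOnly', 'inferOnly']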
'nIters: Number of iterations; MUST be greater than 0'
def __init__(self, nIters):
assert nIters > 0, 'nIters=%s' % nIters
self.__nIters = nIters
return
'Performs initialization that is necessary upon entry to the phase. Must be called before handleInputRecord() at the beginning of each phase'
@abstractmethod def enterPhase(self):
self.__iter = iter(xrange(self.__nIters))
self.__iter.next()
'Advances the iteration; Returns: True if more iterations remain; False if this is the final iteration.'
def advance(self):
hasMore = True
try:
  self.__iter.next()
except StopIteration:
  self.__iter = None
  hasMore = False

return hasMore
'model: Model instance nIters: Number of iterations; MUST be greater than 0'
def __init__(self, model, nIters):
super(_IterationPhaseLearnOnly, self).__init__(nIters=nIters)
self.__model = model
return
'[_IterationPhase method implementation] Performs initialization that is necessary upon entry to the phase. Must be called before handleInputRecord() at the beginning of each phase'
def enterPhase(self):
super(_IterationPhaseLearnOnly, self).enterPhase()
self.__model.enableLearning()
self.__model.disableInference()
return
'model: Model instance nIters: Number of iterations; MUST be greater than 0 inferenceArgs: A dictionary of arguments required for inference. These depend on the InferenceType of the current model'
def __init__(self, model, nIters, inferenceArgs):
super(_IterationPhaseInferCommon, self).__init__(nIters=nIters)
self._model = model
self._inferenceArgs = inferenceArgs
return
'[_IterationPhase method implementation] Performs initialization that is necessary upon entry to the phase. Must be called before handleInputRecord() at the beginning of each phase'
def enterPhase(self):
super(_IterationPhaseInferCommon, self).enterPhase()
self._model.enableInference(inferenceArgs=self._inferenceArgs)
return
'model: Model instance nIters: Number of iterations; MUST be greater than 0 inferenceArgs: A dictionary of arguments required for inference. These depend on the InferenceType of the current model'
def __init__(self, model, nIters, inferenceArgs):
super(_IterationPhaseInferOnly, self).__init__(model=model, nIters=nIters,
                                               inferenceArgs=inferenceArgs)
return
'[_IterationPhase method implementation] Performs initialization that is necessary upon entry to the phase. Must be called before handleInputRecord() at the beginning of each phase'
def enterPhase(self):
super(_IterationPhaseInferOnly, self).enterPhase()
self._model.disableLearning()
return
'model: Model instance nIters: Number of iterations; MUST be greater than 0 inferenceArgs: A dictionary of arguments required for inference. These depend on the InferenceType of the current model'
def __init__(self, model, nIters, inferenceArgs):
super(_IterationPhaseLearnAndInfer, self).__init__(model=model, nIters=nIters,
                                                   inferenceArgs=inferenceArgs)
return
'[_IterationPhase method implementation] Performs initialization that is necessary upon entry to the phase. Must be called before handleInputRecord() at the beginning of each phase'
def enterPhase(self):
super(_IterationPhaseLearnAndInfer, self).enterPhase()
self._model.enableLearning()
return
'Since the two-gram has no use for this information, this is a no-op'
def setFieldStatistics(self, fieldStats):
pass
':param proto: capnp TwoGramModelProto message reader'
@classmethod def read(cls, proto):
instance = object.__new__(cls)
super(TwoGramModel, instance).__init__(proto=proto.modelBase)

instance._logger = opf_utils.initLogger(instance)
instance._reset = proto.reset
instance._hashToValueDict = {x.hash: x.value for x in proto.hashToValueDict}
instance._learningEnabled = proto.learningEnabled
instance._encoder = encoders.MultiEncoder.read(proto.encoder)
instance._fieldNames = instance._encoder.getScalarNames()
instance._prevValues = list(proto.prevValues)

instance._twoGramDicts = [dict() for _ in xrange(len(proto.twoGramDicts))]
for idx, field in enumerate(proto.twoGramDicts):
  for entry in field:
    prev = None if entry.value == -1 else entry.value
    instance._twoGramDicts[idx][prev] = collections.defaultdict(int)
    for bucket in entry.buckets:
      instance._twoGramDicts[idx][prev][bucket.index] = bucket.count

return instance
':param proto: capnp TwoGramModelProto message builder'
def write(self, proto):
super(TwoGramModel, self).writeBaseToProto(proto.modelBase)

proto.reset = self._reset
proto.learningEnabled = self._learningEnabled
proto.prevValues = self._prevValues
self._encoder.write(proto.encoder)
proto.hashToValueDict = [{'hash': h, 'value': v}
                         for h, v in self._hashToValueDict.items()]

twoGramDicts = []
for items in self._twoGramDicts:
  twoGramArr = []
  for prev, values in items.iteritems():
    buckets = [{'index': index, 'count': count}
               for index, count in values.iteritems()]
    if prev is None:
      prev = -1
    twoGramArr.append({'value': prev, 'buckets': buckets})
  twoGramDicts.append(twoGramArr)

proto.twoGramDicts = twoGramDicts
'net: The CLA Network instance statsCollectors: Sequence of 0 or more CLAStatistic-based instances'
def __init__(self, net, statsCollectors):
self.net = net
self.statsCollectors = statsCollectors
return
':param network: if not None, the deserialized nupic.engine.Network instance to use instead of creating a new Network :param baseProto: if not None, capnp ModelProto message reader for deserializing; supersedes inferenceType'
def __init__(self, sensorParams={}, inferenceType=InferenceType.TemporalNextStep, spEnable=True, spParams={}, trainSPNetOnlyIfRequested=False, tmEnable=True, tmParams={}, clEnable=True, clParams={}, anomalyParams={}, minLikelihoodThreshold=DEFAULT_LIKELIHOOD_THRESHOLD, maxPredictionsPerStep=DEFAULT_MAX_PREDICTIONS_PER_STEP, network=None, baseProto=None):
if inferenceType not in self.__supportedInferenceKindSet:
  raise ValueError('{0} received incompatible inference type: {1}'.format(
      self.__class__, inferenceType))

# Call super class constructor
if baseProto is None:
  super(HTMPredictionModel, self).__init__(inferenceType)
else:
  super(HTMPredictionModel, self).__init__(proto=baseProto)

self.__restoringFromState = False
self.__restoringFromV1 = False

# Initialize logging
self.__logger = initLogger(self)
self.__logger.debug('Instantiating %s.' % self.__myClassName)

self._minLikelihoodThreshold = minLikelihoodThreshold
self._maxPredictionsPerStep = maxPredictionsPerStep

# Set up learning parameters
self.__spLearningEnabled = bool(spEnable)
self.__tpLearningEnabled = bool(tmEnable)

# Explicitly exclude the TM if this type of inference doesn't require it
if (not InferenceType.isTemporal(self.getInferenceType()) or
    self.getInferenceType() == InferenceType.NontemporalMultiStep):
  tmEnable = False

self._netInfo = None
self._hasSP = spEnable
self._hasTP = tmEnable
self._hasCL = clEnable

self._classifierInputEncoder = None
self._predictedFieldIdx = None
self._predictedFieldName = None
self._numFields = None

# Create the network, or wrap the one that was passed in
if network is not None:
  self._netInfo = NetworkInfo(net=network, statsCollectors=[])
else:
  self._netInfo = self.__createHTMNetwork(
      sensorParams, spEnable, spParams, tmEnable, tmParams, clEnable,
      clParams, anomalyParams)

# Initialize anomaly detection parameters
if self.getInferenceType() == InferenceType.NontemporalAnomaly:
  self._getSPRegion().setParameter('anomalyMode', True)
if self.getInferenceType() == InferenceType.TemporalAnomaly:
  self._getTPRegion().setParameter('anomalyMode', True)

self.__trainSPNetOnlyIfRequested = trainSPNetOnlyIfRequested
self.__numRunCalls = 0
self.__finishedLearning = False

self.__logger.debug('Instantiated %s' % self.__class__.__name__)

self._input = None

return
'Currently only supports a parameter named ``__numRunCalls``. :param paramName: (string) name of parameter to get. If not ``__numRunCalls`` an exception is thrown. :returns: (int) the value of ``self.__numRunCalls``'
def getParameter(self, paramName):
if paramName == '__numRunCalls':
  return self.__numRunCalls
else:
  raise RuntimeError("'%s' parameter is not exposed by htm_prediction_model." %
                     paramName)
'Set a parameter of the anomaly classifier within this model. :param param: (string) name of parameter to set :param value: (object) value to set'
@requireAnomalyModel def setAnomalyParameter(self, param, value):
self._getAnomalyClassifier().setParameter(param, value)
'Get a parameter of the anomaly classifier within this model by key. :param param: (string) name of parameter to retrieve'
@requireAnomalyModel def getAnomalyParameter(self, param):
return self._getAnomalyClassifier().getParameter(param)
'Remove labels from the anomaly classifier within this model. Removes all records if ``labelFilter==None``, otherwise only removes the labels equal to ``labelFilter``. :param start: (int) index to start removing labels :param end: (int) index to end removing labels :param labelFilter: (string) If specified, only removes records that match'
@requireAnomalyModel def anomalyRemoveLabels(self, start, end, labelFilter):
self._getAnomalyClassifier().getSelf().removeLabels(start, end, labelFilter)
'Add labels from the anomaly classifier within this model. :param start: (int) index to start label :param end: (int) index to end label :param labelName: (string) name of label'
@requireAnomalyModel def anomalyAddLabel(self, start, end, labelName):
self._getAnomalyClassifier().getSelf().addLabel(start, end, labelName)
'Get labels from the anomaly classifier within this model. :param start: (int) index to start getting labels :param end: (int) index to end getting labels'
@requireAnomalyModel def anomalyGetLabels(self, start, end):
return self._getAnomalyClassifier().getSelf().getLabels(start, end)