Columns: text_prompt (string, lengths 100 to 17.7k) · code_prompt (string, lengths 7 to 9.86k)
<SYSTEM_TASK:> Checks whether the path is valid for Message Descriptor. <END_TASK> <USER_TASK:> Description: def _IsValidPath(message_descriptor, path): """Checks whether the path is valid for Message Descriptor."""
    parts = path.split('.')
    last = parts.pop()
    for name in parts:
        # .get() so that unknown field names return False instead of raising KeyError.
        field = message_descriptor.fields_by_name.get(name)
        if (field is None or
                field.label == FieldDescriptor.LABEL_REPEATED or
                field.type != FieldDescriptor.TYPE_MESSAGE):
            return False
        message_descriptor = field.message_type
    return last in message_descriptor.fields_by_name
<SYSTEM_TASK:> Raises ValueError if message is not a FieldMask. <END_TASK> <USER_TASK:> Description: def _CheckFieldMaskMessage(message): """Raises ValueError if message is not a FieldMask."""
    message_descriptor = message.DESCRIPTOR
    if (message_descriptor.name != 'FieldMask' or
            message_descriptor.file.name != 'google/protobuf/field_mask.proto'):
        raise ValueError('Message {0} is not a FieldMask.'.format(
            message_descriptor.full_name))
<SYSTEM_TASK:> Merge all fields specified by a sub-tree from source to destination. <END_TASK> <USER_TASK:> Description: def _MergeMessage( node, source, destination, replace_message, replace_repeated): """Merge all fields specified by a sub-tree from source to destination."""
    source_descriptor = source.DESCRIPTOR
    for name in node:
        child = node[name]
        # .get() keeps the explicit error below reachable for unknown field names.
        field = source_descriptor.fields_by_name.get(name)
        if field is None:
            raise ValueError('Error: Can\'t find field {0} in message {1}.'.format(
                name, source_descriptor.full_name))
        if child:
            # Sub-paths are only allowed for singular message fields.
            if (field.label == FieldDescriptor.LABEL_REPEATED or
                    field.cpp_type != FieldDescriptor.CPPTYPE_MESSAGE):
                raise ValueError('Error: Field {0} in message {1} is not a singular '
                                 'message field and cannot have sub-fields.'.format(
                                     name, source_descriptor.full_name))
            _MergeMessage(
                child, getattr(source, name), getattr(destination, name),
                replace_message, replace_repeated)
            continue
        if field.label == FieldDescriptor.LABEL_REPEATED:
            if replace_repeated:
                destination.ClearField(_StrConvert(name))
            repeated_source = getattr(source, name)
            repeated_destination = getattr(destination, name)
            if field.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE:
                for item in repeated_source:
                    repeated_destination.add().MergeFrom(item)
            else:
                repeated_destination.extend(repeated_source)
        else:
            if field.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE:
                if replace_message:
                    destination.ClearField(_StrConvert(name))
                if source.HasField(name):
                    getattr(destination, name).MergeFrom(getattr(source, name))
            else:
                setattr(destination, name, getattr(source, name))
<SYSTEM_TASK:> Adds the field paths descended from node to field_mask. <END_TASK> <USER_TASK:> Description: def _AddFieldPaths(node, prefix, field_mask): """Adds the field paths descended from node to field_mask."""
    if not node:
        field_mask.paths.append(prefix)
        return
    for name in sorted(node):
        if prefix:
            child_path = prefix + '.' + name
        else:
            child_path = name
        _AddFieldPaths(node[name], child_path, field_mask)
<SYSTEM_TASK:> Packs the specified message into current Any message. <END_TASK> <USER_TASK:> Description: def Pack(self, msg, type_url_prefix='type.googleapis.com/'): """Packs the specified message into current Any message."""
    if len(type_url_prefix) < 1 or type_url_prefix[-1] != '/':
        self.type_url = '%s/%s' % (type_url_prefix, msg.DESCRIPTOR.full_name)
    else:
        self.type_url = '%s%s' % (type_url_prefix, msg.DESCRIPTOR.full_name)
    self.value = msg.SerializeToString()
<SYSTEM_TASK:> Unpacks the current Any message into specified message. <END_TASK> <USER_TASK:> Description: def Unpack(self, msg): """Unpacks the current Any message into specified message."""
    descriptor = msg.DESCRIPTOR
    if not self.Is(descriptor):
        return False
    msg.ParseFromString(self.value)
    return True
<SYSTEM_TASK:> Converts Timestamp to RFC 3339 date string format. <END_TASK> <USER_TASK:> Description: def ToJsonString(self): """Converts Timestamp to RFC 3339 date string format. Returns: A string converted from timestamp. The string is always Z-normalized and uses 3, 6 or 9 fractional digits as required to represent the exact time. Example of the return format: '1972-01-01T10:00:20.021Z' """
    nanos = self.nanos % _NANOS_PER_SECOND
    total_sec = self.seconds + (self.nanos - nanos) // _NANOS_PER_SECOND
    seconds = total_sec % _SECONDS_PER_DAY
    days = (total_sec - seconds) // _SECONDS_PER_DAY
    dt = datetime(1970, 1, 1) + timedelta(days, seconds)
    result = dt.isoformat()
    if (nanos % 1e9) == 0:
        # If there are 0 fractional digits, the fractional
        # point '.' should be omitted when serializing.
        return result + 'Z'
    if (nanos % 1e6) == 0:
        # Serialize 3 fractional digits.
        return result + '.%03dZ' % (nanos / 1e6)
    if (nanos % 1e3) == 0:
        # Serialize 6 fractional digits.
        return result + '.%06dZ' % (nanos / 1e3)
    # Serialize 9 fractional digits.
    return result + '.%09dZ' % nanos
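A minimal standalone sketch (not the protobuf implementation) of the 0/3/6/9 fractional-digit rule used above; the helper name _format_fraction is purely illustrative.

def _format_fraction(nanos):
    """Return the fractional-seconds suffix for a nanosecond remainder."""
    if nanos % 1000000000 == 0:
        return ''                            # whole seconds: omit the '.'
    if nanos % 1000000 == 0:
        return '.%03d' % (nanos // 1000000)  # millisecond precision -> 3 digits
    if nanos % 1000 == 0:
        return '.%06d' % (nanos // 1000)     # microsecond precision -> 6 digits
    return '.%09d' % nanos                   # nanosecond precision  -> 9 digits

assert _format_fraction(0) == ''
assert _format_fraction(21000000) == '.021'
assert _format_fraction(21500000) == '.021500'
assert _format_fraction(21500001) == '.021500001'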
<SYSTEM_TASK:> Parse a RFC 3339 date string format to Timestamp. <END_TASK> <USER_TASK:> Description: def FromJsonString(self, value): """Parse a RFC 3339 date string format to Timestamp. Args: value: A date string. Any fractional digits (or none) and any offset are accepted as long as they fit into nano-seconds precision. Example of accepted format: '1972-01-01T10:00:20.021-05:00' Raises: ParseError: On parsing problems. """
    timezone_offset = value.find('Z')
    if timezone_offset == -1:
        timezone_offset = value.find('+')
    if timezone_offset == -1:
        timezone_offset = value.rfind('-')
    if timezone_offset == -1:
        raise ParseError(
            'Failed to parse timestamp: missing valid timezone offset.')
    time_value = value[0:timezone_offset]
    # Parse datetime and nanos.
    point_position = time_value.find('.')
    if point_position == -1:
        second_value = time_value
        nano_value = ''
    else:
        second_value = time_value[:point_position]
        nano_value = time_value[point_position + 1:]
    date_object = datetime.strptime(second_value, _TIMESTAMPFOMAT)
    td = date_object - datetime(1970, 1, 1)
    seconds = td.seconds + td.days * _SECONDS_PER_DAY
    if len(nano_value) > 9:
        raise ParseError(
            'Failed to parse Timestamp: nanos {0} more than '
            '9 fractional digits.'.format(nano_value))
    if nano_value:
        nanos = round(float('0.' + nano_value) * 1e9)
    else:
        nanos = 0
    # Parse timezone offsets.
    if value[timezone_offset] == 'Z':
        if len(value) != timezone_offset + 1:
            raise ParseError('Failed to parse timestamp: invalid trailing'
                             ' data {0}.'.format(value))
    else:
        timezone = value[timezone_offset:]
        pos = timezone.find(':')
        if pos == -1:
            raise ParseError(
                'Invalid timezone offset value: {0}.'.format(timezone))
        if timezone[0] == '+':
            seconds -= (int(timezone[1:pos]) * 60 + int(timezone[pos + 1:])) * 60
        else:
            seconds += (int(timezone[1:pos]) * 60 + int(timezone[pos + 1:])) * 60
    # Set seconds and nanos.
    self.seconds = int(seconds)
    self.nanos = int(nanos)
<SYSTEM_TASK:> Converts a Duration to microseconds. <END_TASK> <USER_TASK:> Description: def ToMicroseconds(self): """Converts a Duration to microseconds."""
    micros = _RoundTowardZero(self.nanos, _NANOS_PER_MICROSECOND)
    return self.seconds * _MICROS_PER_SECOND + micros
<SYSTEM_TASK:> Converts a Duration to milliseconds. <END_TASK> <USER_TASK:> Description: def ToMilliseconds(self): """Converts a Duration to milliseconds."""
    millis = _RoundTowardZero(self.nanos, _NANOS_PER_MILLISECOND)
    return self.seconds * _MILLIS_PER_SECOND + millis
<SYSTEM_TASK:> Converts milliseconds to Duration. <END_TASK> <USER_TASK:> Description: def FromMilliseconds(self, millis): """Converts milliseconds to Duration."""
self._NormalizeDuration( millis // _MILLIS_PER_SECOND, (millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND)
<SYSTEM_TASK:> Set Duration by seconds and nanos. <END_TASK> <USER_TASK:> Description: def _NormalizeDuration(self, seconds, nanos): """Set Duration by seconds and nanos."""
    # Force nanos to be negative if the duration is negative.
    if seconds < 0 and nanos > 0:
        seconds += 1
        nanos -= _NANOS_PER_SECOND
    self.seconds = seconds
    self.nanos = nanos
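A quick sketch of the sign convention this normalization enforces, using the real google.protobuf Duration well-known type; the printed values are what tracing the code above predicts, not outputs taken from the source.

from google.protobuf import duration_pb2

d = duration_pb2.Duration()
d.FromMilliseconds(-1500)      # -1.5 seconds
# After normalization, seconds and nanos carry the same sign:
print(d.seconds, d.nanos)      # expected: -1 -500000000
print(d.ToMilliseconds())      # expected: -1500 (round-trips)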
<SYSTEM_TASK:> Checks whether the FieldMask is valid for Message Descriptor. <END_TASK> <USER_TASK:> Description: def IsValidForDescriptor(self, message_descriptor): """Checks whether the FieldMask is valid for Message Descriptor."""
    for path in self.paths:
        if not _IsValidPath(message_descriptor, path):
            return False
    return True
<SYSTEM_TASK:> Gets all direct fields of Message Descriptor to FieldMask. <END_TASK> <USER_TASK:> Description: def AllFieldsFromDescriptor(self, message_descriptor): """Gets all direct fields of Message Descriptor to FieldMask."""
    self.Clear()
    for field in message_descriptor.fields:
        self.paths.append(field.name)
<SYSTEM_TASK:> Merges mask1 and mask2 into this FieldMask. <END_TASK> <USER_TASK:> Description: def Union(self, mask1, mask2): """Merges mask1 and mask2 into this FieldMask."""
    _CheckFieldMaskMessage(mask1)
    _CheckFieldMaskMessage(mask2)
    tree = _FieldMaskTree(mask1)
    tree.MergeFromFieldMask(mask2)
    tree.ToFieldMask(self)
<SYSTEM_TASK:> Intersects mask1 and mask2 into this FieldMask. <END_TASK> <USER_TASK:> Description: def Intersect(self, mask1, mask2): """Intersects mask1 and mask2 into this FieldMask."""
    _CheckFieldMaskMessage(mask1)
    _CheckFieldMaskMessage(mask2)
    tree = _FieldMaskTree(mask1)
    intersection = _FieldMaskTree()
    for path in mask2.paths:
        tree.IntersectPath(path, intersection)
    intersection.ToFieldMask(self)
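A short usage sketch with the real field_mask_pb2 type; the expected path lists are what the tree logic above produces for these inputs, stated here as an assumption rather than quoted output.

from google.protobuf import field_mask_pb2

mask1 = field_mask_pb2.FieldMask(paths=['a.b', 'c'])
mask2 = field_mask_pb2.FieldMask(paths=['a', 'd'])

union = field_mask_pb2.FieldMask()
union.Union(mask1, mask2)       # 'a' absorbs the narrower 'a.b'
print(list(union.paths))        # expected: ['a', 'c', 'd']

inter = field_mask_pb2.FieldMask()
inter.Intersect(mask1, mask2)   # only the overlap survives
print(list(inter.paths))        # expected: ['a.b']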
<SYSTEM_TASK:> Merges fields specified in FieldMask from source to destination. <END_TASK> <USER_TASK:> Description: def MergeMessage( self, source, destination, replace_message_field=False, replace_repeated_field=False): """Merges fields specified in FieldMask from source to destination. Args: source: Source message. destination: The destination message to be merged into. replace_message_field: Replace message field if True. Merge message field if False. replace_repeated_field: Replace repeated field if True. Append elements of repeated field if False. """
    tree = _FieldMaskTree(self)
    tree.MergeMessage(
        source, destination, replace_message_field, replace_repeated_field)
<SYSTEM_TASK:> Adds a field path into the tree. <END_TASK> <USER_TASK:> Description: def AddPath(self, path): """Adds a field path into the tree. If the field path to add is a sub-path of an existing field path in the tree (i.e., a leaf node), it means the tree already matches the given path so nothing will be added to the tree. If the path matches an existing non-leaf node in the tree, that non-leaf node will be turned into a leaf node with all its children removed because the path matches all the node's children. Otherwise, a new path will be added. Args: path: The field path to add. """
    node = self._root
    for name in path.split('.'):
        if name not in node:
            node[name] = {}
        elif not node[name]:
            # Pre-existing empty node implies we already have this entire tree.
            return
        node = node[name]
    # Remove any sub-trees we might have had.
    node.clear()
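The tree is just nested dicts, with an empty dict marking a leaf. A standalone toy version (not the protobuf class) showing the three cases the docstring describes:

def add_path(root, path):
    # Toy re-implementation of _FieldMaskTree.AddPath, for illustration only.
    node = root
    for name in path.split('.'):
        if name not in node:
            node[name] = {}
        elif not node[name]:
            return          # an existing leaf already covers this path
        node = node[name]
    node.clear()            # the new path now covers everything below it

tree = {}
add_path(tree, 'a.b')
print(tree)                 # {'a': {'b': {}}}
add_path(tree, 'a.b.c')     # covered by the leaf 'a.b' -> no change
print(tree)                 # {'a': {'b': {}}}
add_path(tree, 'a')         # matches a non-leaf -> its children are dropped
print(tree)                 # {'a': {}}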
<SYSTEM_TASK:> Calculates the intersection part of a field path with this tree. <END_TASK> <USER_TASK:> Description: def IntersectPath(self, path, intersection): """Calculates the intersection part of a field path with this tree. Args: path: The field path to calculates. intersection: The out tree to record the intersection part. """
    node = self._root
    for name in path.split('.'):
        if name not in node:
            return
        elif not node[name]:
            intersection.AddPath(path)
            return
        node = node[name]
    intersection.AddLeafNodes(path, node)
<SYSTEM_TASK:> Adds leaf nodes begin with prefix to this tree. <END_TASK> <USER_TASK:> Description: def AddLeafNodes(self, prefix, node): """Adds leaf nodes begin with prefix to this tree."""
    if not node:
        self.AddPath(prefix)
    for name in node:
        child_path = prefix + '.' + name
        self.AddLeafNodes(child_path, node[name])
<SYSTEM_TASK:> Merge all fields specified by this tree from source to destination. <END_TASK> <USER_TASK:> Description: def MergeMessage( self, source, destination, replace_message, replace_repeated): """Merge all fields specified by this tree from source to destination."""
_MergeMessage( self._root, source, destination, replace_message, replace_repeated)
<SYSTEM_TASK:> Return target value predictions for ``dataset``, using the trained <END_TASK> <USER_TASK:> Description: def predict(self, dataset, missing_value_action='auto'): """ Return target value predictions for ``dataset``, using the trained linear regression model. This method can be used to get fitted values for the model by inputting the training dataset. Parameters ---------- dataset : SFrame | pandas.Dataframe Dataset of new observations. Must include columns with the same names as the features used for model training, but does not require a target column. Additional columns are ignored. missing_value_action : str, optional Action to perform when missing values are encountered. This can be one of: - 'auto': Default to 'impute' - 'impute': Proceed with evaluation by filling in the missing values with the mean of the training data. Missing values are also imputed if an entire column of data is missing during evaluation. - 'error': Do not proceed with prediction and terminate with an error message. Returns ------- out : SArray Predicted target value for each example (i.e. row) in the dataset. See Also ---------- create, evaluate Examples ---------- >>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv') >>> model = turicreate.linear_regression.create(data, target='price', features=['bath', 'bedroom', 'size']) >>> results = model.predict(data) """
return super(LinearRegression, self).predict(dataset, missing_value_action=missing_value_action)
<SYSTEM_TASK:> r"""Evaluate the model by making target value predictions and comparing <END_TASK> <USER_TASK:> Description: def evaluate(self, dataset, metric='auto', missing_value_action='auto'): r"""Evaluate the model by making target value predictions and comparing to actual values. Two metrics are used to evaluate linear regression models. The first is root-mean-squared error (RMSE) while the second is the absolute value of the maximum error between the actual and predicted values. Let :math:`y` and :math:`\hat{y}` denote vectors of length :math:`N` (number of examples) with actual and predicted values. The RMSE is defined as: .. math:: RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^N (\widehat{y}_i - y_i)^2} while the max-error is defined as .. math:: max-error = \max_{i=1}^N \|\widehat{y}_i - y_i\| Parameters ---------- dataset : SFrame Dataset of new observations. Must include columns with the same names as the target and features used for model training. Additional columns are ignored. metric : str, optional Name of the evaluation metric. Possible values are: - 'auto': Compute all metrics. - 'rmse': Rooted mean squared error. - 'max_error': Maximum error. missing_value_action : str, optional Action to perform when missing values are encountered. This can be one of: - 'auto': Default to 'impute' - 'impute': Proceed with evaluation by filling in the missing values with the mean of the training data. Missing values are also imputed if an entire column of data is missing during evaluation. - 'error': Do not proceed with evaluation and terminate with an error message. Returns ------- out : dict Results from model evaluation procedure. See Also ---------- create, predict Examples ---------- >>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv') >>> model = turicreate.linear_regression.create(data, target='price', features=['bath', 'bedroom', 'size']) >>> results = model.evaluate(data) """
    _raise_error_evaluation_metric_is_valid(metric, ['auto', 'rmse', 'max_error'])
    return super(LinearRegression, self).evaluate(
        dataset, missing_value_action=missing_value_action, metric=metric)
<SYSTEM_TASK:> Convert array into a sequence of successive possibly overlapping frames. <END_TASK> <USER_TASK:> Description: def frame(data, window_length, hop_length): """Convert array into a sequence of successive possibly overlapping frames. An n-dimensional array of shape (num_samples, ...) is converted into an (n+1)-D array of shape (num_frames, window_length, ...), where each frame starts hop_length points after the preceding one. This is accomplished using stride_tricks, so the original data is not copied. However, there is no zero-padding, so any incomplete frames at the end are not included. Args: data: np.array of dimension N >= 1. window_length: Number of samples in each frame. hop_length: Advance (in samples) between each window. Returns: (N+1)-D np.array with as many rows as there are complete frames that can be extracted. """
    num_samples = data.shape[0]
    num_frames = 1 + int(np.floor((num_samples - window_length) / hop_length))
    shape = (num_frames, window_length) + data.shape[1:]
    strides = (data.strides[0] * hop_length,) + data.strides
    return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
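A quick shape check, self-contained apart from NumPy; the function body is repeated here so the snippet runs on its own. Because the result is a strided view, no data is copied.

import numpy as np

def frame(data, window_length, hop_length):
    # Same logic as the function above.
    num_samples = data.shape[0]
    num_frames = 1 + int(np.floor((num_samples - window_length) / hop_length))
    shape = (num_frames, window_length) + data.shape[1:]
    strides = (data.strides[0] * hop_length,) + data.strides
    return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)

x = np.arange(10)
f = frame(x, window_length=4, hop_length=2)
print(f.shape)   # (4, 4): frames start at samples 0, 2, 4, 6
print(f[1])      # [2 3 4 5]
# The incomplete frame starting at sample 8 is dropped (no zero-padding).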
<SYSTEM_TASK:> Calculate a "periodic" Hann window. <END_TASK> <USER_TASK:> Description: def periodic_hann(window_length): """Calculate a "periodic" Hann window. The classic Hann window is defined as a raised cosine that starts and ends on zero, and where every value appears twice, except the middle point for an odd-length window. Matlab calls this a "symmetric" window and np.hanning() returns it. However, for Fourier analysis, this actually represents just over one cycle of a period N-1 cosine, and thus is not compactly expressed on a length-N Fourier basis. Instead, it's better to use a raised cosine that ends just before the final zero value - i.e. a complete cycle of a period-N cosine. Matlab calls this a "periodic" window. This routine calculates it. Args: window_length: The number of points in the returned window. Returns: A 1D np.array containing the periodic hann window. """
return 0.5 - (0.5 * np.cos(2 * np.pi / window_length * np.arange(window_length)))
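One way to sanity-check the definition above (a numerical check, not a claim from the source): the length-N periodic window equals the first N points of a length-(N+1) symmetric np.hanning window.

import numpy as np

N = 400
periodic = 0.5 - 0.5 * np.cos(2 * np.pi / N * np.arange(N))
symmetric = np.hanning(N + 1)                  # period-N raised cosine on N+1 points
print(np.allclose(periodic, symmetric[:N]))    # True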
<SYSTEM_TASK:> Calculate the short-time Fourier transform magnitude. <END_TASK> <USER_TASK:> Description: def stft_magnitude(signal, fft_length, hop_length=None, window_length=None): """Calculate the short-time Fourier transform magnitude. Args: signal: 1D np.array of the input time-domain signal. fft_length: Size of the FFT to apply. hop_length: Advance (in samples) between each frame passed to FFT. window_length: Length of each block of samples to pass to FFT. Returns: 2D np.array where each row contains the magnitudes of the fft_length/2+1 unique values of the FFT for the corresponding frame of input samples. """
    frames = frame(signal, window_length, hop_length)
    # Apply frame window to each frame. We use a periodic Hann (cosine of period
    # window_length) instead of the symmetric Hann of np.hanning (period
    # window_length-1).
    window = periodic_hann(window_length)
    windowed_frames = frames * window
    return np.abs(np.fft.rfft(windowed_frames, int(fft_length)))
<SYSTEM_TASK:> Return a matrix that can post-multiply spectrogram rows to make mel. <END_TASK> <USER_TASK:> Description: def spectrogram_to_mel_matrix(num_mel_bins=20, num_spectrogram_bins=129, audio_sample_rate=8000, lower_edge_hertz=125.0, upper_edge_hertz=3800.0): """Return a matrix that can post-multiply spectrogram rows to make mel. Returns a np.array matrix A that can be used to post-multiply a matrix S of spectrogram values (STFT magnitudes) arranged as frames x bins to generate a "mel spectrogram" M of frames x num_mel_bins. M = S A. The classic HTK algorithm exploits the complementarity of adjacent mel bands to multiply each FFT bin by only one mel weight, then add it, with positive and negative signs, to the two adjacent mel bands to which that bin contributes. Here, by expressing this operation as a matrix multiply, we go from num_fft multiplies per frame (plus around 2*num_fft adds) to around num_fft^2 multiplies and adds. However, because these are all presumably accomplished in a single call to np.dot(), it's not clear which approach is faster in Python. The matrix multiplication has the attraction of being more general and flexible, and much easier to read. Args: num_mel_bins: How many bands in the resulting mel spectrum. This is the number of columns in the output matrix. num_spectrogram_bins: How many bins there are in the source spectrogram data, which is understood to be fft_size/2 + 1, i.e. the spectrogram only contains the nonredundant FFT bins. audio_sample_rate: Samples per second of the audio at the input to the spectrogram. We need this to figure out the actual frequencies for each spectrogram bin, which dictates how they are mapped into mel. lower_edge_hertz: Lower bound on the frequencies to be included in the mel spectrum. This corresponds to the lower edge of the lowest triangular band. upper_edge_hertz: The desired top edge of the highest frequency band. Returns: An np.array with shape (num_spectrogram_bins, num_mel_bins). Raises: ValueError: if frequency edges are incorrectly ordered or out of range. """
    nyquist_hertz = audio_sample_rate / 2.
    if lower_edge_hertz < 0.0:
        raise ValueError("lower_edge_hertz %.1f must be >= 0" % lower_edge_hertz)
    if lower_edge_hertz >= upper_edge_hertz:
        raise ValueError("lower_edge_hertz %.1f >= upper_edge_hertz %.1f" %
                         (lower_edge_hertz, upper_edge_hertz))
    if upper_edge_hertz > nyquist_hertz:
        raise ValueError("upper_edge_hertz %.1f is greater than Nyquist %.1f" %
                         (upper_edge_hertz, nyquist_hertz))
    spectrogram_bins_hertz = np.linspace(0.0, nyquist_hertz, num_spectrogram_bins)
    spectrogram_bins_mel = hertz_to_mel(spectrogram_bins_hertz)
    # The i'th mel band (starting from i=1) has center frequency
    # band_edges_mel[i], lower edge band_edges_mel[i-1], and higher edge
    # band_edges_mel[i+1].  Thus, we need num_mel_bins + 2 values in
    # the band_edges_mel arrays.
    band_edges_mel = np.linspace(hertz_to_mel(lower_edge_hertz),
                                 hertz_to_mel(upper_edge_hertz), num_mel_bins + 2)
    # Matrix to post-multiply feature arrays whose rows are num_spectrogram_bins
    # of spectrogram values.
    mel_weights_matrix = np.empty((num_spectrogram_bins, num_mel_bins))
    for i in range(num_mel_bins):
        lower_edge_mel, center_mel, upper_edge_mel = band_edges_mel[i:i + 3]
        # Calculate lower and upper slopes for every spectrogram bin.
        # Line segments are linear in the *mel* domain, not hertz.
        lower_slope = ((spectrogram_bins_mel - lower_edge_mel) /
                       (center_mel - lower_edge_mel))
        upper_slope = ((upper_edge_mel - spectrogram_bins_mel) /
                       (upper_edge_mel - center_mel))
        # .. then intersect them with each other and zero.
        mel_weights_matrix[:, i] = np.maximum(0.0, np.minimum(lower_slope,
                                                              upper_slope))
    # HTK excludes the spectrogram DC bin; make sure it always gets a zero
    # coefficient.
    mel_weights_matrix[0, :] = 0.0
    return mel_weights_matrix
<SYSTEM_TASK:> Convert waveform to a log magnitude mel-frequency spectrogram. <END_TASK> <USER_TASK:> Description: def log_mel_spectrogram(data, audio_sample_rate=8000, log_offset=0.0, window_length_secs=0.025, hop_length_secs=0.010, **kwargs): """Convert waveform to a log magnitude mel-frequency spectrogram. Args: data: 1D np.array of waveform data. audio_sample_rate: The sampling rate of data. log_offset: Add this to values when taking log to avoid -Infs. window_length_secs: Duration of each window to analyze. hop_length_secs: Advance between successive analysis windows. **kwargs: Additional arguments to pass to spectrogram_to_mel_matrix. Returns: 2D np.array of (num_frames, num_mel_bins) consisting of log mel filterbank magnitudes for successive frames. """
    window_length_samples = int(round(audio_sample_rate * window_length_secs))
    hop_length_samples = int(round(audio_sample_rate * hop_length_secs))
    fft_length = 2 ** int(np.ceil(np.log(window_length_samples) / np.log(2.0)))
    spectrogram = stft_magnitude(
        data,
        fft_length=fft_length,
        hop_length=hop_length_samples,
        window_length=window_length_samples)
    mel_spectrogram = np.dot(spectrogram, spectrogram_to_mel_matrix(
        num_spectrogram_bins=spectrogram.shape[1],
        audio_sample_rate=audio_sample_rate,
        **kwargs))
    return np.log(mel_spectrogram + log_offset)
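An end-to-end usage sketch. It assumes the functions above (frame, periodic_hann, stft_magnitude, hertz_to_mel, spectrogram_to_mel_matrix, log_mel_spectrogram) live in one importable module, called mel_features here purely as a placeholder name.

import numpy as np
import mel_features  # placeholder module name for the functions above

sr = 16000
t = np.arange(sr) / float(sr)                  # one second of audio
waveform = 0.5 * np.sin(2 * np.pi * 440 * t)   # a 440 Hz tone

log_mel = mel_features.log_mel_spectrogram(
    waveform,
    audio_sample_rate=sr,
    log_offset=0.01,             # avoids log(0)
    window_length_secs=0.025,    # 400-sample windows
    hop_length_secs=0.010,       # 160-sample hop
    num_mel_bins=64,             # forwarded to spectrogram_to_mel_matrix
    lower_edge_hertz=125.0,
    upper_edge_hertz=7500.0)
print(log_mel.shape)             # expected (98, 64) with these settings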
<SYSTEM_TASK:> Return a classification, for each ``prediction_window`` examples in the <END_TASK> <USER_TASK:> Description: def classify(self, dataset, output_frequency='per_row'): """ Return a classification, for each ``prediction_window`` examples in the ``dataset``, using the trained activity classification model. The output SFrame contains predictions as both class labels as well as probabilities that the predicted value is the associated label. Parameters ---------- dataset : SFrame Dataset of new observations. Must include columns with the same names as the features and session id used for model training, but does not require a target column. Additional columns are ignored. output_frequency : {'per_row', 'per_window'}, optional The frequency of the predictions which is one of: - 'per_row': Each prediction is returned ``prediction_window`` times. - 'per_window': Return a single prediction for each ``prediction_window`` rows in ``dataset`` per ``session_id``. Returns ------- out : SFrame An SFrame with model predictions i.e class labels and probabilities. See Also ---------- create, evaluate, predict Examples ---------- >>> classes = model.classify(data) """
    _tkutl._check_categorical_option_type(
        'output_frequency', output_frequency, ['per_window', 'per_row'])
    id_target_map = self._id_target_map
    preds = self.predict(
        dataset, output_type='probability_vector',
        output_frequency=output_frequency)

    if output_frequency == 'per_row':
        return _SFrame({
            'class': preds.apply(lambda p: id_target_map[_np.argmax(p)]),
            'probability': preds.apply(_np.max)
        })
    elif output_frequency == 'per_window':
        preds['class'] = preds['probability_vector'].apply(
            lambda p: id_target_map[_np.argmax(p)])
        preds['probability'] = preds['probability_vector'].apply(_np.max)
        preds = preds.remove_column('probability_vector')
        return preds
<SYSTEM_TASK:> Count the occurrances of the different characters in the files <END_TASK> <USER_TASK:> Description: def count_characters(root, out): """Count the occurrances of the different characters in the files"""
    if os.path.isfile(root):
        with open(root, 'rb') as in_f:
            for line in in_f:
                for char in line:
                    if char not in out:
                        out[char] = 0
                    out[char] = out[char] + 1
    elif os.path.isdir(root):
        for filename in os.listdir(root):
            count_characters(os.path.join(root, filename), out)
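A small, stdlib-only usage sketch, assuming the function above is in scope. Note that because the file is opened in binary mode, iterating a line under Python 3 yields int byte values, so the dict keys are bytes rather than one-character strings.

import os
import tempfile

counts = {}
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, 'sample.txt')
    with open(path, 'w') as f:
        f.write('abracadabra\n')
    count_characters(tmp, counts)   # recurses over the directory

# Under Python 3 the keys are byte values (ints):
print(counts[ord('a')])             # 5
print(counts[ord('b')])             # 2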
<SYSTEM_TASK:> Save a protobuf model specification to file. <END_TASK> <USER_TASK:> Description: def save_spec(spec, filename): """ Save a protobuf model specification to file. Parameters ---------- spec: Model_pb Protobuf representation of the model filename: str File path where the spec gets saved. Examples -------- .. sourcecode:: python >>> coremltools.utils.save_spec(spec, 'HousePricer.mlmodel') See Also -------- load_spec """
    name, ext = _os.path.splitext(filename)
    if not ext:
        filename = "%s.mlmodel" % filename
    else:
        if ext != '.mlmodel':
            raise Exception("Extension must be .mlmodel (not %s)" % ext)

    with open(filename, 'wb') as f:
        s = spec.SerializeToString()
        f.write(s)
<SYSTEM_TASK:> Load a protobuf model specification from file <END_TASK> <USER_TASK:> Description: def load_spec(filename): """ Load a protobuf model specification from file Parameters ---------- filename: str Location on disk (a valid filepath) from which the file is loaded as a protobuf spec. Returns ------- model_spec: Model_pb Protobuf representation of the model Examples -------- .. sourcecode:: python >>> spec = coremltools.utils.load_spec('HousePricer.mlmodel') See Also -------- save_spec """
    from ..proto import Model_pb2
    spec = Model_pb2.Model()

    with open(filename, 'rb') as f:
        contents = f.read()
        spec.ParseFromString(contents)

    return spec
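A round-trip sketch using the public coremltools helpers documented above; the model path is only a placeholder.

import coremltools

spec = coremltools.utils.load_spec('HousePricer.mlmodel')   # placeholder path
print(spec.specificationVersion)
print(spec.WhichOneof('Type'))        # e.g. 'pipelineRegressor'

# ... edit the spec in place, then write it back out:
coremltools.utils.save_spec(spec, 'HousePricer_edited.mlmodel')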
<SYSTEM_TASK:> Returns a list of neural network layers if the model contains any. <END_TASK> <USER_TASK:> Description: def _get_nn_layers(spec): """ Returns a list of neural network layers if the model contains any. Parameters ---------- spec: Model_pb A model protobuf specification. Returns ------- [NN layer] list of all layers (including layers from elements of a pipeline """
    layers = []
    if spec.WhichOneof('Type') == 'pipeline':
        layers = []
        for model_spec in spec.pipeline.models:
            if not layers:
                return _get_nn_layers(model_spec)
            else:
                layers.extend(_get_nn_layers(model_spec))
    elif spec.WhichOneof('Type') in ['pipelineClassifier',
                                     'pipelineRegressor']:
        layers = []
        for model_spec in spec.pipeline.models:
            if not layers:
                return _get_nn_layers(model_spec)
            else:
                layers.extend(_get_nn_layers(model_spec))
    elif spec.neuralNetwork.layers:
        layers = spec.neuralNetwork.layers
    elif spec.neuralNetworkClassifier.layers:
        layers = spec.neuralNetworkClassifier.layers
    elif spec.neuralNetworkRegressor.layers:
        layers = spec.neuralNetworkRegressor.layers
    return layers
<SYSTEM_TASK:> Evaluate a classifier specification for testing. <END_TASK> <USER_TASK:> Description: def evaluate_classifier_with_probabilities(model, data, probabilities='probabilities', verbose = False): """ Evaluate a classifier specification for testing. Parameters ---------- filename: [str | Model] File from where to load the model from (OR) a loaded version of the MLModel. data: [str | Dataframe] Test data on which to evaluate the models (dataframe, or path to a csv file). probabilities: str Column to interpret as the probabilities column verbose: bool Verbosity levels of the predictions. """
    model = _get_model(model)
    if verbose:
        print("")
        print("Other Framework\t\tPredicted")

    max_probability_error, num_key_mismatch = 0, 0
    for _, row in data.iterrows():
        predicted_values = model.predict(dict(row))[_to_unicode(probabilities)]
        other_values = row[probabilities]

        if set(predicted_values.keys()) != set(other_values.keys()):
            if verbose:
                print("Different classes: ",
                      str(predicted_values.keys()), str(other_values.keys()))
            num_key_mismatch += 1
            continue

        for cur_class, cur_predicted_class_values in predicted_values.items():
            delta = cur_predicted_class_values - other_values[cur_class]
            if verbose:
                print(delta, cur_predicted_class_values, other_values[cur_class])
            max_probability_error = max(abs(delta), max_probability_error)

        if verbose:
            print("")

    ret = {
        "num_samples": len(data),
        "max_probability_error": max_probability_error,
        "num_key_mismatch": num_key_mismatch
    }

    if verbose:
        print("results: %s" % ret)

    return ret
<SYSTEM_TASK:> Rename a feature in the specification. <END_TASK> <USER_TASK:> Description: def rename_feature(spec, current_name, new_name, rename_inputs=True, rename_outputs=True): """ Rename a feature in the specification. Parameters ---------- spec: Model_pb The specification containing the feature to rename. current_name: str Current name of the feature. If this feature doesn't exist, the rename is a no-op. new_name: str New name of the feature. rename_inputs: bool Search for `current_name` only in the input features (i.e ignore output features) rename_outputs: bool Search for `current_name` only in the output features (i.e ignore input features) Examples -------- .. sourcecode:: python # In-place rename of spec >>> coremltools.utils.rename_feature(spec, 'old_feature', 'new_feature_name') """
    from coremltools.models import MLModel

    if not rename_inputs and not rename_outputs:
        return

    changed_input = False
    changed_output = False

    if rename_inputs:
        for input in spec.description.input:
            if input.name == current_name:
                input.name = new_name
                changed_input = True

    if rename_outputs:
        for output in spec.description.output:
            if output.name == current_name:
                output.name = new_name
                changed_output = True

        if spec.description.predictedFeatureName == current_name:
            spec.description.predictedFeatureName = new_name

        if spec.description.predictedProbabilitiesName == current_name:
            spec.description.predictedProbabilitiesName = new_name

    if not changed_input and not changed_output:
        return

    # Rename internally in NN model
    nn = None
    for nn_type in ['neuralNetwork', 'neuralNetworkClassifier', 'neuralNetworkRegressor']:
        if spec.HasField(nn_type):
            nn = getattr(spec, nn_type)

    if nn is not None:
        for layer in nn.layers:
            if rename_inputs:
                for index, name in enumerate(layer.input):
                    if name == current_name:
                        layer.input[index] = new_name
            if rename_outputs:
                for index, name in enumerate(layer.output):
                    if name == current_name:
                        layer.output[index] = new_name

    # Rename internally for feature vectorizer
    if spec.HasField('featureVectorizer') and rename_inputs:
        for input in spec.featureVectorizer.inputList:
            if input.inputColumn == current_name:
                input.inputColumn = new_name
                changed_input = True

    # Rename for pipeline models
    pipeline = None
    if spec.HasField('pipeline'):
        pipeline = spec.pipeline
    elif spec.HasField('pipelineClassifier'):
        pipeline = spec.pipelineClassifier.pipeline
    elif spec.HasField('pipelineRegressor'):
        pipeline = spec.pipelineRegressor.pipeline

    if pipeline is not None:
        for index, model in enumerate(pipeline.models):
            rename_feature(model, current_name, new_name,
                           rename_inputs or (index != 0),
                           rename_outputs or (index < len(spec.pipeline.models)))
<SYSTEM_TASK:> Performs cleaning steps on the data so various type comparisons can <END_TASK> <USER_TASK:> Description: def _sanitize_value(x): """ Performs cleaning steps on the data so various type comparisons can be performed correctly. """
    if isinstance(x, _six.string_types + _six.integer_types + (float,)):
        return x
    elif _HAS_SKLEARN and _sp.issparse(x):
        return x.todense()
    elif isinstance(x, _np.ndarray):
        return x
    elif isinstance(x, tuple):
        # Return an actual tuple (not a generator) so comparisons still work.
        return tuple(_sanitize_value(v) for v in x)
    elif isinstance(x, list):
        return [_sanitize_value(v) for v in x]
    elif isinstance(x, dict):
        return dict((_sanitize_value(k), _sanitize_value(v)) for k, v in x.items())
    else:
        assert False, str(x)
<SYSTEM_TASK:> Performs a robust equality test between elements. <END_TASK> <USER_TASK:> Description: def _element_equal(x, y): """ Performs a robust equality test between elements. """
    if isinstance(x, _np.ndarray) or isinstance(y, _np.ndarray):
        try:
            return (abs(_np.asarray(x) - _np.asarray(y)) < 1e-5).all()
        except:
            return False
    elif isinstance(x, dict):
        return (isinstance(y, dict)
                and _element_equal(x.keys(), y.keys())
                and all(_element_equal(x[k], y[k]) for k in x.keys()))
    elif isinstance(x, float):
        return abs(x - y) < 1e-5 * (abs(x) + abs(y))
    elif isinstance(x, (list, tuple)):
        return x == y
    else:
        return bool(x == y)
<SYSTEM_TASK:> Evaluate a transformer specification for testing. <END_TASK> <USER_TASK:> Description: def evaluate_transformer(model, input_data, reference_output, verbose=False): """ Evaluate a transformer specification for testing. Parameters ---------- spec: [str | MLModel] File from where to load the Model from (OR) a loaded version of MLModel. input_data: list[dict] Test data on which to evaluate the models. reference_output: list[dict] Expected results for the model. verbose: bool Verbosity levels of the predictions. Examples -------- .. sourcecode:: python >>> input_data = [{'input_1': 1, 'input_2': 2}, {'input_1': 3, 'input_2': 3}] >>> expected_output = [{'input_1': 2.5, 'input_2': 2.0}, {'input_1': 1.3, 'input_2': 2.3}] >>> metrics = coremltools.utils.evaluate_transformer(scaler_spec, input_data, expected_output) See Also -------- evaluate_regressor, evaluate_classifier """
    model = _get_model(model)
    if verbose:
        print(model)
        print("")
        print("Other Framework\t\tPredicted")

    num_errors = 0
    for index, row in enumerate(input_data):
        assert isinstance(row, dict)
        sanitized_row = _sanitize_value(row)
        ref_data = _sanitize_value(reference_output[index])
        if verbose:
            print("Input:\n\t", str(row))
            print("Correct output:\n\t", str(ref_data))

        predicted = _sanitize_value(model.predict(sanitized_row))
        assert isinstance(ref_data, dict)
        assert isinstance(predicted, dict)

        predicted_trimmed = dict((k, predicted[k]) for k in ref_data.keys())
        if verbose:
            print("Predicted:\n\t", str(predicted_trimmed))

        if not _element_equal(predicted_trimmed, ref_data):
            num_errors += 1

    ret = {
        "num_samples": len(input_data),
        "num_errors": num_errors
    }

    if verbose:
        print("results: %s" % ret)
    return ret
<SYSTEM_TASK:> Compute the in degree, out degree and total degree of each vertex. <END_TASK> <USER_TASK:> Description: def create(graph, verbose=True): """ Compute the in degree, out degree and total degree of each vertex. Parameters ---------- graph : SGraph The graph on which to compute degree counts. verbose : bool, optional If True, print progress updates. Returns ------- out : DegreeCountingModel Examples -------- If given an :class:`~turicreate.SGraph` ``g``, we can create a :class:`~turicreate.degree_counting.DegreeCountingModel` as follows: >>> g = turicreate.load_sgraph('http://snap.stanford.edu/data/web-Google.txt.gz', ... format='snap') >>> m = turicreate.degree_counting.create(g) >>> g2 = m['graph'] >>> g2 SGraph({'num_edges': 5105039, 'num_vertices': 875713}) Vertex Fields:['__id', 'in_degree', 'out_degree', 'total_degree'] Edge Fields:['__src_id', '__dst_id'] >>> g2.vertices.head(5) Columns: __id int in_degree int out_degree int total_degree int <BLANKLINE> Rows: 5 <BLANKLINE> Data: +------+-----------+------------+--------------+ | __id | in_degree | out_degree | total_degree | +------+-----------+------------+--------------+ | 5 | 15 | 7 | 22 | | 7 | 3 | 16 | 19 | | 8 | 1 | 2 | 3 | | 10 | 13 | 11 | 24 | | 27 | 19 | 16 | 35 | +------+-----------+------------+--------------+ See Also -------- DegreeCountingModel """
    from turicreate._cython.cy_server import QuietProgress

    if not isinstance(graph, _SGraph):
        raise TypeError('"graph" input must be a SGraph object.')

    with QuietProgress(verbose):
        params = _tc.extensions._toolkits.graph.degree_count.create(
            {'graph': graph.__proxy__})

    return DegreeCountingModel(params['model'])
<SYSTEM_TASK:> replace the index'th emphasized text with s <END_TASK> <USER_TASK:> Description: def replace_emphasis(self, s, index = 0): """replace the index'th emphasized text with s"""
    e = self.emphasized[index]
    self.body[e[0]:e[1]] = [s]
    del self.emphasized[index]
<SYSTEM_TASK:> Override of litre._execute; sets up variable context before <END_TASK> <USER_TASK:> Description: def _execute(self, code): """Override of litre._execute; sets up variable context before evaluating code """
    self.globals['example'] = self.example
    eval(code, self.globals)
<SYSTEM_TASK:> Compile examples on the stack, whose topmost item is the last example <END_TASK> <USER_TASK:> Description: def compile( self , howmany = 1 , pop = -1 , expect_error = False , extension = '.o' , options = ['-c'] , built_handler = lambda built_file: None , source_file = None , source_suffix = '.cpp' # C-style comments by default; handles C++ and YACC , make_comment = lambda text: '/*\n%s\n*/' % text , built_file = None , command = None ): """ Compile examples on the stack, whose topmost item is the last example seen but not yet handled so far. :howmany: How many of the topmost examples on the stack to compile. You can pass a number, or 'all' to indicate that all examples should be compiled. :pop: How many of the topmost examples to discard. By default, all of the examples that are compiled are discarded. :expect_error: Whether a compilation error is to be expected. Any value > 1 will cause the expected diagnostic's text to be dumped for diagnostic purposes. It's common to expect an error but see a completely unrelated one because of bugs in the example (you can get this behavior for all examples by setting show_expected_error_output in your config). :extension: The extension of the file to build (set to .exe for run) :options: Compiler flags :built_file: A path to use for the built file. By default, a temp filename is conjured up :built_handler: A function that's called with the name of the built file upon success. :source_file: The full name of the source file to write :source_suffix: If source_file is None, the suffix to use for the source file :make_comment: A function that transforms text into an appropriate comment. :command: A function that is passed (includes, opts, target, source), where opts is a string representing compiler options, target is the name of the file to build, and source is the name of the file into which the example code is written. By default, the function formats litre.config.compiler with its argument tuple. """
    # Grab one example by default
    if howmany == 'all':
        howmany = len(self.stack)

    source = '\n'.join(
        self.prefix
        + [str(x) for x in self.stack[-howmany:]]
        )

    source = reduce(lambda s, f: f(s), self.preprocessors, source)

    if pop:
        if pop < 0:
            pop = howmany
        del self.stack[-pop:]

    if len(self.stack):
        self.example = self.stack[-1]

    cpp = self._source_file_path(source_file, source_suffix)

    if built_file is None:
        built_file = self._output_file_path(source_file, extension)

    opts = ' '.join(options)

    includes = ' '.join(['-I%s' % d for d in self.includes])
    if not command:
        command = self.config.compiler
    if type(command) == str:
        command = lambda i, o, t, s, c = command: c % (i, o, t, s)

    cmd = command(includes, opts, expand_vars(built_file), expand_vars(cpp))

    if expect_error and self.config.show_expected_error_output:
        expect_error += 1

    comment_cmd = command(includes, opts, built_file, os.path.basename(cpp))
    comment = make_comment(config.comment_text(comment_cmd, expect_error))

    self._write_source(cpp, '\n'.join([comment, source]))

    #print 'wrote in', cpp
    #print 'trying command', cmd

    status, output = syscmd(cmd, expect_error)

    if status or expect_error > 1:
        print
        if expect_error and expect_error < 2:
            print 'Compilation failure expected, but none seen'
        print '------------ begin offending source ------------'
        print open(cpp).read()
        print '------------ end offending source ------------'

        if self.config.save_cpp:
            print 'saved in', repr(cpp)
        else:
            self._remove_source(cpp)

        sys.stdout.flush()
    else:
        print '.',
        sys.stdout.flush()
        built_handler(built_file)

        self._remove_source(cpp)

        try:
            self._unlink(built_file)
        except:
            if not expect_error:
                print 'failed to unlink', built_file

    return status
<SYSTEM_TASK:> Loads jamfile at the given location. After loading, project global <END_TASK> <USER_TASK:> Description: def load (self, jamfile_location): """Loads jamfile at the given location. After loading, project global file and jamfile needed by the loaded one will be loaded recursively. If the jamfile at that location is loaded already, does nothing. Returns the project module for the Jamfile."""
    assert isinstance(jamfile_location, basestring)

    absolute = os.path.join(os.getcwd(), jamfile_location)
    absolute = os.path.normpath(absolute)
    jamfile_location = b2.util.path.relpath(os.getcwd(), absolute)

    mname = self.module_name(jamfile_location)
    # If Jamfile is already loaded, do not try again.
    if not mname in self.jamfile_modules:

        if "--debug-loading" in self.manager.argv():
            print "Loading Jamfile at '%s'" % jamfile_location

        self.load_jamfile(jamfile_location, mname)

        # We want to make sure that child project are loaded only
        # after parent projects. In particular, because parent projects
        # define attributes which are inherited by children, and we do not
        # want children to be loaded before parents has defined everything.
        #
        # While "build-project" and "use-project" can potentially refer
        # to child projects from parent projects, we do not immediately
        # load child projects when seing those attributes. Instead,
        # we record the minimal information that will be used only later.
        self.load_used_projects(mname)

    return mname
<SYSTEM_TASK:> Loads parent of Jamfile at 'location'. <END_TASK> <USER_TASK:> Description: def load_parent(self, location): """Loads parent of Jamfile at 'location'. Issues an error if nothing is found."""
    assert isinstance(location, basestring)
    found = b2.util.path.glob_in_parents(
        location, self.JAMROOT + self.JAMFILE)

    if not found:
        print "error: Could not find parent for project at '%s'" % location
        print "error: Did not find Jamfile.jam or Jamroot.jam in any parent directory."
        sys.exit(1)

    return self.load(os.path.dirname(found[0]))
<SYSTEM_TASK:> Given 'name' which can be project-id or plain directory name, <END_TASK> <USER_TASK:> Description: def find(self, name, current_location): """Given 'name' which can be project-id or plain directory name, return project module corresponding to that id or directory. Returns nothing of project is not found."""
    assert isinstance(name, basestring)
    assert isinstance(current_location, basestring)

    project_module = None

    # Try interpreting name as project id.
    if name[0] == '/':
        project_module = self.id2module.get(name)

    if not project_module:
        location = os.path.join(current_location, name)
        # If no project is registered for the given location, try to
        # load it. First see if we have Jamfile. If not we might have project
        # root, willing to act as Jamfile. In that case, project-root
        # must be placed in the directory referred by id.

        project_module = self.module_name(location)
        if not project_module in self.jamfile_modules:
            if b2.util.path.glob([location], self.JAMROOT + self.JAMFILE):
                project_module = self.load(location)
            else:
                project_module = None

    return project_module
<SYSTEM_TASK:> Returns the name of module corresponding to 'jamfile-location'. <END_TASK> <USER_TASK:> Description: def module_name(self, jamfile_location): """Returns the name of module corresponding to 'jamfile-location'. If no module corresponds to location yet, associates default module name with that location."""
    assert isinstance(jamfile_location, basestring)
    module = self.location2module.get(jamfile_location)
    if not module:
        # Root the path, so that locations are always unambiguous.
        # Without this, we can't decide if '../../exe/program1' and '.'
        # are the same paths, or not.
        jamfile_location = os.path.realpath(
            os.path.join(os.getcwd(), jamfile_location))
        module = "Jamfile<%s>" % jamfile_location
        self.location2module[jamfile_location] = module
    return module
<SYSTEM_TASK:> Loads 'file' as standalone project that has no location <END_TASK> <USER_TASK:> Description: def load_standalone(self, jamfile_module, file): """Loads 'file' as standalone project that has no location associated with it. This is mostly useful for user-config.jam, which should be able to define targets, but although it has some location in filesystem, we do not want any build to happen in user's HOME, for example. The caller is required to never call this method twice on the same file. """
    assert isinstance(jamfile_module, basestring)
    assert isinstance(file, basestring)
    self.used_projects[jamfile_module] = []
    bjam.call("load", jamfile_module, file)
    self.load_used_projects(jamfile_module)
<SYSTEM_TASK:> Initialize the module for a project. <END_TASK> <USER_TASK:> Description: def initialize(self, module_name, location=None, basename=None, standalone_path=''): """Initialize the module for a project. module-name is the name of the project module. location is the location (directory) of the project to initialize. If not specified, standalone project will be initialized standalone_path is the path to the source-location. this should only be called from the python side. """
    assert isinstance(module_name, basestring)
    assert isinstance(location, basestring) or location is None
    assert isinstance(basename, basestring) or basename is None
    jamroot = False
    parent_module = None
    if module_name == "test-config":
        # No parent
        pass
    elif module_name == "site-config":
        parent_module = "test-config"
    elif module_name == "user-config":
        parent_module = "site-config"
    elif module_name == "project-config":
        parent_module = "user-config"
    elif location and not self.is_jamroot(basename):
        # We search for parent/project-root only if jamfile was specified
        # --- i.e
        # if the project is not standalone.
        parent_module = self.load_parent(location)
    elif location:
        # It's either jamroot, or standalone project.
        # If it's jamroot, inherit from user-config.
        # If project-config module exist, inherit from it.
        parent_module = 'user-config'
        if 'project-config' in self.module2attributes:
            parent_module = 'project-config'
        jamroot = True

    # TODO: need to consider if standalone projects can do anything but defining
    # prebuilt targets. If so, we need to give more sensible "location", so that
    # source paths are correct.
    if not location:
        location = ""

    # the call to load_parent() above can end up loading this module again
    # make sure we don't reinitialize the module's attributes
    if module_name not in self.module2attributes:
        if "--debug-loading" in self.manager.argv():
            print "Initializing project '%s'" % module_name
        attributes = ProjectAttributes(self.manager, location, module_name)
        self.module2attributes[module_name] = attributes

        python_standalone = False
        if location:
            attributes.set("source-location", [location], exact=1)
        elif not module_name in ["test-config", "site-config", "user-config", "project-config"]:
            # This is a standalone project with known location. Set source location
            # so that it can declare targets. This is intended so that you can put
            # a .jam file in your sources and use it via 'using'. Standard modules
            # (in 'tools' subdir) may not assume source dir is set.
            source_location = standalone_path
            if not source_location:
                source_location = self.loaded_tool_module_path_.get(module_name)
            if not source_location:
                self.manager.errors()('Standalone module path not found for "{}"'
                                      .format(module_name))
            attributes.set("source-location", [source_location], exact=1)
            python_standalone = True

        attributes.set("requirements", property_set.empty(), exact=True)
        attributes.set("usage-requirements", property_set.empty(), exact=True)
        attributes.set("default-build", property_set.empty(), exact=True)
        attributes.set("projects-to-build", [], exact=True)
        attributes.set("project-root", None, exact=True)
        attributes.set("build-dir", None, exact=True)

        self.project_rules_.init_project(module_name, python_standalone)

        if parent_module:
            self.inherit_attributes(module_name, parent_module)
            attributes.set("parent-module", parent_module, exact=1)

        if jamroot:
            attributes.set("project-root", location, exact=1)

        parent = None
        if parent_module:
            parent = self.target(parent_module)

        if module_name not in self.module2target:
            target = b2.build.targets.ProjectTarget(self.manager, module_name,
                                                    module_name, parent,
                                                    self.attribute(module_name, "requirements"),
                                                    # FIXME: why we need to pass this? It's not
                                                    # passed in jam code.
                                                    self.attribute(module_name, "default-build"))
            self.module2target[module_name] = target

    self.current_project = self.target(module_name)
<SYSTEM_TASK:> Make 'project-module' inherit attributes of project <END_TASK> <USER_TASK:> Description: def inherit_attributes(self, project_module, parent_module): """Make 'project-module' inherit attributes of project root and parent module."""
    assert isinstance(project_module, basestring)
    assert isinstance(parent_module, basestring)

    attributes = self.module2attributes[project_module]
    pattributes = self.module2attributes[parent_module]

    # Parent module might be locationless user-config.
    # FIXME:
    #if [ modules.binding $(parent-module) ]
    #{
    #    $(attributes).set parent : [ path.parent
    #        [ path.make [ modules.binding $(parent-module) ] ] ] ;
    #}

    attributes.set("project-root", pattributes.get("project-root"), exact=True)
    attributes.set("default-build", pattributes.get("default-build"), exact=True)
    attributes.set("requirements", pattributes.get("requirements"), exact=True)
    attributes.set("usage-requirements",
                   pattributes.get("usage-requirements"), exact=1)

    parent_build_dir = pattributes.get("build-dir")

    if parent_build_dir:
        # Have to compute relative path from parent dir to our dir
        # Convert both paths to absolute, since we cannot
        # find relative path from ".." to "."
        location = attributes.get("location")
        parent_location = pattributes.get("location")

        our_dir = os.path.join(os.getcwd(), location)
        parent_dir = os.path.join(os.getcwd(), parent_location)

        build_dir = os.path.join(parent_build_dir,
                                 os.path.relpath(our_dir, parent_dir))
        attributes.set("build-dir", build_dir, exact=True)
<SYSTEM_TASK:> Associate the given id with the given project module. <END_TASK> <USER_TASK:> Description: def register_id(self, id, module): """Associate the given id with the given project module."""
    assert isinstance(id, basestring)
    assert isinstance(module, basestring)
    self.id2module[id] = module
<SYSTEM_TASK:> Temporary changes the current project to 'project'. Should <END_TASK> <USER_TASK:> Description: def push_current(self, project): """Temporary changes the current project to 'project'. Should be followed by 'pop-current'."""
    if __debug__:
        from .targets import ProjectTarget
        assert isinstance(project, ProjectTarget)
    self.saved_current_project.append(self.current_project)
    self.current_project = project
<SYSTEM_TASK:> Returns the project target corresponding to the 'project-module'. <END_TASK> <USER_TASK:> Description: def target(self, project_module): """Returns the project target corresponding to the 'project-module'."""
    assert isinstance(project_module, basestring)
    if project_module not in self.module2target:
        self.module2target[project_module] = \
            b2.build.targets.ProjectTarget(project_module, project_module,
                                           self.attribute(project_module, "requirements"))
    return self.module2target[project_module]
<SYSTEM_TASK:> Makes rule 'name' available to all subsequently loaded Jamfiles. <END_TASK> <USER_TASK:> Description: def add_rule(self, name, callable_): """Makes rule 'name' available to all subsequently loaded Jamfiles. Calling that rule wil relay to 'callable'."""
    assert isinstance(name, basestring)
    assert callable(callable_)
    self.project_rules_.add_rule(name, callable_)
<SYSTEM_TASK:> Load a Python module that should be useable from Jamfiles. <END_TASK> <USER_TASK:> Description: def load_module(self, name, extra_path=None): """Load a Python module that should be useable from Jamfiles. There are generally two types of modules Jamfiles might want to use: - Core Boost.Build. Those are imported using plain names, e.g. 'toolset', so this function checks if we have module named b2.package.module already. - Python modules in the same directory as Jamfile. We don't want to even temporary add Jamfile's directory to sys.path, since then we might get naming conflicts between standard Python modules and those. """
    assert isinstance(name, basestring)
    assert is_iterable_typed(extra_path, basestring) or extra_path is None
    # See if we loaded module of this name already
    existing = self.loaded_tool_modules_.get(name)
    if existing:
        return existing

    # check the extra path as well as any paths outside
    # of the b2 package and import the module if it exists
    b2_path = os.path.normpath(b2.__path__[0])
    # normalize the pathing in the BOOST_BUILD_PATH.
    # this allows for using startswith() to determine
    # if a path is a subdirectory of the b2 root_path
    paths = [os.path.normpath(p) for p in self.manager.boost_build_path()]
    # remove all paths that start with b2's root_path
    paths = [p for p in paths if not p.startswith(b2_path)]
    # add any extra paths
    paths.extend(extra_path)

    try:
        # find_module is used so that the pyc's can be used.
        # an ImportError is raised if not found
        f, location, description = imp.find_module(name, paths)
    except ImportError:
        # if the module is not found in the b2 package,
        # this error will be handled later
        pass
    else:
        # we've found the module, now let's try loading it.
        # it's possible that the module itself contains an ImportError
        # which is why we're loading it in this else clause so that the
        # proper error message is shown to the end user.
        # TODO: does this module name really need to be mangled like this?
        mname = name + "__for_jamfile"
        self.loaded_tool_module_path_[mname] = location
        module = imp.load_module(mname, f, location, description)
        self.loaded_tool_modules_[name] = module
        return module

    # the cache is created here due to possibly importing packages
    # that end up calling get_manager() which might fail
    if not self.__python_module_cache:
        self.__build_python_module_cache()

    underscore_name = name.replace('-', '_')
    # check to see if the module is within the b2 package
    # and already loaded
    mname = self.__python_module_cache.get(underscore_name)
    if mname in sys.modules:
        return sys.modules[mname]
    # otherwise, if the module name is within the cache,
    # the module exists within the BOOST_BUILD_PATH,
    # load it.
    elif mname:
        # in some cases, self.loaded_tool_module_path_ needs to
        # have the path to the file during the import
        # (project.initialize() for example),
        # so the path needs to be set *before* importing the module.
        path = os.path.join(b2.__path__[0], *mname.split('.')[1:])
        self.loaded_tool_module_path_[mname] = path
        # mname is guaranteed to be importable since it was
        # found within the cache
        __import__(mname)
        module = sys.modules[mname]
        self.loaded_tool_modules_[name] = module
        return module

    self.manager.errors()("Cannot find module '%s'" % name)
<SYSTEM_TASK:> Prints the project attributes. <END_TASK> <USER_TASK:> Description: def dump(self): """Prints the project attributes."""
    id = self.get("id")
    if not id:
        id = "(none)"
    else:
        id = id[0]

    parent = self.get("parent")
    if not parent:
        parent = "(none)"
    else:
        parent = parent[0]

    print "'%s'" % id
    print "Parent project:%s", parent
    print "Requirements:%s", self.get("requirements")
    print "Default build:%s", string.join(self.get("debuild-build"))
    print "Source location:%s", string.join(self.get("source-location"))
    print "Projects to build:%s", string.join(self.get("projects-to-build").sort())
<SYSTEM_TASK:> Given a free-standing function 'callable', return a new <END_TASK> <USER_TASK:> Description: def make_wrapper(self, callable_): """Given a free-standing function 'callable', return a new callable that will call 'callable' and report all exceptions, using 'call_and_report_errors'."""
assert callable(callable_) def wrapper(*args, **kw): return self.call_and_report_errors(callable_, *args, **kw) return wrapper
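A minimal usage sketch; `registry` below is a made-up name for whatever object exposes make_wrapper, and it assumes call_and_report_errors returns the wrapped function's result when no error occurs:

>>> def frobnicate(x):
...     return x * 2
>>> wrapped = registry.make_wrapper(frobnicate)
>>> wrapped(21)   # -> 42; any exception raised inside frobnicate is routed through call_and_report_errors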
<SYSTEM_TASK:> Declare and set a project global constant. <END_TASK> <USER_TASK:> Description: def constant(self, name, value): """Declare and set a project global constant. Project global constants are normal variables but should not be changed. They are applied to every child Jamfile."""
assert is_iterable_typed(name, basestring) assert is_iterable_typed(value, basestring) self.registry.current().add_constant(name[0], value)
<SYSTEM_TASK:> Declare and set a project global constant, whose value is a path. The <END_TASK> <USER_TASK:> Description: def path_constant(self, name, value): """Declare and set a project global constant, whose value is a path. The path is adjusted to be relative to the invocation directory. The given value path is taken to be either absolute, or relative to this project root."""
assert is_iterable_typed(name, basestring) assert is_iterable_typed(value, basestring) if len(value) > 1: self.registry.manager.errors()("path constant should have one element") self.registry.current().add_constant(name[0], value, path=1)
<SYSTEM_TASK:> Creates a feature extractor from an input array feature and returns the model spec. <END_TASK> <USER_TASK:> Description: def create_array_feature_extractor(input_features, output_name, extract_indices, output_type = None): """ Creates a feature extractor from an input array feature and returns the model spec. input_features is a list of one (name, array) tuple. extract_indices is either an integer or a list. If it's an integer, the output type is by default a double (but may also be an integer). If a list, the output type is an array. """
# Make sure that our starting stuff is in the proper form. assert len(input_features) == 1 assert isinstance(input_features[0][1], datatypes.Array) # Create the model. spec = _Model_pb2.Model() spec.specificationVersion = SPECIFICATION_VERSION if isinstance(extract_indices, _integer_types): extract_indices = [extract_indices] if output_type is None: output_type = datatypes.Double() elif isinstance(extract_indices, (list, tuple)): if not all(isinstance(x, _integer_types) for x in extract_indices): raise TypeError("extract_indices must be an integer or a list of integers.") if output_type is None: output_type = datatypes.Array(len(extract_indices)) else: raise TypeError("extract_indices must be an integer or a list of integers.") output_features = [(output_name, output_type)] for idx in extract_indices: assert idx < input_features[0][1].num_elements spec.arrayFeatureExtractor.extractIndex.append(idx) set_transform_interface_params(spec, input_features, output_features) return spec
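A small usage sketch; the feature name 'dense_features' and output name 'selected' are invented for illustration, and `datatypes` is the same module referenced in the code above:

>>> input_features = [('dense_features', datatypes.Array(10))]
>>> spec = create_array_feature_extractor(input_features, 'selected', [2, 5])
>>> list(spec.arrayFeatureExtractor.extractIndex)   # -> [2, 5]
>>> # the output feature 'selected' is declared as an Array of length 2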
<SYSTEM_TASK:> Takes an SVM classifier and produces a starting spec using the parts that are <END_TASK> <USER_TASK:> Description: def _generate_base_svm_classifier_spec(model): """ Takes an SVM classifier and produces a starting spec using the parts that are shared between all SVMs. """
if not(_HAS_SKLEARN): raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.') check_fitted(model, lambda m: hasattr(m, 'support_vectors_')) spec = _Model_pb2.Model() spec.specificationVersion = SPECIFICATION_VERSION svm = spec.supportVectorClassifier _set_kernel(model, svm) for cur_rho in model.intercept_: if(len(model.classes_) == 2): # For some reason Scikit Learn doesn't negate for binary classification svm.rho.append(cur_rho) else: svm.rho.append(-cur_rho) for i in range(len(model._dual_coef_)): svm.coefficients.add() for cur_alpha in model._dual_coef_[i]: svm.coefficients[i].alpha.append(cur_alpha) for cur_src_vector in model.support_vectors_: cur_dest_vector = svm.denseSupportVectors.vectors.add() for i in cur_src_vector: cur_dest_vector.values.append(i) return spec
<SYSTEM_TASK:> Insert the new_layer after layer, whose position is layer_idx. The new layer's <END_TASK> <USER_TASK:> Description: def _insert_layer_after(self, layer_idx, new_layer, new_keras_layer): """ Insert the new_layer after layer, whose position is layer_idx. The new layer's parameter is stored in a Keras layer called new_keras_layer """
# reminder: new_keras_layer is not part of the original Keras network,
# so its input / output blob information is missing. It serves only as
# a parameter holder.
layer = self.layer_list[layer_idx]
self.layer_list.insert(layer_idx+1, new_layer)
self.keras_layer_map[new_layer] = new_keras_layer
successors = self.get_successors(layer)
# add edge layer -> new_layer
self._add_edge(layer, new_layer)
# add edges new_layer -> layer_successor, remove layer -> successor
for succ in successors:
    self._add_edge(new_layer, succ)
    self._remove_edge(layer, succ)
<SYSTEM_TASK:> Insert the new_layer between the layers src and snk. The new layer's <END_TASK> <USER_TASK:> Description: def _insert_layer_between(self, src, snk, new_layer, new_keras_layer): """ Insert the new_layer between the layers src and snk. The new layer's parameters are stored in a Keras layer called new_keras_layer. """
if snk is None: insert_pos = self.layer_list.index(src) + 1 else: insert_pos = self.layer_list.index(snk) # insert position self.layer_list.insert(insert_pos, new_layer) self.keras_layer_map[new_layer] = new_keras_layer if src is None: # snk is an input layer self._add_edge(new_layer, snk) elif snk is None: # src is an output layer self._add_edge(src, new_layer) else: self._add_edge(src, new_layer) self._add_edge(new_layer, snk) self._remove_edge(src, snk)
<SYSTEM_TASK:> Insert permutation layers before a 1D start point or after 1D end point <END_TASK> <USER_TASK:> Description: def insert_1d_permute_layers(self): """ Insert permutation layers before a 1D start point or after 1D end point """
idx, nb_layers = 0, len(self.layer_list) in_edges, out_edges = self._get_1d_interface_edges() # Hacky Warning: (1) use a 4-D permute, which is not likely to happen in Keras, # to represent actual permutation needed for (seq, c, h, w) in CoreML # (2) Assume 2-D input shape has meaning (seq, c), and during CoreML runtime, # it is represented as 4D blob, (seq, c, h, w) for in_edge in in_edges: src, snk = in_edge if src is None: permute_layer = '_permute_' + snk else: permute_layer = src + '_permute_' + snk keras_permute = _keras.layers.Permute(dims=(3,1,2,0)) # assume w = 1, switch seq and w self._insert_layer_between(src, snk, permute_layer, keras_permute) for out_edge in out_edges: src, snk = out_edge if snk is None: permute_layer = src + '_permute_' else: permute_layer = src + '_permute_' + snk keras_permute = _keras.layers.Permute(dims=(3,1,2,0)) # assume w = 1, switch seq and w back self._insert_layer_between(src, snk, permute_layer, keras_permute)
<SYSTEM_TASK:> Report something about component configuration that the user should be aware of. <END_TASK> <USER_TASK:> Description: def log_component_configuration(component, message): """Report something about component configuration that the user should be aware of."""
assert isinstance(component, basestring) assert isinstance(message, basestring) __component_logs.setdefault(component, []).append(message)
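For illustration only (the component name and message are invented), a toolset might record notes like this, which simply accumulate in the module-level __component_logs dict:

>>> log_component_configuration('gcc', 'using gcc 4.8.4 at /usr/bin/g++')
>>> log_component_configuration('gcc', 'no multilib support detected')
>>> # __component_logs now maps 'gcc' to both messages, in the order they were logged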
<SYSTEM_TASK:> Create a Transformer object to transform data for feature engineering. <END_TASK> <USER_TASK:> Description: def create(dataset, transformers): """ Create a Transformer object to transform data for feature engineering. Parameters ---------- dataset : SFrame The dataset to use for training the model. transformers: Transformer | list[Transformer] An Transformer or a list of Transformers. See Also -------- turicreate.toolkits.feature_engineering._feature_engineering._TransformerBase Examples -------- .. sourcecode:: python # Create data. >>> sf = turicreate.SFrame({'a': [1,2,3], 'b' : [2,3,4]}) >>> from turicreate.feature_engineering import FeatureHasher, \ QuadraticFeatures, OneHotEncoder # Create a single transformer. >>> encoder = turicreate.feature_engineering.create(sf, OneHotEncoder(max_categories = 10)) # Create a chain of transformers. >>> chain = turicreate.feature_engineering.create(sf, [ QuadraticFeatures(), FeatureHasher() ]) # Create a chain of transformers with names for each of the steps. >>> chain = turicreate.feature_engineering.create(sf, [ ('quadratic', QuadraticFeatures()), ('hasher', FeatureHasher()) ]) """
err_msg = "The parameters 'transformers' must be a valid Transformer object." cls = transformers.__class__ _raise_error_if_not_sframe(dataset, "dataset") # List of transformers. if (cls == list): transformers = TransformerChain(transformers) # Transformer. else: if not issubclass(cls, TransformerBase): raise TypeError(err_msg) # Fit and return transformers.fit(dataset) return transformers
<SYSTEM_TASK:> Makes a token to refer to a Python value inside Jam language code. <END_TASK> <USER_TASK:> Description: def value_to_jam(value, methods=False): """Makes a token to refer to a Python value inside Jam language code. The token is merely a string that can be passed around in Jam code and eventually passed back. For example, we might want to pass a PropertySet instance to a tag function and it might eventually call back to virtual_target.add_suffix_and_prefix, passing the same instance. For values that are classes, we'll also make class methods callable from Jam. Note that this is necessary to make a bit more of existing Jamfiles work. This trick should not be used too much, or else the performance benefits of the Python port will be eaten. """
global __value_id r = __python_to_jam.get(value, None) if r: return r exported_name = '###_' + str(__value_id) __value_id = __value_id + 1 __python_to_jam[value] = exported_name __jam_to_python[exported_name] = value if methods and type(value) == types.InstanceType: for field_name in dir(value): field = getattr(value, field_name) if callable(field) and not field_name.startswith("__"): bjam.import_rule("", exported_name + "." + field_name, field) return exported_name
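A minimal sketch of the token round trip; the exact token text depends on how many values were exported before, so '###_0' below is only indicative:

>>> class Cfg:
...     pass
>>> cfg = Cfg()
>>> token = value_to_jam(cfg)        # e.g. '###_0'
>>> __jam_to_python[token] is cfg
True
>>> value_to_jam(cfg) == token       # repeated calls reuse the cached token
True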
<SYSTEM_TASK:> Abbreviates each part of string that is delimited by a '-'. <END_TASK> <USER_TASK:> Description: def abbreviate_dashed(s): """Abbreviates each part of string that is delimited by a '-'."""
r = [] for part in s.split('-'): r.append(abbreviate(part)) return '-'.join(r)
<SYSTEM_TASK:> Apply a set of standard transformations to a string to produce an <END_TASK> <USER_TASK:> Description: def abbreviate(s): """Apply a set of standard transformations to a string to produce an abbreviation of no more than five characters (the first character plus at most four remaining consonants). """
if not s: return '' # check the cache if s in abbreviate.abbreviations: return abbreviate.abbreviations[s] # anything less than 4 characters doesn't need # an abbreviation if len(s) < 4: # update cache abbreviate.abbreviations[s] = s return s # save the first character in case it's a vowel s1 = s[0] s2 = s[1:] if s.endswith('ing'): # strip off the 'ing' s2 = s2[:-3] # reduce all doubled characters to one s2 = ''.join(c for c, _ in groupby(s2)) # remove all vowels s2 = s2.translate(None, "AEIOUaeiou") # shorten remaining consonants to 4 characters # and add the first char back to the front s2 = s1 + s2[:4] # update cache abbreviate.abbreviations[s] = s2 return s2
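Tracing the rules above by hand (strip a trailing 'ing', collapse doubled letters, drop vowels, keep the first character plus at most four remaining consonants); this assumes abbreviate.abbreviations has been initialized to an empty dict elsewhere in the module, as the cache lookups above imply:

>>> abbreviate('debug')
'dbg'
>>> abbreviate('multithreading')
'mltth'
>>> abbreviate_dashed('link-runtime')
'lnk-rntm'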
<SYSTEM_TASK:> Get the decision from this node to a child node. <END_TASK> <USER_TASK:> Description: def get_decision(self, child, is_missing = False): """ Get the decision from this node to a child node. Parameters ---------- child: Node A child node of this node. Returns ------- dict: A dictionary that describes how to get from this node to the child node. """
# Child does exist and there is a path to the child. value = self.value feature = self.split_feature_column index = self.split_feature_index if not is_missing: if self.left_id == child.node_id: if self.node_type in ["float", "integer"]: sign = "<" else: sign = "=" else: if self.node_type in ["float", "integer"]: sign = ">=" else: sign = "!=" else: sign = "missing" value = None return { "node_id" : self.node_id, "node_type" : self.node_type, "feature" : feature, "index" : index, "sign" : sign, "value" : value, "child_id" : child.node_id, "is_missing" : is_missing }
<SYSTEM_TASK:> Return the node as a dictionary. <END_TASK> <USER_TASK:> Description: def to_dict(self): """ Return the node as a dictionary. Returns ------- dict: All the attributes of this node as a dictionary (minus the left and right). """
out = {} for key in self.__dict__.keys(): if key not in ['left', 'right', 'missing', 'parent']: out[key] = self.__dict__[key] return out
<SYSTEM_TASK:> Recursive function to dump this tree as a json blob. <END_TASK> <USER_TASK:> Description: def to_json(self, root_id = 0, output = {}): """ Recursive function to dump this tree as a json blob. Parameters ---------- root_id: Root id of the sub-tree output: Carry over output from the previous sub-trees. Returns ------- dict: A tree in JSON format. Starts at the root node and recursively represents each node in JSON. - node_id : ID of the node. - left_id : ID of left child (None if it doesn't exist). - right_id : ID of right child (None if it doesn't exist). - split_feature_column : Feature column on which a decision is made. - split_feature_index : Feature index (within that column) on which the decision is made. - is_leaf : Is this node a leaf node? - node_type : Node type (categorical, numerical, leaf etc.) - value : Prediction (if leaf), decision split point (if not leaf). - left : JSON representation of the left node. - right : JSON representation of the right node. Examples -------- .. sourcecode:: python >>> tree.to_json() # Leaf node {'is_leaf': False, 'left': {'is_leaf': True, 'left_id': None, 'node_id': 115, 'node_type': u'leaf', 'parent_id': 60, 'right_id': None, 'split_feature_column': None, 'split_feature_index': None, 'value': 0.436364}, 'left_id': 115, 'node_id': 60, 'node_type': u'float', 'parent_id': 29, 'right': {'is_leaf': True, 'left_id': None, 'node_id': 116, 'node_type': u'leaf', 'parent_id': 60, 'right_id': None, 'split_feature_column': None, 'split_feature_index': None, 'value': -0.105882}, 'right_id': 116, 'split_feature_column': 'Quantity_features_14', 'split_feature_index': 'count_sum', 'value': 22.5} """
_raise_error_if_not_of_type(root_id, [int,long], "root_id") _numeric_param_check_range("root_id", root_id, 0, self.num_nodes - 1) node = self.nodes[root_id] output = node.to_dict() if node.left_id is not None: j = node.left_id output['left'] = self.to_json(j, output) if node.right_id is not None: j = node.right_id output['right'] = self.to_json(j, output) return output
<SYSTEM_TASK:> Return the prediction path from the root to this node. <END_TASK> <USER_TASK:> Description: def get_prediction_path(self, node_id, missing_id = []): """ Return the prediction path from the root to this node. Parameters ---------- node_id : id of the node to get the prediction path. missing_id : Additional info that contains nodes with missing features. Returns ------- list: The list of decisions (top to bottom) from the root to this node. Examples -------- .. sourcecode:: python >>> tree.get_prediction_path(5) # Any node [{'child_id': 2, 'feature': 'Quantity_features_90', 'index': 'sum_timegaplast_gap', 'node_id': 0, 'sign': '>', 'value': 53.5}, {'child_id': 5, 'feature': 'Quantity_features_90', 'index': 'sum_sum', 'node_id': 2, 'sign': '<=', 'value': 146.5}] """
_raise_error_if_not_of_type(node_id, [int,long], "node_id") _numeric_param_check_range("node_id", node_id, 0, self.num_nodes - 1) def _deduplicate_path(path): s_nodes = {} # super_nodes s_path = [] # paths of super nodes. for node in path: feature = node['feature'] index = node['index'] if (feature, index) not in s_nodes: s_nodes[feature, index] = node s_path.append(node) else: s_node = s_nodes[feature, index] s_sign = s_node['sign'] sign = node['sign'] value = node['value'] # Supernode has no range. if s_sign == "<": if sign == ">=": s_node["value"] = [value, s_node["value"]] s_node["sign"] = "in" elif sign == "<": s_node["value"] = value elif s_sign == ">=": if sign == ">=": s_node["value"] = value elif sign == "<": s_node["value"] = [s_node["value"], value] s_node["sign"] = "in" # Supernode has a range. elif s_sign == "in": if sign == ">=": s_node["value"][0] = value elif sign == "<": s_node["value"][1] = value # Return super node path. return s_path path = [] node = self.nodes[node_id] while node.parent is not None: parent = node.parent is_missing = node.node_id in missing_id path.insert(0, parent.get_decision(node, is_missing)) node = node.parent return _deduplicate_path(path)
<SYSTEM_TASK:> Given a weighted graph with observed class labels of a subset of vertices, <END_TASK> <USER_TASK:> Description: def create(graph, label_field, threshold=1e-3, weight_field='', self_weight=1.0, undirected=False, max_iterations=None, _single_precision=False, _distributed='auto', verbose=True): """ Given a weighted graph with observed class labels of a subset of vertices, infer the label probability for the unobserved vertices using the "label propagation" algorithm. The algorithm iteratively updates the label probability of current vertex as a weighted sum of label probability of self and the neighboring vertices until converge. See :class:`turicreate.label_propagation.LabelPropagationModel` for the details of the algorithm. Notes: label propagation works well with small number of labels, i.e. binary labels, or less than 1000 classes. The toolkit will throw error if the number of classes exceeds the maximum value (1000). Parameters ---------- graph : SGraph The graph on which to compute the label propagation. label_field: str Vertex field storing the initial vertex labels. The values in must be [0, num_classes). None values indicate unobserved vertex labels. threshold : float, optional Threshold for convergence, measured in the average L2 norm (the sum of squared values) of the delta of each vertex's label probability vector. max_iterations: int, optional The max number of iterations to run. Default is unlimited. If set, the algorithm terminates when either max_iterations or convergence threshold is reached. weight_field: str, optional Vertex field for edge weight. If empty, all edges are assumed to have unit weight. self_weight: float, optional The weight for self edge. undirected: bool, optional If true, treat each edge as undirected, and propagates label in both directions. _single_precision : bool, optional If true, running label propagation in single precision. The resulting probability values may less accurate, but should run faster and use less memory. _distributed : distributed environment, internal verbose : bool, optional If True, print progress updates. Returns ------- out : LabelPropagationModel References ---------- - Zhu, X., & Ghahramani, Z. (2002). `Learning from labeled and unlabeled data with label propagation <http://www.cs.cmu.edu/~zhuxj/pub/CMU-CALD-02-107.pdf>`_. Examples -------- If given an :class:`~turicreate.SGraph` ``g``, we can create a :class:`~turicreate.label_propagation.LabelPropagationModel` as follows: >>> g = turicreate.load_sgraph('http://snap.stanford.edu/data/email-Enron.txt.gz', ... format='snap') # Initialize random classes for a subset of vertices # Leave the unobserved vertices with None label. >>> import random >>> def init_label(vid): ... x = random.random() ... if x < 0.2: ... return 0 ... elif x > 0.9: ... return 1 ... else: ... 
return None >>> g.vertices['label'] = g.vertices['__id'].apply(init_label, int) >>> m = turicreate.label_propagation.create(g, label_field='label') We can obtain for each vertex the predicted label and the probability of each label in the graph ``g`` using: >>> labels = m['labels'] # SFrame >>> labels +------+-------+-----------------+-------------------+----------------+ | __id | label | predicted_label | P0 | P1 | +------+-------+-----------------+-------------------+----------------+ | 5 | 1 | 1 | 0.0 | 1.0 | | 7 | None | 0 | 0.8213214997 | 0.1786785003 | | 8 | None | 1 | 5.96046447754e-08 | 0.999999940395 | | 10 | None | 0 | 0.534984718273 | 0.465015281727 | | 27 | None | 0 | 0.752801638549 | 0.247198361451 | | 29 | None | 1 | 5.96046447754e-08 | 0.999999940395 | | 33 | None | 1 | 5.96046447754e-08 | 0.999999940395 | | 47 | 0 | 0 | 1.0 | 0.0 | | 50 | None | 0 | 0.788279032657 | 0.211720967343 | | 52 | None | 0 | 0.666666666667 | 0.333333333333 | +------+-------+-----------------+-------------------+----------------+ [36692 rows x 5 columns] See Also -------- LabelPropagationModel """
from turicreate._cython.cy_server import QuietProgress _raise_error_if_not_of_type(label_field, str) _raise_error_if_not_of_type(weight_field, str) if not isinstance(graph, _SGraph): raise TypeError('graph input must be a SGraph object.') if graph.vertices[label_field].dtype != int: raise TypeError('label_field %s must be integer typed.' % label_field) opts = {'label_field': label_field, 'threshold': threshold, 'weight_field': weight_field, 'self_weight': self_weight, 'undirected': undirected, 'max_iterations': max_iterations, 'single_precision': _single_precision, 'graph': graph.__proxy__} with QuietProgress(verbose): params = _tc.extensions._toolkits.graph.label_propagation.create(opts) model = params['model'] return LabelPropagationModel(model)
<SYSTEM_TASK:> Check if a Turi Create model class is not pickle safe. <END_TASK> <USER_TASK:> Description: def _is_not_pickle_safe_gl_model_class(obj_class): """ Check if a Turi Create model class is not pickle safe. The function does it by checking that _CustomModel is the base class. Parameters ---------- obj_class : Class to be checked. Returns ---------- True if the GLC class is a model and is not pickle safe. """
if issubclass(obj_class, _toolkits._model.CustomModel): return not obj_class._is_gl_pickle_safe() return False
<SYSTEM_TASK:> Check if a class is a Turi Create data structure or a model that is not pickle safe. <END_TASK> <USER_TASK:> Description: def _is_not_pickle_safe_gl_class(obj_class): """ Check if a class is a Turi Create data structure or a model that is not pickle safe. The function does it by checking whether the class is one of the GLC data structures (SFrame, SArray, SGraph), or a model class that is not pickle safe. Parameters ---------- obj_class : Class to be checked. Returns ---------- True if the class is a GLC data structure or a GLC model that is not pickle safe. """
gl_ds = [_SFrame, _SArray, _SGraph] # Object is GLC-DS or GLC-Model return (obj_class in gl_ds) or _is_not_pickle_safe_gl_model_class(obj_class)
<SYSTEM_TASK:> Internal util to get the type of the GLC class. The pickle file stores <END_TASK> <USER_TASK:> Description: def _get_gl_class_type(obj_class): """ Internal util to get the type of the GLC class. The pickle file stores this name so that it knows how to construct the object on unpickling. Parameters ---------- obj_class : Class which has to be categorized. Returns ---------- A class type for the pickle file to save. """
if obj_class == _SFrame: return "SFrame" elif obj_class == _SGraph: return "SGraph" elif obj_class == _SArray: return "SArray" elif _is_not_pickle_safe_gl_model_class(obj_class): return "Model" else: return None
<SYSTEM_TASK:> Internal util to get a GLC object from a persistent ID in the pickle file. <END_TASK> <USER_TASK:> Description: def _get_gl_object_from_persistent_id(type_tag, gl_archive_abs_path): """ Internal util to get a GLC object from a persistent ID in the pickle file. Parameters ---------- type_tag : The name of the glc class as saved in the GLC pickler. gl_archive_abs_path: An absolute path to the GLC archive where the object was saved. Returns ---------- The GLC object. """
if type_tag == "SFrame": obj = _SFrame(gl_archive_abs_path) elif type_tag == "SGraph": obj = _load_graph(gl_archive_abs_path) elif type_tag == "SArray": obj = _SArray(gl_archive_abs_path) elif type_tag == "Model": from . import load_model as _load_model obj = _load_model(gl_archive_abs_path) else: raise _pickle.UnpicklingError("Turi pickling Error: Unsupported object." " Only SFrames, SGraphs, SArrays, and Models are supported.") return obj
<SYSTEM_TASK:> Provide a persistent ID for "saving" GLC objects by reference. Return <END_TASK> <USER_TASK:> Description: def persistent_id(self, obj): """ Provide a persistent ID for "saving" GLC objects by reference. Return None for all non GLC objects. Parameters ---------- obj: Name of the object whose persistent ID is extracted. Returns -------- None if the object is not a GLC object. (ClassName, relative path) if the object is a GLC object. Notes ----- Borrowed from pickle docs (https://docs.python.org/2/library/_pickle.html) For the benefit of object persistence, the pickle module supports the notion of a reference to an object outside the pickled data stream. To pickle objects that have an external persistent id, the pickler must have a custom persistent_id() method that takes an object as an argument and returns either None or the persistent id for that object. For GLC objects, the persistent_id is merely a relative file path (within the ZIP archive) to the GLC archive where the GLC object is saved. For example: (SFrame, 'sframe-save-path') (SGraph, 'sgraph-save-path') (Model, 'model-save-path') """
# Get the class of the object (if it can be done) obj_class = None if not hasattr(obj, '__class__') else obj.__class__ if obj_class is None: return None # If the object is a GLC class. if _is_not_pickle_safe_gl_class(obj_class): if (id(obj) in self.gl_object_memo): # has already been pickled return (None, None, id(obj)) else: # Save the location of the GLC object's archive to the pickle file. relative_filename = str(_uuid.uuid4()) filename = _os.path.join(self.gl_temp_storage_path, relative_filename) self.mark_for_delete -= set([filename]) # Save the GLC object obj.save(filename) # Memoize. self.gl_object_memo.add(id(obj)) # Return the tuple (class_name, relative_filename) in archive. return (_get_gl_class_type(obj.__class__), relative_filename, id(obj)) # Not a GLC object. Default to cloud pickle else: return None
<SYSTEM_TASK:> Close the pickle file, and the zip archive file. The single zip archive <END_TASK> <USER_TASK:> Description: def close(self): """ Close the pickle file, and the zip archive file. The single zip archive file can now be shipped around to be loaded by the unpickler. """
if self.file is None: return # Close the pickle file. self.file.close() self.file = None for f in self.mark_for_delete: error = [False] def register_error(*args): error[0] = True _shutil.rmtree(f, onerror = register_error) if error[0]: _atexit.register(_shutil.rmtree, f, ignore_errors=True)
<SYSTEM_TASK:> Reconstruct a GLC object using the persistent ID. <END_TASK> <USER_TASK:> Description: def persistent_load(self, pid): """ Reconstruct a GLC object using the persistent ID. This method should not be used externally. It is required by the unpickler super class. Parameters ---------- pid : The persistent ID used in pickle file to save the GLC object. Returns ---------- The GLC object. """
if len(pid) == 2:
    # Pre GLC-1.3 release behavior, without memoization
    type_tag, filename = pid
    abs_path = _os.path.join(self.gl_temp_storage_path, filename)
    return _get_gl_object_from_persistent_id(type_tag, abs_path)
else:
    # Post GLC-1.3 release behavior, with memoization
    type_tag, filename, object_id = pid
    if object_id in self.gl_object_memo:
        return self.gl_object_memo[object_id]
    else:
        abs_path = _os.path.join(self.gl_temp_storage_path, filename)
        obj = _get_gl_object_from_persistent_id(type_tag, abs_path)
        self.gl_object_memo[object_id] = obj
        return obj
<SYSTEM_TASK:> Clean up files that were created. <END_TASK> <USER_TASK:> Description: def close(self): """ Clean up files that were created. """
if self.file: self.file.close() self.file = None # If temp_file is a folder, we do not remove it because we may # still need it after the unpickler is disposed if self.tmp_file and _os.path.isfile(self.tmp_file): _os.remove(self.tmp_file) self.tmp_file = None
<SYSTEM_TASK:> Convert scikit-learn pipeline, classifier, or regressor to Core ML format. <END_TASK> <USER_TASK:> Description: def convert(sk_obj, input_features = None, output_feature_names = None): """ Convert scikit-learn pipeline, classifier, or regressor to Core ML format. Parameters ---------- sk_obj: model | [model] of scikit-learn format. Scikit learn model(s) to convert to a Core ML format. The input model may be a single scikit learn model, a scikit learn pipeline model, or a list of scikit learn models. Currently supported scikit learn models are: - Linear and Logistic Regression - LinearSVC and LinearSVR - SVC and SVR - NuSVC and NuSVR - Gradient Boosting Classifier and Regressor - Decision Tree Classifier and Regressor - Random Forest Classifier and Regressor - Normalizer - Imputer - Standard Scaler - DictVectorizer - One Hot Encoder The input model, or the last model in a pipeline or list of models, determines whether this is exposed as a Transformer, Regressor, or Classifier. Note that there may not be a one-to-one correspondence between scikit learn models and which Core ML models are used to represent them. For example, many scikit learn models are embedded in a pipeline to handle processing of input features. input_features: str | dict | list Optional name(s) that can be given to the inputs of the scikit-learn model. Defaults to 'input'. Input features can be specified in a number of forms. - Single string: In this case, the input is assumed to be a single array, with the number of dimensions set using num_dimensions. - List of strings: In this case, the overall input dimensions to the scikit-learn model is assumed to be the length of the list. If neighboring names are identical, they are assumed to be an input array of that length. For example: ["a", "b", "c"] resolves to [("a", Double), ("b", Double), ("c", Double)]. And: ["a", "a", "b"] resolves to [("a", Array(2)), ("b", Double)]. - Dictionary: Where the keys are the names and the indices or ranges of feature indices. In this case, it's presented as a mapping from keys to indices or ranges of contiguous indices. For example, {"a" : 0, "b" : [2,3], "c" : 1} Resolves to [("a", Double), ("c", Double), ("b", Array(2))]. Note that the ordering is determined by the indices. - List of tuples of the form `(name, datatype)`. Here, `name` is the name of the exposed feature, and `datatype` is an instance of `String`, `Double`, `Int64`, `Array`, or `Dictionary`. output_feature_names: string or list of strings Optional name(s) that can be given to the inputs of the scikit-learn model. The output_feature_names is interpreted according to the model type: - If the scikit-learn model is a transformer, it is the name of the array feature output by the final sequence of the transformer (defaults to "output"). - If it is a classifier, it should be a 2-tuple of names giving the top class prediction and the array of scores for each class (defaults to "classLabel" and "classScores"). - If it is a regressor, it should give the name of the prediction value (defaults to "prediction"). Returns ------- model:MLModel Returns an MLModel instance representing a Core ML model. Examples -------- .. 
sourcecode:: python >>> from sklearn.linear_model import LinearRegression >>> import pandas as pd # Load data >>> data = pd.read_csv('houses.csv') # Train a model >>> model = LinearRegression() >>> model.fit(data[["bedroom", "bath", "size"]], data["price"]) # Convert and save the scikit-learn model >>> import coremltools >>> coreml_model = coremltools.converters.sklearn.convert(model, ["bedroom", "bath", "size"], "price") >>> coreml_model.save('HousePricer.mlmodel') """
# This function is just a thin wrapper around the internal converter so # that sklearn isn't actually imported unless this function is called from ...models import MLModel # NOTE: Providing user-defined class labels will be enabled when # several issues with the ordering of the classes are worked out. For now, # to use custom class labels, directly import the internal function below. from ._converter_internal import _convert_sklearn_model spec = _convert_sklearn_model( sk_obj, input_features, output_feature_names, class_labels = None) return MLModel(spec)
<SYSTEM_TASK:> Generate a new Message instance from this Descriptor and a byte string. <END_TASK> <USER_TASK:> Description: def ParseMessage(descriptor, byte_str): """Generate a new Message instance from this Descriptor and a byte string. Args: descriptor: Protobuf Descriptor object byte_str: Serialized protocol buffer byte string Returns: Newly created protobuf Message object. """
result_class = MakeClass(descriptor) new_msg = result_class() new_msg.ParseFromString(byte_str) return new_msg
<SYSTEM_TASK:> Construct a class object for a protobuf described by descriptor. <END_TASK> <USER_TASK:> Description: def MakeClass(descriptor): """Construct a class object for a protobuf described by descriptor. Composite descriptors are handled by defining the new class as a member of the parent class, recursing as deep as necessary. This is the dynamic equivalent to: class Parent(message.Message): __metaclass__ = GeneratedProtocolMessageType DESCRIPTOR = descriptor class Child(message.Message): __metaclass__ = GeneratedProtocolMessageType DESCRIPTOR = descriptor.nested_types[0] Sample usage: file_descriptor = descriptor_pb2.FileDescriptorProto() file_descriptor.ParseFromString(proto2_string) msg_descriptor = descriptor.MakeDescriptor(file_descriptor.message_type[0]) msg_class = reflection.MakeClass(msg_descriptor) msg = msg_class() Args: descriptor: A descriptor.Descriptor object describing the protobuf. Returns: The Message class object described by the descriptor. """
if descriptor in MESSAGE_CLASS_CACHE: return MESSAGE_CLASS_CACHE[descriptor] attributes = {} for name, nested_type in descriptor.nested_types_by_name.items(): attributes[name] = MakeClass(nested_type) attributes[GeneratedProtocolMessageType._DESCRIPTOR_KEY] = descriptor result = GeneratedProtocolMessageType( str(descriptor.name), (message.Message,), attributes) MESSAGE_CLASS_CACHE[descriptor] = result return result
<SYSTEM_TASK:> Loads images from a directory. JPEG and PNG images are supported. <END_TASK> <USER_TASK:> Description: def load_images(url, format='auto', with_path=True, recursive=True, ignore_failure=True, random_order=False): """ Loads images from a directory. JPEG and PNG images are supported. Parameters ---------- url : str The string of the path where all the images are stored. format : {'PNG' | 'JPG' | 'auto'}, optional The format of the images in the directory. The default 'auto' parameter value tries to infer the image type from the file extension. If a format is specified, all images must be of that format. with_path : bool, optional Indicates whether a path column is added to the SFrame. If 'with_path' is set to True, the returned SFrame contains a 'path' column, which holds a path string for each Image object. recursive : bool, optional Indicates whether 'load_images' should do recursive directory traversal, or a flat directory traversal. ignore_failure : bool, optional If True, prints a warning for failed images and keeps loading the rest of the images. random_order : bool, optional Load images in random order. Returns ------- out : SFrame Returns an SFrame with either an 'image' column or both an 'image' and a 'path' column. The 'image' column is a column of Image objects. If with_path is True, there is also a 'path' column which contains the image path for each corresponding Image object. Examples -------- >>> url ='https://static.turi.com/datasets/images/nested' >>> image_sframe = turicreate.image_analysis.load_images(url, "auto", with_path=False, ... recursive=True) """
from ... import extensions as _extensions from ...util import _make_internal_url return _extensions.load_images(url, format, with_path, recursive, ignore_failure, random_order)
<SYSTEM_TASK:> Internal helper function for decoding a single Image or an SArray of Images <END_TASK> <USER_TASK:> Description: def _decode(image_data): """ Internal helper function for decoding a single Image or an SArray of Images """
from ...data_structures.sarray import SArray as _SArray from ... import extensions as _extensions if type(image_data) is _SArray: return _extensions.decode_image_sarray(image_data) elif type(image_data) is _Image: return _extensions.decode_image(image_data)
<SYSTEM_TASK:> Resizes the image or SArray of Images to a specific width, height, and <END_TASK> <USER_TASK:> Description: def resize(image, width, height, channels=None, decode=False, resample='nearest'): """ Resizes the image or SArray of Images to a specific width, height, and number of channels. Parameters ---------- image : turicreate.Image | SArray The image or SArray of images to be resized. width : int The width the image is resized to. height : int The height the image is resized to. channels : int, optional The number of channels the image is resized to. 1 channel corresponds to grayscale, 3 channels correspond to RGB, and 4 channels correspond to RGBA images. decode : bool, optional Whether to store the resized image in decoded format. Decoded images take more space, but make the resize and future operations on the image faster. resample : 'nearest' or 'bilinear' Specify the resampling filter: - ``'nearest'``: Nearest neighbor, extremely fast - ``'bilinear'``: Bilinear, fast and with less aliasing artifacts Returns ------- out : turicreate.Image or SArray Returns a resized Image object, or an SArray of resized Images if the input was an SArray. Notes ----- Grayscale Images -> Images with one channel, representing a scale from white to black RGB Images -> Images with 3 channels, with each pixel having Green, Red, and Blue values. RGBA Images -> An RGB image with an opacity channel. Examples -------- Resize a single image >>> img = turicreate.Image('https://static.turi.com/datasets/images/sample.jpg') >>> resized_img = turicreate.image_analysis.resize(img,100,100,1) Resize an SArray of images >>> url ='https://static.turi.com/datasets/images/nested' >>> image_sframe = turicreate.image_analysis.load_images(url, "auto", with_path=False, ... recursive=True) >>> image_sarray = image_sframe["image"] >>> resized_images = turicreate.image_analysis.resize(image_sarray, 100, 100, 1) """
if height < 0 or width < 0: raise ValueError("Cannot resize to negative sizes") if resample == 'nearest': resample_method = 0 elif resample == 'bilinear': resample_method = 1 else: raise ValueError("Unknown resample option: '%s'" % resample) from ...data_structures.sarray import SArray as _SArray from ... import extensions as _extensions if type(image) is _Image: if channels is None: channels = image.channels if channels <= 0: raise ValueError("cannot resize images to 0 or fewer channels") return _extensions.resize_image(image, width, height, channels, decode, resample_method) elif type(image) is _SArray: if channels is None: channels = 3 if channels <= 0: raise ValueError("cannot resize images to 0 or fewer channels") return image.apply(lambda x: _extensions.resize_image(x, width, height, channels, decode, resample_method)) else: raise ValueError("Cannot call 'resize' on objects that are not either an Image or SArray of Images")
<SYSTEM_TASK:> Convert bit array to byte array. <END_TASK> <USER_TASK:> Description: def _convert_1bit_array_to_byte_array(arr): """ Convert bit array to byte array. :param arr: list Bits as a list where each element is an integer of 0 or 1 Returns ------- numpy.array 1D numpy array of type uint8 """
# Padding if necessary while len(arr) < 8 or len(arr) % 8: arr.append(0) arr = _np.array(arr, dtype='uint8') bit_arr = [] idx = 0 # Iterate and combine 8-bits into a uint8 for arr_idx in range(int(len(arr) / 8)): bit_arr.append(((arr[idx] << 7) & (1 << 7)) | ((arr[idx+1] << 6) & (1 << 6)) | ((arr[idx+2] << 5) & (1 << 5)) | ((arr[idx+3] << 4) & (1 << 4)) | ((arr[idx+4] << 3) & (1 << 3)) | ((arr[idx+5] << 2) & (1 << 2)) | ((arr[idx+6] << 1) & (1 << 1)) | ((arr[idx+7] << 0) & (1 << 0)) ) idx += 8 return _np.array(bit_arr, dtype='uint8')
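A quick sanity check of the packing; the first list element becomes the most significant bit of the first byte, and short inputs are zero-padded to a whole byte:

>>> _convert_1bit_array_to_byte_array([1, 0, 0, 0, 0, 0, 0, 1])
array([129], dtype=uint8)
>>> _convert_1bit_array_to_byte_array([1, 1, 1])
array([224], dtype=uint8)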
<SYSTEM_TASK:> Unpack bytes to bits <END_TASK> <USER_TASK:> Description: def _decompose_bytes_to_bit_arr(arr): """ Unpack bytes to bits :param arr: list Byte Stream, as a list of uint8 values Returns ------- bit_arr: list Decomposed bit stream as a list of 0/1s of length (len(arr) * 8) """
bit_arr = [] for idx in range(len(arr)): for i in reversed(range(8)): bit_arr.append((arr[idx] >> i) & (1 << 0)) return bit_arr
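Unpacking is the inverse of the packing above (modulo the zero padding), so bytes produced there decompose back into their bits:

>>> _decompose_bytes_to_bit_arr([129])
[1, 0, 0, 0, 0, 0, 0, 1]
>>> _decompose_bytes_to_bit_arr([129, 224])
[1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0]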
<SYSTEM_TASK:> Generate a linear lookup table. <END_TASK> <USER_TASK:> Description: def _get_linear_lookup_table_and_weight(nbits, wp): """ Generate a linear lookup table. :param nbits: int Number of bits to represent a quantized weight value :param wp: numpy.array Weight blob to be quantized Returns ------- lookup_table: numpy.array Lookup table of shape (2^nbits, ) qw: numpy.array Quantized weight indices of the same size as wp, with values in [0, 2^nbits - 1] """
w = wp.reshape(1, -1) qw, scales, biases = _quantize_channelwise_linear(w, nbits, axis=0) indices = _np.array(range(0, 2**nbits)) lookup_table = indices * scales[0] + biases[0] return lookup_table, qw
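A tiny illustration with a 2-bit table; the values are chosen to be evenly spaced so that the reconstruction lut[index] is exact (in general it is only approximate):

>>> import numpy as np
>>> lut, qw = _get_linear_lookup_table_and_weight(2, np.array([0.0, 1.0, 2.0, 3.0]))
>>> # lut is [0., 1., 2., 3.] and qw holds the per-weight indices [0., 1., 2., 3.],
>>> # so lut[qw.astype(int)] reproduces the original weights.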
<SYSTEM_TASK:> Generate K-Means lookup table given a weight parameter field <END_TASK> <USER_TASK:> Description: def _get_kmeans_lookup_table_and_weight(nbits, w, init='k-means++', tol=1e-2, n_init=1, rand_seed=0): """ Generate K-Means lookup table given a weight parameter field :param nbits: Number of bits for quantization :param w: Weight as numpy array Returns ------- lut: numpy.array Lookup table, numpy array of shape (1 << nbits, ); wq: numpy.array Quantized weight of type numpy.uint8 """
if _HAS_SKLEARN: from sklearn.cluster import KMeans else: raise Exception('sklearn package required for k-means quantization') units = _np.prod(w.shape) lut_len = 1 << nbits n_clusters = units if (units < lut_len) else lut_len wf = w.reshape(-1, 1) kmeans = KMeans(n_clusters=n_clusters, init=init, tol=tol, n_init=n_init, random_state=rand_seed).fit(wf) wq = kmeans.labels_[:units] lut = _np.zeros(lut_len) lut[:n_clusters] = kmeans.cluster_centers_.flatten() return lut, wq
<SYSTEM_TASK:> Linearly quantize weight blob. <END_TASK> <USER_TASK:> Description: def _quantize_channelwise_linear(weight, nbits, axis=0): """ Linearly quantize weight blob. :param weight: numpy.array Weight to be quantized. :param nbits: int Number of bits per weight element :param axis: int Axis of the weight blob to compute channel-wise quantization, can be 0 or 1 Returns ------- quantized_weight: numpy.array quantized weight as float numpy array, with the same shape as weight scale: numpy.array per channel scale bias: numpy.array per channel bias """
if len(weight.shape) == 1: # vector situation, treat as 1 channel weight = weight.reshape((1, weight.shape[0])) rank = len(weight.shape) if axis == 1: transposed_axis_order = (1,0) + tuple(range(2,rank)) weight = _np.transpose(weight, transposed_axis_order) num_channels = weight.shape[0] shape = weight.shape weight = weight.reshape((num_channels, -1)) # [C, L] a = _np.amin(weight, axis=-1) # [C,] b = _np.amax(weight, axis=-1) # [C,] # Quantize weights to full range [0, (1 << nbits) - 1] qa = 0 qb = (1 << nbits) - 1 # Use a mask to filter out channels with very close weight values mask = (b - a) > 1e-5 # [C,1] (normal channels) r_mask = ~mask # (all-same-value) channels qw = _np.zeros_like(weight) # [C, L] scale = _np.ones((num_channels,)) bias = _np.zeros((num_channels,)) if _np.any(mask): # normal channels qw[mask] = (weight[mask] - a[mask][:,None]) / (b[mask] - a[mask])[:,None] * (qb - qa) + qa scale[mask] = (b[mask] - a[mask]) / (qb - qa) bias[mask] = - scale[mask] * qa + a[mask] if _np.any(r_mask): # singular channels qw[r_mask] = qa scale[r_mask] = 0 bias[r_mask] = a[r_mask] # Reshape quantized_weight = qw.reshape(shape) if axis == 1: quantized_weight = _np.transpose(quantized_weight, transposed_axis_order) return (quantized_weight, scale, bias)
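A worked example of per-channel (axis 0) 8-bit quantization; the second row exercises the constant-channel branch, where the scale collapses to 0 and the bias carries the value:

>>> import numpy as np
>>> w = np.array([[0.0, 1.0, 2.0, 3.0],
...               [5.0, 5.0, 5.0, 5.0]])
>>> qw, scale, bias = _quantize_channelwise_linear(w, nbits=8, axis=0)
>>> # qw[0] is [0., 85., 170., 255.], scale is [3/255., 0.], bias is [0., 5.];
>>> # qw * scale[:, None] + bias[:, None] recovers w.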
<SYSTEM_TASK:> Quantize the weight blob <END_TASK> <USER_TASK:> Description: def _quantize_wp(wp, nbits, qm, axis=0, **kwargs): """ Quantize the weight blob :param wp: numpy.array Weight parameters :param nbits: int Number of bits :param qm: Quantization mode :param lut_function: (``callable function``) Python callable representing a look-up table Returns ------- scale: numpy.array Per-channel scale bias: numpy.array Per-channel bias lut: numpy.array Lookup table quantized_wp: numpy.array Quantized weight of same shape as wp, with dtype numpy.uint8 """
scale = bias = lut = None # Linear Quantization if qm == _QUANTIZATION_MODE_LINEAR_QUANTIZATION: qw, scale, bias = _quantize_channelwise_linear(wp, nbits, axis) # Lookup tables elif qm == _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS: lut, qw = _get_kmeans_lookup_table_and_weight(nbits, wp) elif qm == _QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE: if 'lut_function' not in kwargs.keys(): raise Exception('Custom lookup table quantization mode ' 'selected but no lookup table function passed') lut_function = kwargs['lut_function'] if not callable(lut_function): raise Exception('Argument for Lookup Table passed in but is ' 'not callable') try: lut, qw = lut_function(nbits, wp) except Exception as e: raise Exception('{}\nCall to Lookup Table function failed' .format(e.message)) elif qm == _QUANTIZATION_MODE_LOOKUP_TABLE_LINEAR: lut, qw = _get_linear_lookup_table_and_weight(nbits, wp) else: raise NotImplementedError('Quantization method "{}" not supported'.format(qm)) quantized_wp = _np.uint8(qw) return scale, bias, lut, quantized_wp
<SYSTEM_TASK:> Quantize WeightParam field in Neural Network Protobuf <END_TASK> <USER_TASK:> Description: def _quantize_wp_field(wp, nbits, qm, shape, axis=0, **kwargs): """ Quantize WeightParam field in Neural Network Protobuf :param wp: MLModel.NeuralNetwork.WeightParam WeightParam field :param nbits: int Number of bits to be quantized :param qm: str Quantization mode :param shape: tuple Tensor shape held by wp :param axis: int Axis over which quantization is performed on, can be either 0 or 1 :param lut_function: (``callable function``) Python callable representing a LUT table function """
# De-quantization if qm == _QUANTIZATION_MODE_DEQUANTIZE: return _dequantize_wp(wp, shape, axis) # If the float32 field is empty do nothing and return if len(wp.floatValue) == 0: return # Half precision (16-bit) quantization if nbits == 16: return _wp_to_fp16wp(wp) if nbits > 8: raise Exception('Only 8-bit and lower quantization is supported') if qm not in _SUPPORTED_QUANTIZATION_MODES: raise Exception('Quantization mode {} not supported'.format(qm)) # axis parameter check if axis == 1 and len(shape) != 4: raise Exception('Quantization on second axis is only supported ' 'for rank-4 weight blob.') if axis != 0 and axis != 1: raise Exception('Invalid quantization axis {} passed in. Allowed' 'values are 0 (first axis) and 1 (second axis)'.format(axis)) # WeightParam size check - non-linear quantizations are applied on layer level num_channels = shape[axis] if qm == _QUANTIZATION_MODE_LINEAR_QUANTIZATION else 1 if len(wp.floatValue) % num_channels: raise Exception('Number of quantization channels does not divide evenly into weights') qparams = wp.quantization qparams.numberOfBits = nbits weights = _np.array(wp.floatValue).reshape(shape) scale, bias, lut, uint8_weights = _quantize_wp(weights, nbits, qm, axis, **kwargs) uint8_weights = uint8_weights.flatten() if qm == _QUANTIZATION_MODE_LINEAR_QUANTIZATION: qparams.linearQuantization.scale.extend(scale) qparams.linearQuantization.bias.extend(bias) else: qparams.lookupTableQuantization.floatValue.extend(lut) wp.rawValue = bytes() if nbits == 8: wp.rawValue += uint8_weights.tobytes() else: wp.rawValue += _convert_array_to_nbit_quantized_bytes(uint8_weights, nbits).tobytes() del wp.floatValue[:]
<SYSTEM_TASK:> Utility function to compare the performance of a full precision vs quantized model <END_TASK> <USER_TASK:> Description: def compare_models(full_precision_model, quantized_model, sample_data): """ Utility function to compare the performance of a full precision vs quantized model :param full_precision_model: MLModel The full precision model with float32 weights :param quantized_model: MLModel Quantized version of the model with quantized weights :param sample_data: str | [dict] Data used to characterize performance of the quantized model in comparison to the full precision model. Either a list of sample input dictionaries or an absolute path to a directory containing images. Path to a directory containing images is only valid for models with one image input. For all other models a list of sample inputs must be provided. :return: None. Performance metrics are printed out """
emessage = (""" Invalid sample data provided. Only a list of dictionaries containing sample data or path to a folder containing images is supported""") spec = full_precision_model.get_spec() num_inputs = len(spec.description.input) if isinstance(sample_data, str): input_type = spec.description.input[0].type.WhichOneof('Type') if num_inputs != 1 or input_type != 'imageType': raise Exception("""Unable to analyze quantized models. Sample data was a path to a directory which is only supported with models with one image type input. Please try passing in a list of sample inputs as sample data. """) _characterize_qmodel_perf_with_data_dir(full_precision_model, quantized_model.get_spec(), sample_data) elif isinstance(sample_data, list): if not all(type(d) is dict for d in sample_data): raise Exception(emessage) _characterize_quantized_model_perf(full_precision_model, quantized_model.get_spec(), sample_data) else: raise Exception(emessage)
<SYSTEM_TASK:> Create a recommender that uses item-item similarities based on <END_TASK> <USER_TASK:> Description: def create(observation_data, user_id='user_id', item_id='item_id', target=None, user_data=None, item_data=None, nearest_items=None, similarity_type='jaccard', threshold=0.001, only_top_k=64, verbose=True, target_memory_usage = 8*1024*1024*1024, **kwargs): """ Create a recommender that uses item-item similarities based on users in common. Parameters ---------- observation_data : SFrame The dataset to use for training the model. It must contain a column of user ids and a column of item ids. Each row represents an observed interaction between the user and the item. The (user, item) pairs are stored with the model so that they can later be excluded from recommendations if desired. It can optionally contain a target ratings column. All other columns are interpreted by the underlying model as side features for the observations. The user id and item id columns must be of type 'int' or 'str'. The target column must be of type 'int' or 'float'. user_id : string, optional The name of the column in `observation_data` that corresponds to the user id. item_id : string, optional The name of the column in `observation_data` that corresponds to the item id. target : string, optional The `observation_data` can optionally contain a column of scores representing ratings given by the users. If present, the name of this column may be specified variables `target`. user_data : SFrame, optional Side information for the users. This SFrame must have a column with the same name as what is specified by the `user_id` input parameter. `user_data` can provide any amount of additional user-specific information. (NB: This argument is currently ignored by this model.) item_data : SFrame, optional Side information for the items. This SFrame must have a column with the same name as what is specified by the `item_id` input parameter. `item_data` can provide any amount of additional item-specific information. (NB: This argument is currently ignored by this model.) similarity_type : {'jaccard', 'cosine', 'pearson'}, optional Similarity metric to use. See ItemSimilarityRecommender for details. Default: 'jaccard'. threshold : float, optional Predictions ignore items below this similarity value. Default: 0.001. only_top_k : int, optional Number of similar items to store for each item. Default value is 64. Decreasing this decreases the amount of memory required for the model, but may also decrease the accuracy. nearest_items : SFrame, optional A set of each item's nearest items. When provided, this overrides the similarity computed above. See Notes in the documentation for ItemSimilarityRecommender. Default: None. target_memory_usage : int, optional The target memory usage for the processing buffers and lookup tables. The actual memory usage may be higher or lower than this, but decreasing this decreases memory usage at the expense of training time, and increasing this can dramatically speed up the training time. Default is 8GB = 8589934592. seed_item_set_size : int, optional For users that have not yet rated any items, or have only rated uniquely occurring items with no similar item info, the model seeds the user's item set with the average ratings of the seed_item_set_size most popular items when making predictions and recommendations. If set to 0, then recommendations based on either popularity (no target present) or average item score (target present) are made in this case. training_method : (advanced), optional. 
The internal processing is done with a combination of nearest neighbor searching, dense tables for tracking item-item similarities, and sparse item-item tables. If 'auto' is chosen (default), then the estimated computation time is estimated for each, and the computation balanced between the methods in order to minimize training time given the target memory usage. This allows the user to force the use of one of these methods. All should give equivalent results; the only difference would be training time. Possible values are {'auto', 'dense', 'sparse', 'nn', 'nn:dense', 'nn:sparse'}. 'dense' uses a dense matrix to store item-item interactions as a lookup, and may do multiple passes to control memory requirements. 'sparse' does the same but with a sparse lookup table; this is better if the data has many infrequent items. "nn" uses a brute-force nearest neighbors search. "nn:dense" and "nn:sparse" use nearest neighbors for the most frequent items (see nearest_neighbors_interaction_proportion_threshold below), and either sparse or dense matrices for the remainder. "auto" chooses the method predicted to be the fastest based on the properties of the data. nearest_neighbors_interaction_proportion_threshold : (advanced) float Any item that has was rated by more than this proportion of users is treated by doing a nearest neighbors search. For frequent items, this is almost always faster, but it is slower for infrequent items. Furthermore, decreasing this causes more items to be processed using the nearest neighbor path, which may decrease memory requirements. degree_approximation_threshold : (advanced) int, optional Users with more than this many item interactions may be approximated. The approximation is done by a combination of sampling and choosing the interactions likely to have the most impact on the model. Increasing this can increase the training time and may or may not increase the quality of the model. Default = 4096. max_data_passes : (advanced) int, optional The maximum number of passes through the data allowed in building the similarity lookup tables. If it is not possible to build the recommender in this many passes (calculated before that stage of training), then additional approximations are applied; namely decreasing degree_approximation_threshold. If this is not possible, an error is raised. To decrease the number of passes required, increase target_memory_usage or decrease nearest_neighbors_interaction_proportion_threshold. Default = 1024. Examples -------- Given basic user-item observation data, an :class:`~turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender` is created: >>> sf = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'], ... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd']}) >>> m = turicreate.item_similarity_recommender.create(sf) >>> recs = m.recommend() When a target is available, one can specify the desired similarity. For example we may choose to use a cosine similarity, and use it to make predictions or recommendations. >>> sf2 = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'], ... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd'], ... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]}) >>> m2 = turicreate.item_similarity_recommender.create(sf2, target="rating", ... similarity_type='cosine') >>> m2.predict(sf) >>> m2.recommend() Notes ----- Currently, :class:`~turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender` does not leverage the use of side features `user_data` and `item_data`. 
**Incorporating pre-defined similar items** For item similarity models, one may choose to provide user-specified nearest neighbors graph using the keyword argument `nearest_items`. This is an SFrame containing, for each item, the nearest items and the similarity score between them. If provided, these item similarity scores are used for recommendations. The SFrame must contain (at least) three columns: * 'item_id': a column with the same name as that provided to the `item_id` argument (which defaults to the string "item_id"). * 'similar': a column containing the nearest items for the given item id. This should have the same type as the `item_id` column. * 'score': a numeric score measuring how similar these two items are. For example, suppose you first create an ItemSimilarityRecommender and use :class:`~turicreate.recommender.ItemSimilarityRecommender.get_similar_items`: >>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"], ... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]}) >>> m = turicreate.item_similarity_recommender.create(sf) >>> nn = m.get_similar_items() >>> m2 = turicreate.item_similarity_recommender.create(sf, nearest_items=nn) With the above code, the item similarities computed for model `m` can be used to create a new recommender object, `m2`. Note that we could have created `nn` from some other means, but now use `m2` to make recommendations via `m2.recommend()`. See Also -------- ItemSimilarityRecommender """
from turicreate._cython.cy_server import QuietProgress

opts = {}
model_proxy = _turicreate.extensions.item_similarity()
model_proxy.init_options(opts)

if user_data is None:
    user_data = _turicreate.SFrame()
if item_data is None:
    item_data = _turicreate.SFrame()
if nearest_items is None:
    nearest_items = _turicreate.SFrame()

if "training_method" in kwargs and kwargs["training_method"] in ["in_memory", "sgraph"]:
    print("WARNING: training_method = " + str(kwargs["training_method"])
          + " deprecated; see documentation.")
    kwargs["training_method"] = "auto"

opts = {'user_id': user_id,
        'item_id': item_id,
        'target': target,
        'similarity_type': similarity_type,
        'threshold': threshold,
        'target_memory_usage': float(target_memory_usage),
        'max_item_neighborhood_size': only_top_k}

extra_data = {"nearest_items": nearest_items}

if kwargs:
    try:
        possible_args = set(_get_default_options()["name"])
    except (RuntimeError, KeyError):
        possible_args = set()

    bad_arguments = set(kwargs.keys()).difference(possible_args)
    if bad_arguments:
        raise TypeError("Bad Keyword Arguments: " + ', '.join(bad_arguments))

    opts.update(kwargs)

with QuietProgress(verbose):
    model_proxy.train(observation_data, user_data, item_data, opts, extra_data)

return ItemSimilarityRecommender(model_proxy)
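The advanced options documented above are passed as plain keyword arguments
to `create` and forwarded through `kwargs`. A minimal usage sketch, assuming
the default column names; the data and the specific option values are made
up for illustration and are not recommended settings:

import turicreate

# Hypothetical observation data using the default 'user_id'/'item_id' names.
sf = turicreate.SFrame({'user_id': ['0', '0', '1', '1', '2'],
                        'item_id': ['a', 'b', 'a', 'c', 'b']})

# Sketch only: illustrative values for the advanced options described above.
m = turicreate.item_similarity_recommender.create(
    sf,
    nearest_neighbors_interaction_proportion_threshold=0.05,
    degree_approximation_threshold=2048,
    max_data_passes=256)

recs = m.recommend()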
<SYSTEM_TASK:>
Convert a ReLU layer with a maximum value from keras to coreml.
<END_TASK>
<USER_TASK:>
Description:
def convert_advanced_relu(builder, layer, input_names, output_names, keras_layer):
    """
    Convert a ReLU layer with a maximum value from keras to coreml.

    Parameters
    ----------
    keras_layer: layer
        A keras layer object.

    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])

if keras_layer.max_value is None:
    builder.add_activation(layer, 'RELU', input_name, output_name)
    return

# No direct support of RELU with max-activation value - use negate and
# clip layers
relu_output_name = output_name + '_relu'
builder.add_activation(layer, 'RELU', input_name, relu_output_name)

# negate it
neg_output_name = relu_output_name + '_neg'
builder.add_activation(layer + '__neg__', 'LINEAR', relu_output_name,
                       neg_output_name, [-1.0, 0])

# apply threshold
clip_output_name = relu_output_name + '_clip'
builder.add_unary(layer + '__clip__', neg_output_name, clip_output_name,
                  'threshold', alpha=-keras_layer.max_value)

# negate it back
builder.add_activation(layer + '_neg2', 'LINEAR', clip_output_name,
                       output_name, [-1.0, 0])
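A quick way to see why the negate/threshold/negate sequence reproduces a
clipped ReLU: the conversion relies on the 'threshold' unary computing an
elementwise max(x, alpha). A small NumPy check of that identity; the max
value of 6.0 is an arbitrary example:

import numpy as np

x = np.linspace(-3.0, 9.0, 13)
max_value = 6.0

relu = np.maximum(x, 0.0)              # 'RELU'
neg = -1.0 * relu                      # 'LINEAR' with scale -1
clipped = np.maximum(neg, -max_value)  # 'threshold' with alpha = -max_value
composed = -1.0 * clipped              # 'LINEAR' with scale -1

assert np.allclose(composed, np.minimum(relu, max_value))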
<SYSTEM_TASK:>
Convert a separable convolution layer from keras to coreml.
<END_TASK>
<USER_TASK:>
Description:
def convert_separable_convolution(builder, layer, input_names, output_names, keras_layer):
    """
    Convert a separable convolution layer from keras to coreml.

    Parameters
    ----------
    keras_layer: layer
        A keras layer object.

    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
_check_data_format(keras_layer)

# Get input and output names
input_name, output_name = (input_names[0], output_names[0])

has_bias = keras_layer.use_bias

# Get the weights from _keras.
weight_list = keras_layer.get_weights()
output_blob_shape = list(filter(None, keras_layer.output_shape))
output_channels = output_blob_shape[-1]

# D: depth multiplier
# w[0] is (H, W, Cin, D)
# w[1] is (1, 1, Cin * D, Cout)
W0 = weight_list[0]
W1 = weight_list[1]
height, width, input_channels, depth_mult = W0.shape
b = weight_list[2] if has_bias else None

W0 = _np.reshape(W0, (height, width, 1, input_channels * depth_mult))

stride_height, stride_width = keras_layer.strides

# Dilations
if (type(keras_layer.dilation_rate) is list) or (type(keras_layer.dilation_rate) is tuple):
    dilations = [keras_layer.dilation_rate[0], keras_layer.dilation_rate[1]]
else:
    dilations = [keras_layer.dilation_rate, keras_layer.dilation_rate]

intermediate_name = output_name + '_intermin_'

# Step 1: depthwise convolution, expressed as a grouped convolution.
builder.add_convolution(name=layer + '_step_1',
                        kernel_channels=1,
                        output_channels=input_channels * depth_mult,
                        height=height,
                        width=width,
                        stride_height=stride_height,
                        stride_width=stride_width,
                        border_mode=keras_layer.padding,
                        groups=input_channels,
                        W=W0,
                        b=None,
                        has_bias=False,
                        is_deconv=False,
                        output_shape=None,
                        input_name=input_name,
                        output_name=intermediate_name,
                        dilation_factors=dilations)

# Step 2: pointwise (1x1) convolution.
builder.add_convolution(name=layer + '_step_2',
                        kernel_channels=input_channels * depth_mult,
                        output_channels=output_channels,
                        height=1,
                        width=1,
                        stride_height=1,
                        stride_width=1,
                        border_mode=keras_layer.padding,
                        groups=1,
                        W=W1,
                        b=b,
                        has_bias=has_bias,
                        is_deconv=False,
                        output_shape=None,
                        input_name=intermediate_name,
                        output_name=output_name,
                        dilation_factors=[1, 1])
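The reshape in the depthwise step is the easiest part to lose track of. A
short NumPy sketch of the shape bookkeeping, using made-up sizes:

import numpy as np

# Illustrative sizes only.
H, W, Cin, D, Cout = 3, 3, 8, 2, 16

W0 = np.zeros((H, W, Cin, D))          # Keras depthwise kernel
W1 = np.zeros((1, 1, Cin * D, Cout))   # Keras pointwise kernel

# Step 1 is a grouped convolution with groups = Cin: each group reads one
# input channel and produces D outputs, so the kernel is laid out as
# (H, W, 1, Cin * D).
W0_grouped = np.reshape(W0, (H, W, 1, Cin * D))
assert W0_grouped.shape == (H, W, 1, Cin * D)

# Step 2 is an ordinary 1x1 convolution from Cin * D channels to Cout.
assert W1.shape == (1, 1, Cin * D, Cout)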
<SYSTEM_TASK:>
Convert a Batch Normalization layer.
<END_TASK>
<USER_TASK:>
Description:
def convert_batchnorm(builder, layer, input_names, output_names, keras_layer):
    """
    Convert a Batch Normalization layer.

    Parameters
    ----------
    keras_layer: layer
        A keras layer object.

    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])

axis = keras_layer.axis
nb_channels = keras_layer.input_shape[axis]

# Set parameters
# Parameter arrangement in Keras: gamma, beta, mean, variance
idx = 0
gamma, beta = None, None
if keras_layer.scale:
    gamma = keras_layer.get_weights()[idx]
    idx += 1
if keras_layer.center:
    beta = keras_layer.get_weights()[idx]
    idx += 1
mean = keras_layer.get_weights()[idx]
std = keras_layer.get_weights()[idx + 1]

gamma = _np.ones(mean.shape) if gamma is None else gamma
beta = _np.zeros(mean.shape) if beta is None else beta

# compute adjusted parameters: fold the running statistics into the
# scale and offset
variance = std * std
f = 1.0 / _np.sqrt(std + keras_layer.epsilon)
gamma1 = gamma * f
beta1 = beta - gamma * mean * f

# the folded parameters absorb the statistics, so the emitted layer is
# given zero mean and (approximately) unit variance
mean[:] = 0.0
variance[:] = 1.0 - .00001

builder.add_batchnorm(
    name=layer,
    channels=nb_channels,
    gamma=gamma1,
    beta=beta1,
    mean=mean,
    variance=variance,
    input_name=input_name,
    output_name=output_name)
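The folding above can be checked directly. Assuming the usual Keras weight
ordering (gamma, beta, moving_mean, moving_variance), the folded scale and
offset reproduce the standard batch-norm inference formula; a small NumPy
sketch with arbitrary numbers:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=8)
gamma, beta = 1.5, -0.25
mean, variance, eps = 0.3, 2.0, 1e-3

# Standard batch-norm inference formula.
reference = gamma * (x - mean) / np.sqrt(variance + eps) + beta

# Folded parameters, mirroring the computation above.
f = 1.0 / np.sqrt(variance + eps)
gamma1 = gamma * f
beta1 = beta - gamma * mean * f

# With mean 0 and variance ~ 1, the emitted layer reduces to an affine
# transform gamma1 * x + beta1.
assert np.allclose(reference, gamma1 * x + beta1)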
<SYSTEM_TASK:>
Convert a pooling layer from keras to coreml.
<END_TASK>
<USER_TASK:>
Description:
def convert_pooling(builder, layer, input_names, output_names, keras_layer):
    """
    Convert a pooling layer from keras to coreml.

    Parameters
    ----------
    keras_layer: layer
        A keras layer object.

    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
_check_data_format(keras_layer)

# Get input and output names
input_name, output_name = (input_names[0], output_names[0])

# Pooling layer type
if isinstance(keras_layer, _keras.layers.convolutional.MaxPooling2D) or \
        isinstance(keras_layer, _keras.layers.convolutional.MaxPooling1D) or \
        isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling2D) or \
        isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling1D):
    layer_type_str = 'MAX'
elif isinstance(keras_layer, _keras.layers.convolutional.AveragePooling2D) or \
        isinstance(keras_layer, _keras.layers.convolutional.AveragePooling1D) or \
        isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling2D) or \
        isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling1D):
    layer_type_str = 'AVERAGE'
else:
    raise TypeError("Pooling type %s not supported" % keras_layer)

# if it's global, set the global flag
if isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling2D) or \
        isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling2D):
    # 2D global pooling
    global_pooling = True
    height, width = (0, 0)
    stride_height, stride_width = (0, 0)
    padding_type = 'VALID'
elif isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling1D) or \
        isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling1D):
    # 1D global pooling: 1D global pooling seems problematic in the backend,
    # use this work-around
    global_pooling = False
    _, width, channels = keras_layer.input_shape
    height = 1
    stride_height, stride_width = height, width
    padding_type = 'VALID'
else:
    global_pooling = False

    # Set pool sizes and strides
    # 1D cases:
    if isinstance(keras_layer, _keras.layers.convolutional.MaxPooling1D) or \
            isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling1D) or \
            isinstance(keras_layer, _keras.layers.convolutional.AveragePooling1D) or \
            isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling1D):
        pool_size = keras_layer.pool_size if type(keras_layer.pool_size) is int \
            else keras_layer.pool_size[0]
        height, width = 1, pool_size
        if keras_layer.strides is not None:
            strides = keras_layer.strides if type(keras_layer.strides) is int \
                else keras_layer.strides[0]
            stride_height, stride_width = 1, strides
        else:
            stride_height, stride_width = 1, pool_size
    # 2D cases:
    else:
        height, width = keras_layer.pool_size
        if keras_layer.strides is None:
            stride_height, stride_width = height, width
        else:
            stride_height, stride_width = keras_layer.strides

    # Padding
    padding = keras_layer.padding
    if keras_layer.padding == 'valid':
        padding_type = 'VALID'
    elif keras_layer.padding == 'same':
        padding_type = 'SAME'
    else:
        raise TypeError("Border mode %s not supported" % padding)

builder.add_pooling(name=layer,
                    height=height,
                    width=width,
                    stride_height=stride_height,
                    stride_width=stride_width,
                    layer_type=layer_type_str,
                    padding_type=padding_type,
                    input_name=input_name,
                    output_name=output_name,
                    exclude_pad_area=True,
                    is_global=global_pooling)
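For the 1D global-pooling work-around above, a single full-width VALID
window with a matching stride is equivalent to the global reduction. A small
NumPy check, with arbitrary sizes:

import numpy as np

W, C = 7, 3
x = np.arange(W * C, dtype=float).reshape(W, C)   # (steps, channels)

global_avg = x.mean(axis=0)                       # global 1D average pooling
one_window = x.reshape(1, W, C).mean(axis=1)[0]   # one (1, W) window, stride (1, W)

assert np.allclose(global_avg, one_window)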