<SYSTEM_TASK:> Adds a Descriptor to the pool, non-recursively. <END_TASK> <USER_TASK:> Description: def AddDescriptor(self, desc): """Adds a Descriptor to the pool, non-recursively. If the Descriptor contains nested messages or enums, the caller must explicitly register them. This method also registers the FileDescriptor associated with the message. Args: desc: A Descriptor. """
if not isinstance(desc, descriptor.Descriptor):
    raise TypeError('Expected instance of descriptor.Descriptor.')

self._descriptors[desc.full_name] = desc
self._AddFileDescriptor(desc.file)
<SYSTEM_TASK:> Adds a FieldDescriptor describing an extension to the pool. <END_TASK> <USER_TASK:> Description: def AddExtensionDescriptor(self, extension): """Adds a FieldDescriptor describing an extension to the pool. Args: extension: A FieldDescriptor. Raises: AssertionError: when another extension with the same number extends the same message. TypeError: when the specified extension is not a descriptor.FieldDescriptor. """
if not (isinstance(extension, descriptor.FieldDescriptor) and extension.is_extension): raise TypeError('Expected an extension descriptor.') if extension.extension_scope is None: self._toplevel_extensions[extension.full_name] = extension try: existing_desc = self._extensions_by_number[ extension.containing_type][extension.number] except KeyError: pass else: if extension is not existing_desc: raise AssertionError( 'Extensions "%s" and "%s" both try to extend message type "%s" ' 'with field number %d.' % (extension.full_name, existing_desc.full_name, extension.containing_type.full_name, extension.number)) self._extensions_by_number[extension.containing_type][ extension.number] = extension self._extensions_by_name[extension.containing_type][ extension.full_name] = extension # Also register MessageSet extensions with the type name. if _IsMessageSetExtension(extension): self._extensions_by_name[extension.containing_type][ extension.message_type.full_name] = extension
<SYSTEM_TASK:> Gets a FileDescriptor by file name. <END_TASK> <USER_TASK:> Description: def FindFileByName(self, file_name): """Gets a FileDescriptor by file name. Args: file_name: The path to the file to get a descriptor for. Returns: A FileDescriptor for the named file. Raises: KeyError: if the file cannot be found in the pool. """
try:
    return self._file_descriptors[file_name]
except KeyError:
    pass

try:
    file_proto = self._internal_db.FindFileByName(file_name)
except KeyError as error:
    if self._descriptor_db:
        file_proto = self._descriptor_db.FindFileByName(file_name)
    else:
        raise error
if not file_proto:
    raise KeyError('Cannot find a file named %s' % file_name)
return self._ConvertFileProtoToFileDescriptor(file_proto)
<SYSTEM_TASK:> Gets the FileDescriptor for the file containing the specified symbol. <END_TASK> <USER_TASK:> Description: def FindFileContainingSymbol(self, symbol): """Gets the FileDescriptor for the file containing the specified symbol. Args: symbol: The name of the symbol to search for. Returns: A FileDescriptor that contains the specified symbol. Raises: KeyError: if the file cannot be found in the pool. """
symbol = _NormalizeFullyQualifiedName(symbol) try: return self._descriptors[symbol].file except KeyError: pass try: return self._enum_descriptors[symbol].file except KeyError: pass try: return self._FindFileContainingSymbolInDb(symbol) except KeyError: pass try: return self._file_desc_by_toplevel_extension[symbol] except KeyError: pass # Try nested extensions inside a message. message_name, _, extension_name = symbol.rpartition('.') try: message = self.FindMessageTypeByName(message_name) assert message.extensions_by_name[extension_name] return message.file except KeyError: raise KeyError('Cannot find a file containing %s' % symbol)
<SYSTEM_TASK:> Loads the named descriptor from the pool. <END_TASK> <USER_TASK:> Description: def FindMessageTypeByName(self, full_name): """Loads the named descriptor from the pool. Args: full_name: The full name of the descriptor to load. Returns: The descriptor for the named type. Raises: KeyError: if the message cannot be found in the pool. """
full_name = _NormalizeFullyQualifiedName(full_name)
if full_name not in self._descriptors:
    self._FindFileContainingSymbolInDb(full_name)
return self._descriptors[full_name]
<SYSTEM_TASK:> Loads the named enum descriptor from the pool. <END_TASK> <USER_TASK:> Description: def FindEnumTypeByName(self, full_name): """Loads the named enum descriptor from the pool. Args: full_name: The full name of the enum descriptor to load. Returns: The enum descriptor for the named type. Raises: KeyError: if the enum cannot be found in the pool. """
full_name = _NormalizeFullyQualifiedName(full_name)
if full_name not in self._enum_descriptors:
    self._FindFileContainingSymbolInDb(full_name)
return self._enum_descriptors[full_name]
<SYSTEM_TASK:> Loads the named field descriptor from the pool. <END_TASK> <USER_TASK:> Description: def FindFieldByName(self, full_name): """Loads the named field descriptor from the pool. Args: full_name: The full name of the field descriptor to load. Returns: The field descriptor for the named field. Raises: KeyError: if the field cannot be found in the pool. """
full_name = _NormalizeFullyQualifiedName(full_name)
message_name, _, field_name = full_name.rpartition('.')
message_descriptor = self.FindMessageTypeByName(message_name)
return message_descriptor.fields_by_name[field_name]
<SYSTEM_TASK:> Loads the named service descriptor from the pool. <END_TASK> <USER_TASK:> Description: def FindServiceByName(self, full_name): """Loads the named service descriptor from the pool. Args: full_name: The full name of the service descriptor to load. Returns: The service descriptor for the named service. Raises: KeyError: if the service cannot be found in the pool. """
full_name = _NormalizeFullyQualifiedName(full_name)
if full_name not in self._service_descriptors:
    self._FindFileContainingSymbolInDb(full_name)
return self._service_descriptors[full_name]
<SYSTEM_TASK:> Finds the file in descriptor DB containing the specified symbol. <END_TASK> <USER_TASK:> Description: def _FindFileContainingSymbolInDb(self, symbol): """Finds the file in descriptor DB containing the specified symbol. Args: symbol: The name of the symbol to search for. Returns: A FileDescriptor that contains the specified symbol. Raises: KeyError: if the file cannot be found in the descriptor database. """
try:
    file_proto = self._internal_db.FindFileContainingSymbol(symbol)
except KeyError as error:
    if self._descriptor_db:
        file_proto = self._descriptor_db.FindFileContainingSymbol(symbol)
    else:
        raise error
if not file_proto:
    raise KeyError('Cannot find a file containing %s' % symbol)
return self._ConvertFileProtoToFileDescriptor(file_proto)
<SYSTEM_TASK:> Creates a FileDescriptor from a proto or returns a cached copy. <END_TASK> <USER_TASK:> Description: def _ConvertFileProtoToFileDescriptor(self, file_proto): """Creates a FileDescriptor from a proto or returns a cached copy. This method also has the side effect of loading all the symbols found in the file into the appropriate dictionaries in the pool. Args: file_proto: The proto to convert. Returns: A FileDescriptor matching the passed in proto. """
if file_proto.name not in self._file_descriptors: built_deps = list(self._GetDeps(file_proto.dependency)) direct_deps = [self.FindFileByName(n) for n in file_proto.dependency] public_deps = [direct_deps[i] for i in file_proto.public_dependency] file_descriptor = descriptor.FileDescriptor( pool=self, name=file_proto.name, package=file_proto.package, syntax=file_proto.syntax, options=_OptionsOrNone(file_proto), serialized_pb=file_proto.SerializeToString(), dependencies=direct_deps, public_dependencies=public_deps) scope = {} # This loop extracts all the message and enum types from all the # dependencies of the file_proto. This is necessary to create the # scope of available message types when defining the passed in # file proto. for dependency in built_deps: scope.update(self._ExtractSymbols( dependency.message_types_by_name.values())) scope.update((_PrefixWithDot(enum.full_name), enum) for enum in dependency.enum_types_by_name.values()) for message_type in file_proto.message_type: message_desc = self._ConvertMessageDescriptor( message_type, file_proto.package, file_descriptor, scope, file_proto.syntax) file_descriptor.message_types_by_name[message_desc.name] = ( message_desc) for enum_type in file_proto.enum_type: file_descriptor.enum_types_by_name[enum_type.name] = ( self._ConvertEnumDescriptor(enum_type, file_proto.package, file_descriptor, None, scope)) for index, extension_proto in enumerate(file_proto.extension): extension_desc = self._MakeFieldDescriptor( extension_proto, file_proto.package, index, is_extension=True) extension_desc.containing_type = self._GetTypeFromScope( file_descriptor.package, extension_proto.extendee, scope) self._SetFieldType(extension_proto, extension_desc, file_descriptor.package, scope) file_descriptor.extensions_by_name[extension_desc.name] = ( extension_desc) for desc_proto in file_proto.message_type: self._SetAllFieldTypes(file_proto.package, desc_proto, scope) if file_proto.package: desc_proto_prefix = _PrefixWithDot(file_proto.package) else: desc_proto_prefix = '' for desc_proto in file_proto.message_type: desc = self._GetTypeFromScope( desc_proto_prefix, desc_proto.name, scope) file_descriptor.message_types_by_name[desc_proto.name] = desc for index, service_proto in enumerate(file_proto.service): file_descriptor.services_by_name[service_proto.name] = ( self._MakeServiceDescriptor(service_proto, index, scope, file_proto.package, file_descriptor)) self.Add(file_proto) self._file_descriptors[file_proto.name] = file_descriptor return self._file_descriptors[file_proto.name]
<SYSTEM_TASK:> Adds the proto to the pool in the specified package. <END_TASK> <USER_TASK:> Description: def _ConvertMessageDescriptor(self, desc_proto, package=None, file_desc=None, scope=None, syntax=None): """Adds the proto to the pool in the specified package. Args: desc_proto: The descriptor_pb2.DescriptorProto protobuf message. package: The package the proto should be located in. file_desc: The file containing this message. scope: Dict mapping short and full symbols to message and enum types. syntax: string indicating syntax of the file ("proto2" or "proto3") Returns: The added descriptor. """
if package: desc_name = '.'.join((package, desc_proto.name)) else: desc_name = desc_proto.name if file_desc is None: file_name = None else: file_name = file_desc.name if scope is None: scope = {} nested = [ self._ConvertMessageDescriptor( nested, desc_name, file_desc, scope, syntax) for nested in desc_proto.nested_type] enums = [ self._ConvertEnumDescriptor(enum, desc_name, file_desc, None, scope) for enum in desc_proto.enum_type] fields = [self._MakeFieldDescriptor(field, desc_name, index) for index, field in enumerate(desc_proto.field)] extensions = [ self._MakeFieldDescriptor(extension, desc_name, index, is_extension=True) for index, extension in enumerate(desc_proto.extension)] oneofs = [ descriptor.OneofDescriptor(desc.name, '.'.join((desc_name, desc.name)), index, None, [], desc.options) for index, desc in enumerate(desc_proto.oneof_decl)] extension_ranges = [(r.start, r.end) for r in desc_proto.extension_range] if extension_ranges: is_extendable = True else: is_extendable = False desc = descriptor.Descriptor( name=desc_proto.name, full_name=desc_name, filename=file_name, containing_type=None, fields=fields, oneofs=oneofs, nested_types=nested, enum_types=enums, extensions=extensions, options=_OptionsOrNone(desc_proto), is_extendable=is_extendable, extension_ranges=extension_ranges, file=file_desc, serialized_start=None, serialized_end=None, syntax=syntax) for nested in desc.nested_types: nested.containing_type = desc for enum in desc.enum_types: enum.containing_type = desc for field_index, field_desc in enumerate(desc_proto.field): if field_desc.HasField('oneof_index'): oneof_index = field_desc.oneof_index oneofs[oneof_index].fields.append(fields[field_index]) fields[field_index].containing_oneof = oneofs[oneof_index] scope[_PrefixWithDot(desc_name)] = desc self._descriptors[desc_name] = desc return desc
<SYSTEM_TASK:> Sets the types of all the descriptor's fields. <END_TASK> <USER_TASK:> Description: def _SetAllFieldTypes(self, package, desc_proto, scope): """Sets the types of all the descriptor's fields. This method also sets the containing types on any extensions. Args: package: The current package of desc_proto. desc_proto: The message descriptor to update. scope: Enclosing scope of available types. """
package = _PrefixWithDot(package) main_desc = self._GetTypeFromScope(package, desc_proto.name, scope) if package == '.': nested_package = _PrefixWithDot(desc_proto.name) else: nested_package = '.'.join([package, desc_proto.name]) for field_proto, field_desc in zip(desc_proto.field, main_desc.fields): self._SetFieldType(field_proto, field_desc, nested_package, scope) for extension_proto, extension_desc in ( zip(desc_proto.extension, main_desc.extensions)): extension_desc.containing_type = self._GetTypeFromScope( nested_package, extension_proto.extendee, scope) self._SetFieldType(extension_proto, extension_desc, nested_package, scope) for nested_type in desc_proto.nested_type: self._SetAllFieldTypes(nested_package, nested_type, scope)
<SYSTEM_TASK:> Sets the field's type, cpp_type, message_type and enum_type. <END_TASK> <USER_TASK:> Description: def _SetFieldType(self, field_proto, field_desc, package, scope): """Sets the field's type, cpp_type, message_type and enum_type. Args: field_proto: Data about the field in proto format. field_desc: The descriptor to modify. package: The package the field's container is in. scope: Enclosing scope of available types. """
if field_proto.type_name: desc = self._GetTypeFromScope(package, field_proto.type_name, scope) else: desc = None if not field_proto.HasField('type'): if isinstance(desc, descriptor.Descriptor): field_proto.type = descriptor.FieldDescriptor.TYPE_MESSAGE else: field_proto.type = descriptor.FieldDescriptor.TYPE_ENUM field_desc.cpp_type = descriptor.FieldDescriptor.ProtoTypeToCppProtoType( field_proto.type) if (field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE or field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP): field_desc.message_type = desc if field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: field_desc.enum_type = desc if field_proto.label == descriptor.FieldDescriptor.LABEL_REPEATED: field_desc.has_default_value = False field_desc.default_value = [] elif field_proto.HasField('default_value'): field_desc.has_default_value = True if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT): field_desc.default_value = float(field_proto.default_value) elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING: field_desc.default_value = field_proto.default_value elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL: field_desc.default_value = field_proto.default_value.lower() == 'true' elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: field_desc.default_value = field_desc.enum_type.values_by_name[ field_proto.default_value].number elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES: field_desc.default_value = text_encoding.CUnescape( field_proto.default_value) else: # All other types are of the "int" type. field_desc.default_value = int(field_proto.default_value) else: field_desc.has_default_value = False if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT): field_desc.default_value = 0.0 elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING: field_desc.default_value = u'' elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL: field_desc.default_value = False elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: field_desc.default_value = field_desc.enum_type.values[0].number elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES: field_desc.default_value = b'' else: # All other types are of the "int" type. field_desc.default_value = 0 field_desc.type = field_proto.type
<SYSTEM_TASK:> Creates an enum value descriptor object from an enum value proto. <END_TASK> <USER_TASK:> Description: def _MakeEnumValueDescriptor(self, value_proto, index): """Creates an enum value descriptor object from an enum value proto. Args: value_proto: The proto describing the enum value. index: The index of the enum value. Returns: An initialized EnumValueDescriptor object. """
return descriptor.EnumValueDescriptor(name=value_proto.name,
                                       index=index,
                                       number=value_proto.number,
                                       options=_OptionsOrNone(value_proto),
                                       type=None)
<SYSTEM_TASK:> Make a protobuf ServiceDescriptor given a ServiceDescriptorProto. <END_TASK> <USER_TASK:> Description: def _MakeServiceDescriptor(self, service_proto, service_index, scope, package, file_desc): """Make a protobuf ServiceDescriptor given a ServiceDescriptorProto. Args: service_proto: The descriptor_pb2.ServiceDescriptorProto protobuf message. service_index: The index of the service in the File. scope: Dict mapping short and full symbols to message and enum types. package: Optional package name for the new message EnumDescriptor. file_desc: The file containing the service descriptor. Returns: The added descriptor. """
if package: service_name = '.'.join((package, service_proto.name)) else: service_name = service_proto.name methods = [self._MakeMethodDescriptor(method_proto, service_name, package, scope, index) for index, method_proto in enumerate(service_proto.method)] desc = descriptor.ServiceDescriptor(name=service_proto.name, full_name=service_name, index=service_index, methods=methods, options=_OptionsOrNone(service_proto), file=file_desc) self._service_descriptors[service_name] = desc return desc
<SYSTEM_TASK:> Creates a method descriptor from a MethodDescriptorProto. <END_TASK> <USER_TASK:> Description: def _MakeMethodDescriptor(self, method_proto, service_name, package, scope, index): """Creates a method descriptor from a MethodDescriptorProto. Args: method_proto: The proto describing the method. service_name: The name of the containing service. package: Optional package name to look up for types. scope: Scope containing available types. index: Index of the method in the service. Returns: An initialized MethodDescriptor object. """
full_name = '.'.join((service_name, method_proto.name)) input_type = self._GetTypeFromScope( package, method_proto.input_type, scope) output_type = self._GetTypeFromScope( package, method_proto.output_type, scope) return descriptor.MethodDescriptor(name=method_proto.name, full_name=full_name, index=index, containing_service=None, input_type=input_type, output_type=output_type, options=_OptionsOrNone(method_proto))
<SYSTEM_TASK:> Pulls out all the symbols from descriptor protos. <END_TASK> <USER_TASK:> Description: def _ExtractSymbols(self, descriptors): """Pulls out all the symbols from descriptor protos. Args: descriptors: The messages to extract descriptors from. Yields: A two element tuple of the type name and descriptor object. """
for desc in descriptors:
    yield (_PrefixWithDot(desc.full_name), desc)
    for symbol in self._ExtractSymbols(desc.nested_types):
        yield symbol
    for enum in desc.enum_types:
        yield (_PrefixWithDot(enum.full_name), enum)
<SYSTEM_TASK:> Recursively finds dependencies for file protos. <END_TASK> <USER_TASK:> Description: def _GetDeps(self, dependencies): """Recursively finds dependencies for file protos. Args: dependencies: The names of the files being depended on. Yields: Each direct and indirect dependency. """
for dependency in dependencies:
    dep_desc = self.FindFileByName(dependency)
    yield dep_desc
    for parent_dep in dep_desc.dependencies:
        yield parent_dep
<SYSTEM_TASK:> Finds a given type name in the current scope. <END_TASK> <USER_TASK:> Description: def _GetTypeFromScope(self, package, type_name, scope): """Finds a given type name in the current scope. Args: package: The package the proto should be located in. type_name: The name of the type to be found in the scope. scope: Dict mapping short and full symbols to message and enum types. Returns: The descriptor for the requested type. """
if type_name not in scope:
    components = _PrefixWithDot(package).split('.')
    while components:
        possible_match = '.'.join(components + [type_name])
        if possible_match in scope:
            type_name = possible_match
            break
        else:
            components.pop(-1)
return scope[type_name]
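The resolution walk above can be illustrated with plain strings standing in for descriptor objects. The helper below is a hypothetical, standalone re-creation of the same loop for demonstration only, not the pool's implementation:

def resolve_in_scope(package, type_name, scope):
    # Walk outward from the innermost package until the name resolves.
    if type_name not in scope:
        components = ('.' + package).split('.')
        while components:
            candidate = '.'.join(components + [type_name])
            if candidate in scope:
                type_name = candidate
                break
            components.pop(-1)
    return scope[type_name]

scope = {'.pkg.sub.Inner': 'Inner-descriptor', '.pkg.Outer': 'Outer-descriptor'}
print(resolve_in_scope('pkg.sub', 'Inner', scope))   # found as '.pkg.sub.Inner'
print(resolve_in_scope('pkg.sub', 'Outer', scope))   # falls back to '.pkg.Outer'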
<SYSTEM_TASK:> Returns the maximum number in 'elements'. Uses 'ordered' for comparisons, <END_TASK> <USER_TASK:> Description: def max_element (elements, ordered = None): """ Returns the maximum number in 'elements'. Uses 'ordered' for comparisons, or '<' if none is provided. """
assert is_iterable(elements)
assert callable(ordered) or ordered is None

if not ordered:
    ordered = operator.lt

max = elements[0]
for e in elements[1:]:
    if ordered(max, e):
        max = e

return max
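Assuming the module's helpers (is_iterable, operator) are in scope, the comparator pattern can be exercised like this:

print(max_element([3, 1, 4, 1, 5]))                    # -> 5, default '<' ordering
print(max_element(['bb', 'a', 'cccc'],
                  lambda a, b: len(a) < len(b)))       # -> 'cccc', custom ordering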
<SYSTEM_TASK:> Returns all of 'elements' for which the corresponding element in the parallel <END_TASK> <USER_TASK:> Description: def select_highest_ranked (elements, ranks): """ Returns all of 'elements' for which the corresponding element in the parallel list 'ranks' is equal to the maximum value in 'ranks'. """
assert is_iterable(elements)
assert is_iterable(ranks)

if not elements:
    return []

max_rank = max_element(ranks)

result = []
while elements:
    if ranks[0] == max_rank:
        result.append(elements[0])
    elements = elements[1:]
    ranks = ranks[1:]

return result
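For example, assuming the module's is_iterable and max_element helpers are in scope, pairing elements with ranks keeps only the top-ranked ones:

elements = ['gcc', 'clang', 'msvc']
ranks    = [2, 5, 5]
print(select_highest_ranked(elements, ranks))   # -> ['clang', 'msvc']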
<SYSTEM_TASK:> Copies the content of the specified message into the current message. <END_TASK> <USER_TASK:> Description: def CopyFrom(self, other_msg): """Copies the content of the specified message into the current message. The method clears the current message and then merges the specified message using MergeFrom. Args: other_msg: Message to copy into the current one. """
if self is other_msg:
    return
self.Clear()
self.MergeFrom(other_msg)
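A quick illustration of the clear-then-merge semantics, using a message type that ships with the protobuf package (any generated message behaves the same way):

from google.protobuf import descriptor_pb2

src = descriptor_pb2.FieldDescriptorProto(name='id', number=1)
dst = descriptor_pb2.FieldDescriptorProto(name='old', number=99, type_name='.pkg.Foo')

dst.CopyFrom(src)                      # Clear() first, then MergeFrom(src)
assert dst.name == 'id' and dst.number == 1
assert not dst.HasField('type_name')   # stale fields do not survive the copy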
<SYSTEM_TASK:> Convert a generic tree model to the protobuf spec. <END_TASK> <USER_TASK:> Description: def convert_tree_ensemble(model, feature_names, target, force_32bit_float): """Convert a generic tree model to the protobuf spec. This currently supports: * Decision tree regression Parameters ---------- model: str | Booster Path on disk where the XGboost JSON representation of the model is or a handle to the XGboost model. feature_names : list of strings or None Names of each of the features. When set to None, the feature names are extracted from the model. target: str, Name of the output column. force_32bit_float: bool If True, then the resulting CoreML model will use 32 bit floats internally. Returns ------- model_spec: An object of type Model_pb. Protobuf representation of the model """
if not(_HAS_XGBOOST): raise RuntimeError('xgboost not found. xgboost conversion API is disabled.') import json import os feature_map = None if isinstance(model, (_xgboost.core.Booster, _xgboost.XGBRegressor)): # Testing a few corner cases that we don't support if isinstance(model, _xgboost.XGBRegressor): try: objective = model.get_xgb_params()["objective"] except: objective = None if objective in ["reg:gamma", "reg:tweedie"]: raise ValueError("Regression objective '%s' not supported for export." % objective) # Now use the booster API. if isinstance(model, _xgboost.XGBRegressor): # Name change in 0.7 if hasattr(model, 'get_booster'): model = model.get_booster() else: model = model.booster() # Xgboost sometimes has feature names in there. Sometimes does not. if (feature_names is None) and (model.feature_names is None): raise ValueError("Feature names not present in the model. Must be provided during conversion.") feature_names = model.feature_names if feature_names is None: feature_names = model.feature_names xgb_model_str = model.get_dump(with_stats=True, dump_format = 'json') if model.feature_names: feature_map = {f:i for i,f in enumerate(model.feature_names)} # Path on the file system where the XGboost model exists. elif isinstance(model, str): if not os.path.exists(model): raise TypeError("Invalid path %s." % model) with open(model) as f: xgb_model_str = json.load(f) feature_map = {f:i for i,f in enumerate(feature_names)} else: raise TypeError("Unexpected type. Expecting XGBoost model.") mlkit_tree = _TreeEnsembleRegressor(feature_names, target) mlkit_tree.set_default_prediction_value(0.5) for xgb_tree_id, xgb_tree_str in enumerate(xgb_model_str): xgb_tree_json = json.loads(xgb_tree_str) recurse_json(mlkit_tree, xgb_tree_json, xgb_tree_id, node_id = 0, feature_map = feature_map, force_32bit_float = force_32bit_float) return mlkit_tree.spec
<SYSTEM_TASK:> Convert a scalar multiplication from mxnet to coreml. <END_TASK> <USER_TASK:> Description: def convert_elementwise_mul_scalar(net, node, module, builder): """Convert a scalar multiplication from mxnet to coreml. Parameters ---------- net: network A mxnet network object. node: layer Node to convert. module: module A module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """
import numpy
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
param = _get_attr(node)
mult = literal_eval(param['scalar'])
builder.add_scale(name=name, W=numpy.array([mult]), b=0, has_bias=False,
                  input_name=input_name, output_name=output_name)
<SYSTEM_TASK:> Convert a dense layer from mxnet to coreml. <END_TASK> <USER_TASK:> Description: def convert_dense(net, node, module, builder): """Convert a dense layer from mxnet to coreml. Parameters ---------- net: network A mxnet network object. node: layer Node to convert. module: module A module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """
input_name, output_name = _get_input_output_name(net, node) has_bias = True name = node['name'] inputs = node['inputs'] args, _ = module.get_params() W = args[_get_node_name(net, inputs[1][0])].asnumpy() if has_bias: Wb = args[_get_node_name(net, inputs[2][0])].asnumpy() else: Wb = None nC, nB = W.shape builder.add_inner_product( name=name, W=W, b=Wb, input_channels=nB, output_channels=nC, has_bias=has_bias, input_name=input_name, output_name=output_name )
<SYSTEM_TASK:> Convert a padding layer from mxnet to coreml. <END_TASK> <USER_TASK:> Description: def convert_padding(net, node, module, builder): """Convert a padding layer from mxnet to coreml. Parameters ---------- net: network A mxnet network object. node: layer Node to convert. module: module A module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """
input_name, output_name = _get_input_output_name(net, node) name = node['name'] param = _get_attr(node) pad = literal_eval(param['pad_width']) pad_left = pad[4] pad_top = pad[5] pad_right = pad[6] pad_bottom = pad[7] if param['mode'] == 'reflect': builder.add_padding( name=name, top=pad_top, bottom=pad_bottom, left=pad_left, right=pad_right, padding_type='reflection', input_name=input_name, output_name=output_name ) else: raise TypeError("Padding type %s not supported" % param['mode'])
<SYSTEM_TASK:> Convert an UpSampling layer from mxnet to coreml. <END_TASK> <USER_TASK:> Description: def convert_upsample(net, node, module, builder): """Convert an UpSampling layer from mxnet to coreml. Parameters ---------- net: network A mxnet network object. node: layer Node to convert. module: module A module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """
input_name, output_name = _get_input_output_name(net, node) name = node['name'] param = _get_attr(node) inputs = node['inputs'] args, _ = module.get_params() scale = literal_eval(param['scale']) #method if 'sample_type' in param.keys(): method = param['sample_type'] if method == 'nearest': mode = 'NN' elif method == '': mode = 'BILINEAR' builder.add_upsample(name, scaling_factor_h=scale, scaling_factor_w=scale, input_name=input_name, output_name=output_name, mode=mode)
<SYSTEM_TASK:> Convert an embedding layer from mxnet to coreml. <END_TASK> <USER_TASK:> Description: def convert_embedding(net, node, model, builder): """Convert an embedding layer from mxnet to coreml. Parameters ---------- net: network A mxnet network object. node: layer Node to convert. model: model A model for MXNet builder: NeuralNetworkBuilder A neural network builder object. """
input_name, output_name = _get_input_output_name(net, node) name = node['name'] inputs = node['inputs'] outputs = node['outputs'] arg_params, aux_params = model.get_params() W = arg_params[_get_node_name(net, inputs[1][0])].asnumpy() if not ONE_HOT_ENCODE_HACK: nC, nB = W.shape W = W.T builder.add_embedding(name = name, W = W, b = None, input_dim = nC, output_channels = nB, has_bias = False, input_name = input_name, output_name = output_name) else: W = W.T nC, nB = W.shape builder.add_inner_product(name = name, W = W, b = None, input_channels = nB, output_channels = nC, has_bias = False, input_name = input_name, output_name = output_name)
<SYSTEM_TASK:> Convert a scalar add layer from mxnet to coreml. <END_TASK> <USER_TASK:> Description: def convert_scalar_add(net, node, model, builder): """Convert a scalar add layer from mxnet to coreml. Parameters ---------- net: network A mxnet network object. node: layer Node to convert. model: model A model for MXNet builder: NeuralNetworkBuilder A neural network builder object. """
import numpy as _np
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
param = _get_attr(node)
mode = 'ADD'
alpha = _np.array([float(param['scalar'])])
builder.add_scale(name=name, input_name=input_name, output_name=output_name,
                  W=_np.array([1.0]), b=alpha, has_bias=True)
<SYSTEM_TASK:> Convert a scalar multiply layer from mxnet to coreml. <END_TASK> <USER_TASK:> Description: def convert_scalar_multiply(net, node, model, builder): """Convert a scalar multiply layer from mxnet to coreml. Parameters ---------- net: network A mxnet network object. node: layer Node to convert. model: model A model for MXNet builder: NeuralNetworkBuilder A neural network builder object. """
import numpy as _np
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
param = _get_attr(node)
alpha = _np.array([float(param['scalar'])])
builder.add_scale(name=name, input_name=input_name, output_name=output_name,
                  W=alpha, has_bias=False, b=None)
<SYSTEM_TASK:> Convert an instance norm layer from mxnet to coreml. <END_TASK> <USER_TASK:> Description: def convert_instancenorm(net, node, model, builder): """Convert an instance norm layer from mxnet to coreml. Parameters ---------- net: network A mxnet network object. node: layer Node to convert. model: model A model for MXNet builder: NeuralNetworkBuilder A neural network builder object. """
import numpy as _np input_name, output_name = _get_input_output_name(net, node) name = node['name'] inputs = node['inputs'] outputs = node['outputs'] data_blob_name = _get_node_name(net, inputs[0][0]) gamma_blob_name = _get_node_name(net, inputs[1][0]) beta_blob_name = _get_node_name(net, inputs[2][0]) channels = _get_node_channels(net, inputs[0][0]) bn_output_name = output_name + '_bn_' builder.add_batchnorm( name = name + '_normalize', channels = channels, gamma = _np.ones((channels, )), beta = _np.zeros((channels, )), mean = None, variance = None, input_name = input_name, output_name = bn_output_name, compute_mean_var = True, instance_normalization = True) gamma_input_names = [bn_output_name, gamma_blob_name] gamma_output_name = output_name + '_mult_gamma' builder.add_elementwise(name=name+'_mult_gamma', input_names=gamma_input_names, output_name = gamma_output_name, mode='MULTIPLY', alpha = None) beta_input_names = [gamma_output_name, beta_blob_name] builder.add_elementwise(name=name+'_add_beta', input_names=beta_input_names, output_name = output_name, mode='ADD', alpha=None)
<SYSTEM_TASK:> Returns the values stored in the AWS credential environment variables. <END_TASK> <USER_TASK:> Description: def _get_aws_credentials(): """ Returns the values stored in the AWS credential environment variables. Returns the value stored in the AWS_ACCESS_KEY_ID environment variable and the value stored in the AWS_SECRET_ACCESS_KEY environment variable. Returns ------- out : tuple [string] The first string of the tuple is the value of the AWS_ACCESS_KEY_ID environment variable. The second string of the tuple is the value of the AWS_SECRET_ACCESS_KEY environment variable. Examples -------- >>> turicreate.aws.get_credentials() ('RBZH792CTQPP7T435BGQ', '7x2hMqplWsLpU/qQCN6xAPKcmWo46TlPJXYTvKcv') """
if (not 'AWS_ACCESS_KEY_ID' in _os.environ):
    raise KeyError('No access key found. Please set the environment variable AWS_ACCESS_KEY_ID.')
if (not 'AWS_SECRET_ACCESS_KEY' in _os.environ):
    raise KeyError('No secret key found. Please set the environment variable AWS_SECRET_ACCESS_KEY.')
return (_os.environ['AWS_ACCESS_KEY_ID'], _os.environ['AWS_SECRET_ACCESS_KEY'])
<SYSTEM_TASK:> Utility function that returns True if the path provided is a directory that has an SFrame or SGraph in it. <END_TASK> <USER_TASK:> Description: def is_directory_archive(path): """ Utility function that returns True if the path provided is a directory that has an SFrame or SGraph in it. SFrames are written to disk as a directory archive, this function identifies if a given directory is an archive for an SFrame. Parameters ---------- path : string Directory to evaluate. Returns ------- True if path provided is an archive location, False otherwise """
if path is None:
    return False

if not _os.path.isdir(path):
    return False

ini_path = '/'.join([_convert_slashes(path), 'dir_archive.ini'])

if not _os.path.exists(ini_path):
    return False

if _os.path.isfile(ini_path):
    return True

return False
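A hedged usage sketch; the layout below only mimics the marker file the function looks for, and the calls are shown as comments because the helper lives in turicreate's util module:

import os, tempfile

archive_dir = tempfile.mkdtemp()
open(os.path.join(archive_dir, 'dir_archive.ini'), 'w').close()

# Expected behaviour of the helper above:
#   is_directory_archive(archive_dir)        -> True   (marker file present)
#   is_directory_archive(tempfile.mkdtemp()) -> False  (no marker file)
#   is_directory_archive(None)               -> False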
<SYSTEM_TASK:> Returns the contents type for the provided archive path. <END_TASK> <USER_TASK:> Description: def get_archive_type(path): """ Returns the contents type for the provided archive path. Parameters ---------- path : string Directory to evaluate. Returns ------- Returns a string of: sframe, sgraph, raises TypeError for anything else """
if not is_directory_archive(path):
    raise TypeError('Unable to determine the type of archive at path: %s' % path)

try:
    ini_path = '/'.join([_convert_slashes(path), 'dir_archive.ini'])
    parser = _ConfigParser.SafeConfigParser()
    parser.read(ini_path)
    contents = parser.get('metadata', 'contents')
    return contents
except Exception as e:
    raise TypeError('Unable to determine type of archive for path: %s' % path, e)
<SYSTEM_TASK:> Create an SFrame containing the crossproduct of all provided options. <END_TASK> <USER_TASK:> Description: def crossproduct(d): """ Create an SFrame containing the crossproduct of all provided options. Parameters ---------- d : dict Each key is the name of an option, and each value is a list of the possible values for that option. Returns ------- out : SFrame There will be a column for each key in the provided dictionary, and a row for each unique combination of all values. Example ------- settings = {'argument_1':[0, 1], 'argument_2':['a', 'b', 'c']} print crossproduct(settings) +------------+------------+ | argument_2 | argument_1 | +------------+------------+ | a | 0 | | a | 1 | | b | 0 | | b | 1 | | c | 0 | | c | 1 | +------------+------------+ [6 rows x 2 columns] """
from .. import SArray
d = [list(zip(list(d.keys()), x)) for x in _itertools.product(*list(d.values()))]
sa = [{k: v for (k, v) in x} for x in d]
return SArray(sa).unpack(column_name_prefix='')
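The expansion itself is plain itertools; a standalone sketch without the SFrame/SArray wrapping produces the same rows as a list of dicts:

import itertools

settings = {'argument_1': [0, 1], 'argument_2': ['a', 'b', 'c']}
rows = [dict(zip(settings.keys(), combo))
        for combo in itertools.product(*settings.values())]
print(len(rows))   # 6
print(rows[0])     # {'argument_1': 0, 'argument_2': 'a'}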
<SYSTEM_TASK:> Assert the two SFrames are equal. <END_TASK> <USER_TASK:> Description: def _assert_sframe_equal(sf1, sf2, check_column_names=True, check_column_order=True, check_row_order=True, float_column_delta=None): """ Assert the two SFrames are equal. The default behavior of this function uses the strictest possible definition of equality, where all columns must be in the same order, with the same names and have the same data in the same order. Each of these stipulations can be relaxed individually and in concert with another, with the exception of `check_column_order` and `check_column_names`, we must use one of these to determine which columns to compare with one another. Parameters ---------- sf1 : SFrame sf2 : SFrame check_column_names : bool If true, assert if the data values in two columns are the same, but they have different names. If False, column order is used to determine which columns to compare. check_column_order : bool If true, assert if the data values in two columns are the same, but are not in the same column position (one is the i-th column and the other is the j-th column, i != j). If False, column names are used to determine which columns to compare. check_row_order : bool If true, assert if all rows in the first SFrame exist in the second SFrame, but they are not in the same order. float_column_delta : float The acceptable delta that two float values can be and still be considered "equal". When this is None, only exact equality is accepted. This is the default behavior since columns of all Nones are often of float type. Applies to all float columns. """
from .. import SFrame as _SFrame if (type(sf1) is not _SFrame) or (type(sf2) is not _SFrame): raise TypeError("Cannot function on types other than SFrames.") if not check_column_order and not check_column_names: raise ValueError("Cannot ignore both column order and column names.") sf1.__materialize__() sf2.__materialize__() if sf1.num_columns() != sf2.num_columns(): raise AssertionError("Number of columns mismatched: " + str(sf1.num_columns()) + " != " + str(sf2.num_columns())) s1_names = sf1.column_names() s2_names = sf2.column_names() sorted_s1_names = sorted(s1_names) sorted_s2_names = sorted(s2_names) if check_column_names: if (check_column_order and (s1_names != s2_names)) or (sorted_s1_names != sorted_s2_names): raise AssertionError("SFrame does not have same column names: " + str(sf1.column_names()) + " != " + str(sf2.column_names())) if sf1.num_rows() != sf2.num_rows(): raise AssertionError("Number of rows mismatched: " + str(sf1.num_rows()) + " != " + str(sf2.num_rows())) if not check_row_order and (sf1.num_rows() > 1): sf1 = sf1.sort(s1_names) sf2 = sf2.sort(s2_names) names_to_check = None if check_column_names: names_to_check = list(zip(sorted_s1_names, sorted_s2_names)) else: names_to_check = list(zip(s1_names, s2_names)) for i in names_to_check: col1 = sf1[i[0]] col2 = sf2[i[1]] if col1.dtype != col2.dtype: raise AssertionError("Columns " + str(i) + " types mismatched.") compare_ary = None if col1.dtype == float and float_column_delta is not None: dt = float_column_delta compare_ary = ((col1 > col2-dt) & (col1 < col2+dt)) else: compare_ary = (sf1[i[0]] == sf2[i[1]]) if not compare_ary.all(): count = 0 for j in compare_ary: if not j: first_row = count break count += 1 raise AssertionError("Columns " + str(i) + " are not equal! First differing element is at row " + str(first_row) + ": " + str((col1[first_row],col2[first_row])))
<SYSTEM_TASK:> Implementation of the parameterization decorators. <END_TASK> <USER_TASK:> Description: def _ParameterDecorator(naming_type, testcases): """Implementation of the parameterization decorators. Args: naming_type: The naming type. testcases: Testcase parameters. Returns: A function for modifying the decorated object. """
def _Apply(obj): if isinstance(obj, type): _ModifyClass( obj, list(testcases) if not isinstance(testcases, collections.Sequence) else testcases, naming_type) return obj else: return _ParameterizedTestIter(obj, testcases, naming_type) if _IsSingletonList(testcases): assert _NonStringIterable(testcases[0]), ( 'Single parameter argument must be a non-string iterable') testcases = testcases[0] return _Apply
<SYSTEM_TASK:> Checks if the header-comment of the given file needs fixing. <END_TASK> <USER_TASK:> Description: def check_header_comment(filename): """Checks if the header-comment of the given file needs fixing."""
# Check input file.
name = os.path.basename( filename )
# Read content of input file.
sourcefile = open( filename, "rU" )
content = sourcefile.read()
sourcefile.close()
# Search content for '$Id$'.
match = re.search(r'\$Id\$', content)
if match == None:
    # Make sure that the correct value for '$Id$' was already set.
    match = re.search(r'\$Id: ' + name + r'\s+[^$]+\$', content)
    if match != None:
        # The given file needs no fixing.
        return False
# The given file needs fixing.
return True
<SYSTEM_TASK:> Checks if files, used as input when pre-processing MPL-containers in their variadic form, need fixing. <END_TASK> <USER_TASK:> Description: def check_input_files_for_variadic_seq(headerDir, sourceDir): """Checks if files, used as input when pre-processing MPL-containers in their variadic form, need fixing."""
# Check input files in include/source-directories.
files = glob.glob( os.path.join( headerDir, "*.hpp" ) )
files += glob.glob( os.path.join( headerDir, "aux_", "*.hpp" ) )
files += glob.glob( os.path.join( sourceDir, "src", "*" ) )
for currentFile in sorted( files ):
    if check_header_comment( currentFile ):
        return True
return False
<SYSTEM_TASK:> Check if files, used as input when pre-processing MPL-containers in their numbered form, need fixing. <END_TASK> <USER_TASK:> Description: def check_input_files_for_numbered_seq(sourceDir, suffix, containers): """Check if files, used as input when pre-processing MPL-containers in their numbered form, need fixing."""
# Check input files for each MPL-container type.
for container in containers:
    files = glob.glob( os.path.join( sourceDir, container, container + '*' + suffix ) )
    for currentFile in sorted( files ):
        if check_header_comment( currentFile ):
            return True
return False
<SYSTEM_TASK:> Checks if source- and header-files, used as input when pre-processing MPL-containers, need fixing. <END_TASK> <USER_TASK:> Description: def check_input_files(headerDir, sourceDir, containers=['vector', 'list', 'set', 'map'], seqType='both', verbose=False): """Checks if source- and header-files, used as input when pre-processing MPL-containers, need fixing."""
# Check the input files for containers in their variadic form.
result1 = False
if seqType == "both" or seqType == "variadic":
    if verbose:
        print("Check if input files for pre-processing Boost.MPL variadic containers need fixing.")
    result1 = check_input_files_for_variadic_seq(headerDir, sourceDir)
    if verbose:
        if result1:
            print("  At least one input file needs fixing!")
        else:
            print("  No input file needs fixing!")
# Check the input files for containers in their numbered form.
result2 = False
result3 = False
if seqType == "both" or seqType == "numbered":
    if verbose:
        print("Check input files for pre-processing Boost.MPL numbered containers.")
    result2 = check_input_files_for_numbered_seq(headerDir, ".hpp", containers)
    result3 = check_input_files_for_numbered_seq(sourceDir, ".cpp", containers)
    if verbose:
        if result2 or result3:
            print("  At least one input file needs fixing!")
        else:
            print("  No input file needs fixing!")
# Return result.
return result1 or result2 or result3
<SYSTEM_TASK:> Fixes the header-comment of the given file. <END_TASK> <USER_TASK:> Description: def fix_header_comment(filename, timestamp): """Fixes the header-comment of the given file."""
# Fix input file.
name = os.path.basename( filename )
for line in fileinput.input( filename, inplace=1, mode="rU" ):
    # If header-comment already contains anything for '$Id$', remove it.
    line = re.sub(r'\$Id:[^$]+\$', r'$Id$', line.rstrip())
    # Replace '$Id$' by a string containing the file's name (and a timestamp)!
    line = re.sub(re.escape(r'$Id$'),
                  r'$Id: ' + name + r' ' + timestamp.isoformat() + r' $',
                  line.rstrip())
    print(line)
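The two-step substitution can be checked in isolation; the file name and timestamp below are made up for the demonstration:

import datetime
import re

name = 'vector.hpp'
timestamp = datetime.datetime(2024, 1, 2, 3, 4, 5)
line = '// $Id: vector.hpp 2008-10-11 19:02:11 $'

line = re.sub(r'\$Id:[^$]+\$', r'$Id$', line.rstrip())          # collapse any old value
line = re.sub(re.escape(r'$Id$'),
              r'$Id: ' + name + r' ' + timestamp.isoformat() + r' $',
              line.rstrip())
print(line)   # // $Id: vector.hpp 2024-01-02T03:04:05 $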
<SYSTEM_TASK:> Fixes files used as input when pre-processing MPL-containers in their variadic form. <END_TASK> <USER_TASK:> Description: def fix_input_files_for_variadic_seq(headerDir, sourceDir, timestamp): """Fixes files used as input when pre-processing MPL-containers in their variadic form."""
# Fix files in include/source-directories.
files = glob.glob( os.path.join( headerDir, "*.hpp" ) )
files += glob.glob( os.path.join( headerDir, "aux_", "*.hpp" ) )
files += glob.glob( os.path.join( sourceDir, "src", "*" ) )
for currentFile in sorted( files ):
    fix_header_comment( currentFile, timestamp )
<SYSTEM_TASK:> Fixes files used as input when pre-processing MPL-containers in their numbered form. <END_TASK> <USER_TASK:> Description: def fix_input_files_for_numbered_seq(sourceDir, suffix, timestamp, containers): """Fixes files used as input when pre-processing MPL-containers in their numbered form."""
# Fix input files for each MPL-container type.
for container in containers:
    files = glob.glob( os.path.join( sourceDir, container, container + '*' + suffix ) )
    for currentFile in sorted( files ):
        fix_header_comment( currentFile, timestamp )
<SYSTEM_TASK:> Fixes source- and header-files used as input when pre-processing MPL-containers. <END_TASK> <USER_TASK:> Description: def fix_input_files(headerDir, sourceDir, containers=['vector', 'list', 'set', 'map'], seqType='both', verbose=False): """Fixes source- and header-files used as input when pre-processing MPL-containers."""
# The new modification time.
timestamp = datetime.datetime.now()
# Fix the input files for containers in their variadic form.
if seqType == "both" or seqType == "variadic":
    if verbose:
        print("Fix input files for pre-processing Boost.MPL variadic containers.")
    fix_input_files_for_variadic_seq(headerDir, sourceDir, timestamp)
# Fix the input files for containers in their numbered form.
if seqType == "both" or seqType == "numbered":
    if verbose:
        print("Fix input files for pre-processing Boost.MPL numbered containers.")
    fix_input_files_for_numbered_seq(headerDir, ".hpp", timestamp, containers)
    fix_input_files_for_numbered_seq(sourceDir, ".cpp", timestamp, containers)
<SYSTEM_TASK:> Converts a path into its absolute path and verifies that it exists or throws an exception. <END_TASK> <USER_TASK:> Description: def to_existing_absolute_path(string): """Converts a path into its absolute path and verifies that it exists or throws an exception."""
value = os.path.abspath(string)
if not os.path.exists( value ) or not os.path.isdir( value ):
    msg = '"%r" is not a valid path to a directory.' % string
    raise argparse.ArgumentTypeError(msg)
return value
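Typical use is as an argparse type converter; the parser below is a hypothetical minimal example that assumes the function above is in scope:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('sourceDir', type=to_existing_absolute_path)
args = parser.parse_args(['.'])
print(args.sourceDir)   # absolute path of the current directory, or an argparse error if it does not exist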
<SYSTEM_TASK:> For each image, retrieve the nearest neighbors from the model's stored <END_TASK> <USER_TASK:> Description: def query(self, dataset, label=None, k=5, radius=None, verbose=True, batch_size=64): """ For each image, retrieve the nearest neighbors from the model's stored data. In general, the query dataset does not need to be the same as the reference data stored in the model. Parameters ---------- dataset : SFrame | SArray | turicreate.Image Query data. If dataset is an SFrame, it must contain columns with the same names and types as the features used to train the model. Additional columns are ignored. label : str, optional Name of the query SFrame column with row labels. If 'label' is not specified, row numbers are used to identify query dataset rows in the output SFrame. k : int, optional Number of nearest neighbors to return from the reference set for each query observation. The default is 5 neighbors, but setting it to ``None`` will return all neighbors within ``radius`` of the query point. radius : float, optional Only neighbors whose distance to a query point is smaller than this value are returned. The default is ``None``, in which case the ``k`` nearest neighbors are returned for each query point, regardless of distance. verbose: bool, optional If True, print progress updates and model details. batch_size : int, optional If you are getting memory errors, try decreasing this value. If you have a powerful computer, increasing this value may improve performance. Returns ------- out : SFrame An SFrame with the k-nearest neighbors of each query observation. The result contains four columns: the first is the label of the query observation, the second is the label of the nearby reference observation, the third is the distance between the query and reference observations, and the fourth is the rank of the reference observation among the query's k-nearest neighbors. See Also -------- similarity_graph Notes ----- - If both ``k`` and ``radius`` are set to ``None``, each query point returns all of the reference set. If the reference dataset has :math:`n` rows and the query dataset has :math:`m` rows, the output is an SFrame with :math:`nm` rows. Examples -------- >>> model.query(queries, 'label', k=2) +-------------+-----------------+----------------+------+ | query_label | reference_label | distance | rank | +-------------+-----------------+----------------+------+ | 0 | 2 | 0.305941170816 | 1 | | 0 | 1 | 0.771556867638 | 2 | | 1 | 1 | 0.390128184063 | 1 | | 1 | 0 | 0.464004310325 | 2 | | 2 | 0 | 0.170293863659 | 1 | | 2 | 1 | 0.464004310325 | 2 | +-------------+-----------------+----------------+------+ """
if not isinstance(dataset, (_tc.SFrame, _tc.SArray, _tc.Image)): raise TypeError('dataset must be either an SFrame, SArray or turicreate.Image') if(batch_size < 1): raise ValueError("'batch_size' must be greater than or equal to 1") if isinstance(dataset, _tc.SArray): dataset = _tc.SFrame({self.feature: dataset}) elif isinstance(dataset, _tc.Image): dataset = _tc.SFrame({self.feature: [dataset]}) extracted_features = self._extract_features(dataset, verbose=verbose, batch_size=batch_size) if label is not None: extracted_features[label] = dataset[label] return self.similarity_model.query(extracted_features, label, k, radius, verbose)
<SYSTEM_TASK:> Construct the similarity graph on the reference dataset, which is <END_TASK> <USER_TASK:> Description: def similarity_graph(self, k=5, radius=None, include_self_edges=False, output_type='SGraph', verbose=True): """ Construct the similarity graph on the reference dataset, which is already stored in the model to find the top `k` similar images for each image in your input dataset. This is conceptually very similar to running `query` with the reference set, but this method is optimized for the purpose, syntactically simpler, and automatically removes self-edges. WARNING: This method can take time. Parameters ---------- k : int, optional Maximum number of neighbors to return for each point in the dataset. Setting this to ``None`` deactivates the constraint, so that all neighbors are returned within ``radius`` of a given point. radius : float, optional For a given point, only neighbors within this distance are returned. The default is ``None``, in which case the ``k`` nearest neighbors are returned for each query point, regardless of distance. include_self_edges : bool, optional For most distance functions, each point in the model's reference dataset is its own nearest neighbor. If this parameter is set to False, this result is ignored, and the nearest neighbors are returned *excluding* the point itself. output_type : {'SGraph', 'SFrame'}, optional By default, the results are returned in the form of an SGraph, where each point in the reference dataset is a vertex and an edge A -> B indicates that vertex B is a nearest neighbor of vertex A. If 'output_type' is set to 'SFrame', the output is in the same form as the results of the 'query' method: an SFrame with columns indicating the query label (in this case the query data is the same as the reference data), reference label, distance between the two points, and the rank of the neighbor. verbose : bool, optional If True, print progress updates and model details. Returns ------- out : SFrame or SGraph The type of the output object depends on the 'output_type' parameter. See the parameter description for more detail. Notes ----- - If both ``k`` and ``radius`` are set to ``None``, each data point is matched to the entire dataset. If the reference dataset has :math:`n` rows, the output is an SFrame with :math:`n^2` rows (or an SGraph with :math:`n^2` edges). Examples -------- >>> graph = model.similarity_graph(k=1) # an SGraph >>> >>> # Most similar image for each image in the input dataset >>> graph.edges +----------+----------+----------------+------+ | __src_id | __dst_id | distance | rank | +----------+----------+----------------+------+ | 0 | 1 | 0.376430604494 | 1 | | 2 | 1 | 0.55542776308 | 1 | | 1 | 0 | 0.376430604494 | 1 | +----------+----------+----------------+------+ """
return self.similarity_model.similarity_graph(k, radius, include_self_edges, output_type, verbose)
<SYSTEM_TASK:> A sizer which uses the function compute_value_size to compute the size of <END_TASK> <USER_TASK:> Description: def _SimpleSizer(compute_value_size): """A sizer which uses the function compute_value_size to compute the size of each value. Typically compute_value_size is _VarintSize."""
def SpecificSizer(field_number, is_repeated, is_packed):
    tag_size = _TagSize(field_number)
    if is_packed:
        local_VarintSize = _VarintSize
        def PackedFieldSize(value):
            result = 0
            for element in value:
                result += compute_value_size(element)
            return result + local_VarintSize(result) + tag_size
        return PackedFieldSize
    elif is_repeated:
        def RepeatedFieldSize(value):
            result = tag_size * len(value)
            for element in value:
                result += compute_value_size(element)
            return result
        return RepeatedFieldSize
    else:
        def FieldSize(value):
            return tag_size + compute_value_size(value)
        return FieldSize
return SpecificSizer
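To see the closure factory in action, here is a hedged, self-contained sketch: _VarintSize and _TagSize below are simplified stand-ins for the library's helpers, only the non-repeated branch is exercised, and _SimpleSizer is assumed to be the definition above.

def _VarintSize(value):
    # bytes needed for the base-128 varint encoding of a non-negative int
    size = 1
    while value > 0x7f:
        value >>= 7
        size += 1
    return size

def _TagSize(field_number):
    # a tag is the varint encoding of (field_number << 3 | wire_type)
    return _VarintSize(field_number << 3)

Int32Sizer = _SimpleSizer(_VarintSize)        # factory specialised for varint-encoded values
size_field_1 = Int32Sizer(1, False, False)    # sizer for field number 1, singular
print(size_field_1(300))                      # 1 tag byte + 2 value bytes == 3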
<SYSTEM_TASK:> Like _SimpleSizer except for a fixed-size field. The input is the size <END_TASK> <USER_TASK:> Description: def _FixedSizer(value_size): """Like _SimpleSizer except for a fixed-size field. The input is the size of one value."""
def SpecificSizer(field_number, is_repeated, is_packed): tag_size = _TagSize(field_number) if is_packed: local_VarintSize = _VarintSize def PackedFieldSize(value): result = len(value) * value_size return result + local_VarintSize(result) + tag_size return PackedFieldSize elif is_repeated: element_size = value_size + tag_size def RepeatedFieldSize(value): return len(value) * element_size return RepeatedFieldSize else: field_size = value_size + tag_size def FieldSize(value): return field_size return FieldSize return SpecificSizer
<SYSTEM_TASK:> Returns a sizer for a bytes field. <END_TASK> <USER_TASK:> Description: def BytesSizer(field_number, is_repeated, is_packed): """Returns a sizer for a bytes field."""
tag_size = _TagSize(field_number) local_VarintSize = _VarintSize local_len = len assert not is_packed if is_repeated: def RepeatedFieldSize(value): result = tag_size * len(value) for element in value: l = local_len(element) result += local_VarintSize(l) + l return result return RepeatedFieldSize else: def FieldSize(value): l = local_len(value) return tag_size + local_VarintSize(l) + l return FieldSize
<SYSTEM_TASK:> Returns a sizer for a group field. <END_TASK> <USER_TASK:> Description: def GroupSizer(field_number, is_repeated, is_packed): """Returns a sizer for a group field."""
tag_size = _TagSize(field_number) * 2 assert not is_packed if is_repeated: def RepeatedFieldSize(value): result = tag_size * len(value) for element in value: result += element.ByteSize() return result return RepeatedFieldSize else: def FieldSize(value): return tag_size + value.ByteSize() return FieldSize
<SYSTEM_TASK:> Returns a sizer for a message field. <END_TASK> <USER_TASK:> Description: def MessageSizer(field_number, is_repeated, is_packed): """Returns a sizer for a message field."""
tag_size = _TagSize(field_number) local_VarintSize = _VarintSize assert not is_packed if is_repeated: def RepeatedFieldSize(value): result = tag_size * len(value) for element in value: l = element.ByteSize() result += local_VarintSize(l) + l return result return RepeatedFieldSize else: def FieldSize(value): l = value.ByteSize() return tag_size + local_VarintSize(l) + l return FieldSize
<SYSTEM_TASK:> Returns a sizer for extensions of MessageSet. <END_TASK> <USER_TASK:> Description: def MessageSetItemSizer(field_number): """Returns a sizer for extensions of MessageSet. The message set message looks like this: message MessageSet { repeated group Item = 1 { required int32 type_id = 2; required string message = 3; } } """
static_size = (_TagSize(1) * 2 + _TagSize(2) + _VarintSize(field_number) +
               _TagSize(3))
local_VarintSize = _VarintSize

def FieldSize(value):
    l = value.ByteSize()
    return static_size + local_VarintSize(l) + l

return FieldSize
<SYSTEM_TASK:> Returns a sizer for a map field. <END_TASK> <USER_TASK:> Description: def MapSizer(field_descriptor, is_message_map): """Returns a sizer for a map field."""
# Can't look at field_descriptor.message_type._concrete_class because it may # not have been initialized yet. message_type = field_descriptor.message_type message_sizer = MessageSizer(field_descriptor.number, False, False) def FieldSize(map_value): total = 0 for key in map_value: value = map_value[key] # It's wasteful to create the messages and throw them away one second # later since we'll do the same for the actual encode. But there's not an # obvious way to avoid this within the current design without tons of code # duplication. For message map, value.ByteSize() should be called to # update the status. entry_msg = message_type._concrete_class(key=key, value=value) total += message_sizer(entry_msg) if is_message_map: value.ByteSize() return total return FieldSize
<SYSTEM_TASK:>
Encode the given integer as a varint and return the bytes. This is only
<END_TASK>
<USER_TASK:>
Description:
def _VarintBytes(value):
  """Encode the given integer as a varint and return the bytes. This is only
  called at startup time so it doesn't need to be fast."""

pieces = []
  _EncodeVarint(pieces.append, value)
  return b"".join(pieces)
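A minimal standalone sketch of the same base-128 varint scheme (not the module's _EncodeVarint, just an illustration) confirms the expected encoding of 300:

def varint_bytes(value):
    # Minimal unsigned base-128 varint encoder, for illustration only.
    out = bytearray()
    while True:
        byte = value & 0x7F
        value >>= 7
        if value:
            out.append(byte | 0x80)   # more bytes follow
        else:
            out.append(byte)
            return bytes(out)

assert varint_bytes(300) == b'\xac\x02'
assert varint_bytes(0) == b'\x00'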
<SYSTEM_TASK:> Return a constructor for an encoder for fields of a particular type. <END_TASK> <USER_TASK:> Description: def _SimpleEncoder(wire_type, encode_value, compute_value_size): """Return a constructor for an encoder for fields of a particular type. Args: wire_type: The field's wire type, for encoding tags. encode_value: A function which encodes an individual value, e.g. _EncodeVarint(). compute_value_size: A function which computes the size of an individual value, e.g. _VarintSize(). """
def SpecificEncoder(field_number, is_repeated, is_packed): if is_packed: tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) local_EncodeVarint = _EncodeVarint def EncodePackedField(write, value): write(tag_bytes) size = 0 for element in value: size += compute_value_size(element) local_EncodeVarint(write, size) for element in value: encode_value(write, element) return EncodePackedField elif is_repeated: tag_bytes = TagBytes(field_number, wire_type) def EncodeRepeatedField(write, value): for element in value: write(tag_bytes) encode_value(write, element) return EncodeRepeatedField else: tag_bytes = TagBytes(field_number, wire_type) def EncodeField(write, value): write(tag_bytes) return encode_value(write, value) return EncodeField return SpecificEncoder
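For orientation, the packed branch above emits a single length-delimited record for the whole list, while the repeated branch writes a tag before each element. A hand-worked layout for a hypothetical field number 4 holding the varint values [3, 270]:

# Wire-layout sketch for field number 4 with values [3, 270].
# Packed (the proto3 default for scalar numerics): one LENGTH_DELIMITED record.
#   tag 0x22 ((4 << 3) | 2), payload length 3, then 0x03 and 0x8E 0x02.
packed = bytes([0x22, 0x03, 0x03, 0x8E, 0x02])
# Unpacked repeated: a VARINT tag (0x20) in front of every element.
unpacked = bytes([0x20, 0x03, 0x20, 0x8E, 0x02])
assert len(packed) == 5 and len(unpacked) == 5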
<SYSTEM_TASK:> Return a constructor for an encoder for a fixed-width field. <END_TASK> <USER_TASK:> Description: def _StructPackEncoder(wire_type, format): """Return a constructor for an encoder for a fixed-width field. Args: wire_type: The field's wire type, for encoding tags. format: The format string to pass to struct.pack(). """
value_size = struct.calcsize(format) def SpecificEncoder(field_number, is_repeated, is_packed): local_struct_pack = struct.pack if is_packed: tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) local_EncodeVarint = _EncodeVarint def EncodePackedField(write, value): write(tag_bytes) local_EncodeVarint(write, len(value) * value_size) for element in value: write(local_struct_pack(format, element)) return EncodePackedField elif is_repeated: tag_bytes = TagBytes(field_number, wire_type) def EncodeRepeatedField(write, value): for element in value: write(tag_bytes) write(local_struct_pack(format, element)) return EncodeRepeatedField else: tag_bytes = TagBytes(field_number, wire_type) def EncodeField(write, value): write(tag_bytes) return write(local_struct_pack(format, value)) return EncodeField return SpecificEncoder
<SYSTEM_TASK:> Return a constructor for an encoder for float fields. <END_TASK> <USER_TASK:> Description: def _FloatingPointEncoder(wire_type, format): """Return a constructor for an encoder for float fields. This is like StructPackEncoder, but catches errors that may be due to passing non-finite floating-point values to struct.pack, and makes a second attempt to encode those values. Args: wire_type: The field's wire type, for encoding tags. format: The format string to pass to struct.pack(). """
value_size = struct.calcsize(format) if value_size == 4: def EncodeNonFiniteOrRaise(write, value): # Remember that the serialized form uses little-endian byte order. if value == _POS_INF: write(b'\x00\x00\x80\x7F') elif value == _NEG_INF: write(b'\x00\x00\x80\xFF') elif value != value: # NaN write(b'\x00\x00\xC0\x7F') else: raise elif value_size == 8: def EncodeNonFiniteOrRaise(write, value): if value == _POS_INF: write(b'\x00\x00\x00\x00\x00\x00\xF0\x7F') elif value == _NEG_INF: write(b'\x00\x00\x00\x00\x00\x00\xF0\xFF') elif value != value: # NaN write(b'\x00\x00\x00\x00\x00\x00\xF8\x7F') else: raise else: raise ValueError('Can\'t encode floating-point values that are ' '%d bytes long (only 4 or 8)' % value_size) def SpecificEncoder(field_number, is_repeated, is_packed): local_struct_pack = struct.pack if is_packed: tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) local_EncodeVarint = _EncodeVarint def EncodePackedField(write, value): write(tag_bytes) local_EncodeVarint(write, len(value) * value_size) for element in value: # This try/except block is going to be faster than any code that # we could write to check whether element is finite. try: write(local_struct_pack(format, element)) except SystemError: EncodeNonFiniteOrRaise(write, element) return EncodePackedField elif is_repeated: tag_bytes = TagBytes(field_number, wire_type) def EncodeRepeatedField(write, value): for element in value: write(tag_bytes) try: write(local_struct_pack(format, element)) except SystemError: EncodeNonFiniteOrRaise(write, element) return EncodeRepeatedField else: tag_bytes = TagBytes(field_number, wire_type) def EncodeField(write, value): write(tag_bytes) try: write(local_struct_pack(format, value)) except SystemError: EncodeNonFiniteOrRaise(write, value) return EncodeField return SpecificEncoder
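The hard-coded byte strings above are the little-endian IEEE-754 encodings of the special values; a quick struct-based check (the exact NaN bit pattern is platform-dependent, so treat that part as an assumption):

import struct

assert struct.pack('<f', float('inf'))  == b'\x00\x00\x80\x7f'
assert struct.pack('<f', float('-inf')) == b'\x00\x00\x80\xff'
assert struct.pack('<d', float('inf'))  == b'\x00\x00\x00\x00\x00\x00\xf0\x7f'
# NaN payload bits are not unique; CPython's float('nan') usually packs to the
# quiet-NaN patterns used above, but this can vary by platform.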
<SYSTEM_TASK:> Returns an encoder for a boolean field. <END_TASK> <USER_TASK:> Description: def BoolEncoder(field_number, is_repeated, is_packed): """Returns an encoder for a boolean field."""
false_byte = b'\x00' true_byte = b'\x01' if is_packed: tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) local_EncodeVarint = _EncodeVarint def EncodePackedField(write, value): write(tag_bytes) local_EncodeVarint(write, len(value)) for element in value: if element: write(true_byte) else: write(false_byte) return EncodePackedField elif is_repeated: tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT) def EncodeRepeatedField(write, value): for element in value: write(tag_bytes) if element: write(true_byte) else: write(false_byte) return EncodeRepeatedField else: tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT) def EncodeField(write, value): write(tag_bytes) if value: return write(true_byte) return write(false_byte) return EncodeField
<SYSTEM_TASK:> Returns an encoder for a string field. <END_TASK> <USER_TASK:> Description: def StringEncoder(field_number, is_repeated, is_packed): """Returns an encoder for a string field."""
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) local_EncodeVarint = _EncodeVarint local_len = len assert not is_packed if is_repeated: def EncodeRepeatedField(write, value): for element in value: encoded = element.encode('utf-8') write(tag) local_EncodeVarint(write, local_len(encoded)) write(encoded) return EncodeRepeatedField else: def EncodeField(write, value): encoded = value.encode('utf-8') write(tag) local_EncodeVarint(write, local_len(encoded)) return write(encoded) return EncodeField
<SYSTEM_TASK:> Returns an encoder for a group field. <END_TASK> <USER_TASK:> Description: def GroupEncoder(field_number, is_repeated, is_packed): """Returns an encoder for a group field."""
start_tag = TagBytes(field_number, wire_format.WIRETYPE_START_GROUP) end_tag = TagBytes(field_number, wire_format.WIRETYPE_END_GROUP) assert not is_packed if is_repeated: def EncodeRepeatedField(write, value): for element in value: write(start_tag) element._InternalSerialize(write) write(end_tag) return EncodeRepeatedField else: def EncodeField(write, value): write(start_tag) value._InternalSerialize(write) return write(end_tag) return EncodeField
<SYSTEM_TASK:> Returns an encoder for a message field. <END_TASK> <USER_TASK:> Description: def MessageEncoder(field_number, is_repeated, is_packed): """Returns an encoder for a message field."""
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) local_EncodeVarint = _EncodeVarint assert not is_packed if is_repeated: def EncodeRepeatedField(write, value): for element in value: write(tag) local_EncodeVarint(write, element.ByteSize()) element._InternalSerialize(write) return EncodeRepeatedField else: def EncodeField(write, value): write(tag) local_EncodeVarint(write, value.ByteSize()) return value._InternalSerialize(write) return EncodeField
<SYSTEM_TASK:> Convert a Caffe model to Core ML format. <END_TASK> <USER_TASK:> Description: def convert(model, image_input_names=[], is_bgr=False, red_bias=0.0, blue_bias=0.0, green_bias=0.0, gray_bias=0.0, image_scale=1.0, class_labels=None, predicted_feature_name=None, model_precision=_MLMODEL_FULL_PRECISION): """ Convert a Caffe model to Core ML format. Parameters ---------- model: str | (str, str) | (str, str, str) | (str, str, dict) A trained Caffe neural network model which can be represented as: - Path on disk to a trained Caffe model (.caffemodel) - A tuple of two paths, where the first path is the path to the .caffemodel file while the second is the path to the deploy.prototxt. - A tuple of three paths, where the first path is the path to the trained .caffemodel file, the second is the path to the deploy.prototxt while the third is a path to the mean image binary, data in which is subtracted from the input image as a preprocessing step. - A tuple of two paths to .caffemodel and .prototxt and a dict with image input names as keys and paths to mean image binaryprotos as values. The keys should be same as the input names provided via the argument 'image_input_name'. image_input_names: [str] | str The name(s) of the input blob(s) in the Caffe model that can be treated as images by Core ML. All other inputs are treated as MultiArrays (N-D Arrays) by Core ML. is_bgr: bool | dict() Flag indicating the channel order the model internally uses to represent color images. Set to True if the internal channel order is BGR, otherwise it will be assumed RGB. This flag is applicable only if image_input_names is specified. To specify a different value for each image input, provide a dictionary with input names as keys. Note that this flag is about the models internal channel order. An input image can be passed to the model in any color pixel layout containing red, green and blue values (e.g. 32BGRA or 32ARGB). This flag determines how those pixel values get mapped to the internal multiarray representation. red_bias: float | dict() Bias value to be added to the red channel of the input image. Defaults to 0.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. blue_bias: float | dict() Bias value to be added to the the blue channel of the input image. Defaults to 0.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. green_bias: float | dict() Bias value to be added to the green channel of the input image. Defaults to 0.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. gray_bias: float | dict() Bias value to be added to the input image (in grayscale). Defaults to 0.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. image_scale: float | dict() Value by which the input images will be scaled before bias is added and Core ML model makes a prediction. Defaults to 1.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. class_labels: str Filepath where classes are parsed as a list of newline separated strings. Class labels map the index of the output of a neural network to labels in a classifier. 
Provide this argument to get a model of type classifier. predicted_feature_name: str Name of the output feature for the class labels exposed in the Core ML model (applies to classifiers only). Defaults to 'classLabel' model_precision: str Precision at which model will be saved. Currently full precision (float) and half precision (float16) models are supported. Defaults to '_MLMODEL_FULL_PRECISION' (full precision). Returns ------- model: MLModel Model in Core ML format. Examples -------- .. sourcecode:: python # Convert it with default input and output names >>> import coremltools >>> coreml_model = coremltools.converters.caffe.convert('my_caffe_model.caffemodel') # Saving the Core ML model to a file. >>> coreml_model.save('my_model.mlmodel') Sometimes, critical information in the Caffe converter is missing from the .caffemodel file. This information is present in the deploy.prototxt file. You can provide us with both files in the conversion process. .. sourcecode:: python >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_deploy.prototxt')) Some models (like Resnet-50) also require a mean image file which is subtracted from the input image before passing through the network. This file can also be provided during conversion: .. sourcecode:: python >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', ... 'my_deploy.prototxt', 'mean_image.binaryproto'), image_input_names = 'image_input') # Multiple mean images for preprocessing >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', ... 'my_deploy.prototxt', {'image1': 'mean_image1.binaryproto', 'image2': 'mean_image2.binaryproto'}), ... image_input_names = ['image1', 'image2']) # Multiple image inputs and bias/scale values >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_deploy.prototxt'), ... red_bias = {'image1': -100, 'image2': -110}, ... green_bias = {'image1': -90, 'image2': -125}, ... blue_bias = {'image1': -105, 'image2': -120}, ... image_input_names = ['image1', 'image2']) Input and output names used in the interface of the converted Core ML model are inferred from the .prototxt file, which contains a description of the network architecture. Input names are read from the input layer definition in the .prototxt. By default, they are of type MultiArray. Argument "image_input_names" can be used to assign image type to specific inputs. All the blobs that are "dangling", i.e. which do not feed as input to any other layer are taken as outputs. The .prototxt file can be modified to specify custom input and output names. The converted Core ML model is of type classifier when the argument "class_labels" is specified. Advanced usage with custom classifiers, and images: .. sourcecode:: python # Mark some inputs as Images >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_caffe_model.prototxt'), ... image_input_names = 'my_image_input') # Export as a classifier with classes from a file >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_caffe_model.prototxt'), ... image_input_names = 'my_image_input', class_labels = 'labels.txt') Sometimes the converter might return a message about not able to infer input data dimensions. This happens when the input size information is absent from the deploy.prototxt file. This can be easily provided by editing the .prototxt in a text editor. 
Simply add a snippet in the beginning, similar to the following, for each of the inputs to the model: .. code-block:: bash input: "my_image_input" input_dim: 1 input_dim: 3 input_dim: 227 input_dim: 227 Here we have specified an input with dimensions (1,3,227,227), using Caffe's convention, in the order (batch, channel, height, width). Input name string ("my_image_input") must also match the name of the input (or "bottom", as inputs are known in Caffe) of the first layer in the .prototxt. """
from ...models import MLModel from ...models.utils import convert_neural_network_weights_to_fp16 as convert_neural_network_weights_to_fp16 if model_precision not in _VALID_MLMODEL_PRECISION_TYPES: raise RuntimeError('Model precision {} is not valid'.format(model_precision)) import tempfile model_path = tempfile.mktemp() _export(model_path, model, image_input_names, is_bgr, red_bias, blue_bias, green_bias, gray_bias, image_scale, class_labels, predicted_feature_name) model = MLModel(model_path) if model_precision == _MLMODEL_HALF_PRECISION and model is not None: model = convert_neural_network_weights_to_fp16(model) return model
<SYSTEM_TASK:> Takes the sklearn SVM model and returns the spec with the protobuf kernel for that model. <END_TASK> <USER_TASK:> Description: def _set_kernel(model, spec): """ Takes the sklearn SVM model and returns the spec with the protobuf kernel for that model. """
def gamma_value(model): if(model.gamma == 'auto'): # auto gamma value is 1/num_features return 1/float(len(model.support_vectors_[0])) else: return model.gamma result = None if(model.kernel == 'linear'): spec.kernel.linearKernel.MergeFromString(b'') # hack to set kernel to an empty type elif(model.kernel == 'rbf'): spec.kernel.rbfKernel.gamma = gamma_value(model) elif(model.kernel == 'poly'): spec.kernel.polyKernel.gamma = gamma_value(model) spec.kernel.polyKernel.c = model.coef0 spec.kernel.polyKernel.degree = model.degree elif(model.kernel == 'sigmoid'): spec.kernel.sigmoidKernel.gamma = gamma_value(model) spec.kernel.sigmoidKernel.c = model.coef0 else: raise ValueError('Unsupported kernel. The following kernel are supported: linear, RBF, polynomial and sigmoid.') return result
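A rough sketch of the same gamma rule applied with scikit-learn directly (toy data, not part of the converter; scikit-learn keeps the constructor value in model.gamma, which is what gamma_value() above inspects):

from sklearn.svm import SVC
import numpy as np

X = np.array([[0., 0., 0., 0.],
              [1., 1., 1., 1.],
              [0., 1., 0., 1.],
              [1., 0., 1., 0.]])   # 4 features
y = [0, 1, 0, 1]
model = SVC(kernel='rbf', gamma='auto').fit(X, y)

# Mirrors gamma_value(): 'auto' means 1 / num_features.
if model.gamma == 'auto':
    gamma = 1.0 / float(len(model.support_vectors_[0]))
else:
    gamma = model.gamma
assert abs(gamma - 0.25) < 1e-12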
<SYSTEM_TASK:> Append a single row to an SFrame. <END_TASK> <USER_TASK:> Description: def append(self, data, segment=0): """ Append a single row to an SFrame. Throws a RuntimeError if one or more column's type is incompatible with a type appended. Parameters ---------- data : iterable An iterable representation of a single row. segment : int The segment to write this row. Each segment is numbered sequentially, starting with 0. Any value in segment 1 will be after any value in segment 0, and the order of rows in each segment is preserved as they are added. """
# Assume this case refers to an SFrame with a single column if not hasattr(data, '__iter__'): data = [data] self._builder.append(data, segment)
<SYSTEM_TASK:> Append multiple rows to an SFrame. <END_TASK> <USER_TASK:> Description: def append_multiple(self, data, segment=0): """ Append multiple rows to an SFrame. Throws a RuntimeError if one or more column's type is incompatible with a type appended. Parameters ---------- data : iterable[iterable] A collection of multiple iterables, each representing a single row. segment : int The segment to write the given rows. Each segment is numbered sequentially, starting with 0. Any value in segment 1 will be after any value in segment 0, and the order of rows in each segment is preserved as they are added. """
if not hasattr(data, '__iter__'): raise TypeError("append_multiple must be passed an iterable object") tmp_list = [] # Avoid copy in cases that we are passed materialized data that is # smaller than our block size if hasattr(data, '__len__'): if len(data) <= self._block_size: self._builder.append_multiple(data, segment) return for i in data: tmp_list.append(i) if len(tmp_list) >= self._block_size: self._builder.append_multiple(tmp_list, segment) tmp_list = [] if len(tmp_list) > 0: self._builder.append_multiple(tmp_list, segment)
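The block-size batching above can be pictured in isolation with this standalone sketch (plain lists and a hypothetical flush callback instead of the native builder):

def batched_append(rows, flush, block_size=1024):
    # Accumulate rows and flush them in blocks, mirroring the logic above.
    pending = []
    for row in rows:
        pending.append(row)
        if len(pending) >= block_size:
            flush(pending)
            pending = []
    if pending:
        flush(pending)

chunks = []
batched_append(range(10), chunks.append, block_size=4)
assert [len(c) for c in chunks] == [4, 4, 2]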
<SYSTEM_TASK:> Given the list of source targets explicitly passed to 'stage', returns the <END_TASK> <USER_TASK:> Description: def targets_to_stage(self, source_targets, ps): """Given the list of source targets explicitly passed to 'stage', returns the list of targets which must be staged."""
result = [] # Traverse the dependencies, if needed. if ps.get('install-dependencies') == ['on']: source_targets = self.collect_targets(source_targets) # Filter the target types, if needed. included_types = ps.get('install-type') for r in source_targets: ty = r.type() if ty: # Do not stage searched libs. if ty != "SEARCHED_LIB": if included_types: if self.include_type(ty, included_types): result.append(r) else: result.append(r) elif not included_types: # Don't install typeless target if there is an explicit list of # allowed types. result.append(r) return result
<SYSTEM_TASK:> Initialize the logging configuration for the turicreate package. <END_TASK> <USER_TASK:> Description: def init_logger(): """ Initialize the logging configuration for the turicreate package. This does not affect the root logging config. """
import logging as _logging import logging.config # Package level logger _logging.config.dictConfig({ 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'standard': { 'format': '%(asctime)s [%(levelname)s] %(name)s, %(lineno)s: %(message)s' }, 'brief': { 'format': '[%(levelname)s] %(name)s: %(message)s' } }, 'handlers': { 'default': { 'class': 'logging.StreamHandler', 'formatter': 'brief' }, 'file': { 'class': 'logging.FileHandler', 'formatter': 'standard', 'filename': _client_log_file, 'encoding': 'UTF-8', 'delay': 'False', } }, 'loggers': { _root_package_name: { 'handlers': ['default', 'file'], 'propagate': 'True' } } }) # Set module specific log levels _logging.getLogger('requests').setLevel(_logging.CRITICAL) if _i_am_a_lambda_worker(): _logging.getLogger(_root_package_name).setLevel(_logging.WARNING) else: _logging.getLogger(_root_package_name).setLevel(_logging.INFO)
<SYSTEM_TASK:> Returns all the Turi Create configuration variables that can only <END_TASK> <USER_TASK:> Description: def get_environment_config(): """ Returns all the Turi Create configuration variables that can only be set via environment variables. - *TURI_FILEIO_WRITER_BUFFER_SIZE*: The file write buffer size. - *TURI_FILEIO_READER_BUFFER_SIZE*: The file read buffer size. - *OMP_NUM_THREADS*: The maximum number of threads to use for parallel processing. Returns ------- Returns a dictionary of {key:value,..} """
from .._connect import main as _glconnect unity = _glconnect.get_unity() return unity.list_globals(False)
<SYSTEM_TASK:>
Sets the log level.
<END_TASK>
<USER_TASK:>
Description:
def set_log_level(level):
    """
    Sets the log level. Lower log levels log more.
    If level is 8, nothing is logged. If level is 0, everything is logged.
    """

from .._connect import main as _glconnect
    unity = _glconnect.get_unity()
    return unity.set_log_level(level)
<SYSTEM_TASK:> Load SGraph from text file or previously saved SGraph binary. <END_TASK> <USER_TASK:> Description: def load_sgraph(filename, format='binary', delimiter='auto'): """ Load SGraph from text file or previously saved SGraph binary. Parameters ---------- filename : string Location of the file. Can be a local path or a remote URL. format : {'binary', 'snap', 'csv', 'tsv'}, optional Format to of the file to load. - 'binary': native graph format obtained from `SGraph.save`. - 'snap': tab or space separated edge list format with comments, used in the `Stanford Network Analysis Platform <http://snap.stanford.edu/snap/>`_. - 'csv': comma-separated edge list without header or comments. - 'tsv': tab-separated edge list without header or comments. delimiter : str, optional Specifying the Delimiter used in 'snap', 'csv' or 'tsv' format. Those format has default delimiter, but sometimes it is useful to overwrite the default delimiter. Returns ------- out : SGraph Loaded SGraph. See Also -------- SGraph, SGraph.save Examples -------- >>> g = turicreate.SGraph().add_vertices([turicreate.Vertex(i) for i in range(5)]) Save and load in binary format. >>> g.save('mygraph') >>> g2 = turicreate.load_sgraph('mygraph') """
if not format in ['binary', 'snap', 'csv', 'tsv']:
        raise ValueError('Invalid format: %s' % format)

    with cython_context():
        g = None
        # Use equality, not identity, when comparing format strings.
        if format == 'binary':
            proxy = glconnect.get_unity().load_graph(_make_internal_url(filename))
            g = SGraph(_proxy=proxy)
        elif format == 'snap':
            if delimiter == 'auto':
                delimiter = '\t'
            sf = SFrame.read_csv(filename, comment_char='#', delimiter=delimiter,
                                 header=False, column_type_hints=int)
            g = SGraph().add_edges(sf, 'X1', 'X2')
        elif format == 'csv':
            if delimiter == 'auto':
                delimiter = ','
            sf = SFrame.read_csv(filename, header=False, delimiter=delimiter)
            g = SGraph().add_edges(sf, 'X1', 'X2')
        elif format == 'tsv':
            if delimiter == 'auto':
                delimiter = '\t'
            sf = SFrame.read_csv(filename, header=False, delimiter=delimiter)
            g = SGraph().add_edges(sf, 'X1', 'X2')
        g.summary()  # materialize
        return g
<SYSTEM_TASK:>
Convert a list of vertices into dataframe.
<END_TASK>
<USER_TASK:>
Description:
def _vertex_list_to_dataframe(ls, id_column_name):
    """
    Convert a list of vertices into dataframe.
    """

assert HAS_PANDAS, 'Cannot use dataframe because Pandas is not available or version is too low.'
    cols = reduce(set.union, (set(v.attr.keys()) for v in ls))
    df = pd.DataFrame({id_column_name: [v.vid for v in ls]})
    for c in cols:
        df[c] = [v.attr.get(c) for v in ls]
    return df
<SYSTEM_TASK:> Convert a list of vertices into an SFrame. <END_TASK> <USER_TASK:> Description: def _vertex_list_to_sframe(ls, id_column_name): """ Convert a list of vertices into an SFrame. """
sf = SFrame() if type(ls) == list: cols = reduce(set.union, (set(v.attr.keys()) for v in ls)) sf[id_column_name] = [v.vid for v in ls] for c in cols: sf[c] = [v.attr.get(c) for v in ls] elif type(ls) == Vertex: sf[id_column_name] = [ls.vid] for col, val in ls.attr.iteritems(): sf[col] = [val] else: raise TypeError('Vertices type {} is Not supported.'.format(type(ls))) return sf
<SYSTEM_TASK:> Convert a list of edges into dataframe. <END_TASK> <USER_TASK:> Description: def _edge_list_to_dataframe(ls, src_column_name, dst_column_name): """ Convert a list of edges into dataframe. """
assert HAS_PANDAS, 'Cannot use dataframe because Pandas is not available or version is too low.' cols = reduce(set.union, (set(e.attr.keys()) for e in ls)) df = pd.DataFrame({ src_column_name: [e.src_vid for e in ls], dst_column_name: [e.dst_vid for e in ls]}) for c in cols: df[c] = [e.attr.get(c) for e in ls] return df
<SYSTEM_TASK:> Convert a list of edges into an SFrame. <END_TASK> <USER_TASK:> Description: def _edge_list_to_sframe(ls, src_column_name, dst_column_name): """ Convert a list of edges into an SFrame. """
sf = SFrame() if type(ls) == list: cols = reduce(set.union, (set(v.attr.keys()) for v in ls)) sf[src_column_name] = [e.src_vid for e in ls] sf[dst_column_name] = [e.dst_vid for e in ls] for c in cols: sf[c] = [e.attr.get(c) for e in ls] elif type(ls) == Edge: sf[src_column_name] = [ls.src_vid] sf[dst_column_name] = [ls.dst_vid] else: raise TypeError('Edges type {} is Not supported.'.format(type(ls))) return sf
<SYSTEM_TASK:> Convert dataframe into list of vertices, assuming that vertex ids are stored in _VID_COLUMN. <END_TASK> <USER_TASK:> Description: def _dataframe_to_vertex_list(df): """ Convert dataframe into list of vertices, assuming that vertex ids are stored in _VID_COLUMN. """
cols = df.columns if len(cols): assert _VID_COLUMN in cols, "Vertex DataFrame must contain column %s" % _VID_COLUMN df = df[cols].T ret = [Vertex(None, _series=df[col]) for col in df] return ret else: return []
<SYSTEM_TASK:> Convert dataframe into list of edges, assuming that source and target ids are stored in _SRC_VID_COLUMN, and _DST_VID_COLUMN respectively. <END_TASK> <USER_TASK:> Description: def _dataframe_to_edge_list(df): """ Convert dataframe into list of edges, assuming that source and target ids are stored in _SRC_VID_COLUMN, and _DST_VID_COLUMN respectively. """
cols = df.columns if len(cols): assert _SRC_VID_COLUMN in cols, "Vertex DataFrame must contain column %s" % _SRC_VID_COLUMN assert _DST_VID_COLUMN in cols, "Vertex DataFrame must contain column %s" % _DST_VID_COLUMN df = df[cols].T ret = [Edge(None, None, _series=df[col]) for col in df] return ret else: return []
<SYSTEM_TASK:> Return a new SGraph with only the selected fields. Other fields are <END_TASK> <USER_TASK:> Description: def select_fields(self, fields): """ Return a new SGraph with only the selected fields. Other fields are discarded, while fields that do not exist in the SGraph are ignored. Parameters ---------- fields : string | list [string] A single field name or a list of field names to select. Returns ------- out : SGraph A new graph whose vertex and edge data are projected to the selected fields. See Also -------- get_fields, get_vertex_fields, get_edge_fields Examples -------- >>> from turicreate import SGraph, Vertex >>> verts = [Vertex(0, attr={'breed': 'labrador', 'age': 5}), Vertex(1, attr={'breed': 'labrador', 'age': 3}), Vertex(2, attr={'breed': 'vizsla', 'age': 8})] >>> g = SGraph() >>> g = g.add_vertices(verts) >>> g2 = g.select_fields(fields=['breed']) """
if (type(fields) is str): fields = [fields] if not isinstance(fields, list) or not all(type(x) is str for x in fields): raise TypeError('\"fields\" must be a str or list[str]') vfields = self.__proxy__.get_vertex_fields() efields = self.__proxy__.get_edge_fields() selected_vfields = [] selected_efields = [] for f in fields: found = False if f in vfields: selected_vfields.append(f) found = True if f in efields: selected_efields.append(f) found = True if not found: raise ValueError('Field \'%s\' not in graph' % f) with cython_context(): proxy = self.__proxy__ proxy = proxy.select_vertex_fields(selected_vfields) proxy = proxy.select_edge_fields(selected_efields) return SGraph(_proxy=proxy)
<SYSTEM_TASK:> Retrieve the graph neighborhood around a set of vertices, ignoring edge <END_TASK> <USER_TASK:> Description: def get_neighborhood(self, ids, radius=1, full_subgraph=True): """ Retrieve the graph neighborhood around a set of vertices, ignoring edge directions. Note that setting radius greater than two often results in a time-consuming query for a very large subgraph. Parameters ---------- ids : list [int | float | str] List of target vertex IDs. radius : int, optional Radius of the neighborhood. Every vertex in the returned subgraph is reachable from at least one of the target vertices on a path of length no longer than ``radius``. Setting radius larger than 2 may result in a very large subgraph. full_subgraph : bool, optional If True, return all edges between vertices in the returned neighborhood. The result is also known as the subgraph induced by the target nodes' neighbors, or the egocentric network for the target nodes. If False, return only edges on paths of length <= ``radius`` from the target node, also known as the reachability graph. Returns ------- out : Graph The subgraph with the neighborhoods around the target vertices. See Also -------- get_edges, get_vertices References ---------- - Marsden, P. (2002) `Egocentric and sociocentric measures of network centrality <http://www.sciencedirect.com/science/article/pii/S03788733 02000163>`_. - `Wikipedia - Reachability <http://en.wikipedia.org/wiki/Reachability>`_ Examples -------- >>> sf_edge = turicreate.SFrame({'source': range(9), 'dest': range(1, 10)}) >>> g = turicreate.SGraph() >>> g = g.add_edges(sf_edge, src_field='source', dst_field='dest') >>> subgraph = g.get_neighborhood(ids=[1, 7], radius=2, full_subgraph=True) """
verts = ids ## find the vertices within radius (and the path edges) for i in range(radius): edges_out = self.get_edges(src_ids=verts) edges_in = self.get_edges(dst_ids=verts) verts = list(edges_in['__src_id']) + list(edges_in['__dst_id']) + \ list(edges_out['__src_id']) + list(edges_out['__dst_id']) verts = list(set(verts)) ## make a new graph to return and add the vertices g = SGraph() g = g.add_vertices(self.get_vertices(verts), vid_field='__id') ## add the requested edge set if full_subgraph is True: induced_edge_out = self.get_edges(src_ids=verts) induced_edge_in = self.get_edges(dst_ids=verts) df_induced = induced_edge_out.append(induced_edge_in) df_induced = df_induced.groupby(df_induced.column_names(), {}) verts_sa = SArray(list(verts)) edges = df_induced.filter_by(verts_sa, "__src_id") edges = edges.filter_by(verts_sa, "__dst_id") else: path_edges = edges_out.append(edges_in) edges = path_edges.groupby(path_edges.column_names(), {}) g = g.add_edges(edges, src_field='__src_id', dst_field='__dst_id') return g
<SYSTEM_TASK:> Return the value for the queried field. <END_TASK> <USER_TASK:> Description: def _get(self, field): """ Return the value for the queried field. Get the value of a given field. The list of all queryable fields is documented in the beginning of the model class. >>> out = m._get('graph') Parameters ---------- field : string Name of the field to be retrieved. Returns ------- out : value The current value of the requested field. """
if field in self._list_fields(): return self.__proxy__.get(field) else: raise KeyError('Key \"%s\" not in model. Available fields are %s.' % (field, ', '.join(self._list_fields())))
<SYSTEM_TASK:> Return a dictionary for the class fields description. <END_TASK> <USER_TASK:> Description: def _describe_fields(cls): """ Return a dictionary for the class fields description. Fields should NOT be wrapped by _precomputed_field, if necessary """
dispatch_table = { 'ShortestPathModel': 'sssp', 'GraphColoringModel': 'graph_coloring', 'PagerankModel': 'pagerank', 'ConnectedComponentsModel': 'connected_components', 'TriangleCountingModel': 'triangle_counting', 'KcoreModel': 'kcore', 'DegreeCountingModel': 'degree_count', 'LabelPropagationModel': 'label_propagation' } try: toolkit_name = dispatch_table[cls.__name__] toolkit = _tc.extensions._toolkits.graph.__dict__[toolkit_name] return toolkit.get_model_fields({}) except: raise RuntimeError('Model %s does not have fields description' % cls.__name__)
<SYSTEM_TASK:> Check if the input is of expected type. <END_TASK> <USER_TASK:> Description: def _raise_error_if_not_of_type(arg, expected_type, arg_name=None): """ Check if the input is of expected type. Parameters ---------- arg : Input argument. expected_type : A type OR a list of types that the argument is expected to be. arg_name : The name of the variable in the function being used. No name is assumed if set to None. Examples -------- _raise_error_if_not_of_type(sf, str, 'sf') _raise_error_if_not_of_type(sf, [str, int], 'sf') """
display_name = "%s " % arg_name if arg_name is not None else "Argument " lst_expected_type = [expected_type] if \ type(expected_type) == type else expected_type err_msg = "%smust be of type %s " % (display_name, ' or '.join([x.__name__ for x in lst_expected_type])) err_msg += "(not %s)." % type(arg).__name__ if not any(map(lambda x: isinstance(arg, x), lst_expected_type)): raise TypeError(err_msg)
<SYSTEM_TASK:> Converts audio waveform into an array of examples for VGGish. <END_TASK> <USER_TASK:> Description: def waveform_to_examples(data, sample_rate): """Converts audio waveform into an array of examples for VGGish. Args: data: np.array of either one dimension (mono) or two dimensions (multi-channel, with the outer dimension representing channels). Each sample is generally expected to lie in the range [-1.0, +1.0], although this is not required. sample_rate: Sample rate of data. Returns: 3-D np.array of shape [num_examples, num_frames, num_bands] which represents a sequence of examples, each of which contains a patch of log mel spectrogram, covering num_frames frames of audio and num_bands mel frequency bands, where the frame length is vggish_params.STFT_HOP_LENGTH_SECONDS. """
import resampy # Convert to mono. if len(data.shape) > 1: data = np.mean(data, axis=1) # Resample to the rate assumed by VGGish. if sample_rate != vggish_params.SAMPLE_RATE: data = resampy.resample(data, sample_rate, vggish_params.SAMPLE_RATE) # Compute log mel spectrogram features. log_mel = mel_features.log_mel_spectrogram( data, audio_sample_rate=vggish_params.SAMPLE_RATE, log_offset=vggish_params.LOG_OFFSET, window_length_secs=vggish_params.STFT_WINDOW_LENGTH_SECONDS, hop_length_secs=vggish_params.STFT_HOP_LENGTH_SECONDS, num_mel_bins=vggish_params.NUM_MEL_BINS, lower_edge_hertz=vggish_params.MEL_MIN_HZ, upper_edge_hertz=vggish_params.MEL_MAX_HZ) # Frame features into examples. features_sample_rate = 1.0 / vggish_params.STFT_HOP_LENGTH_SECONDS example_window_length = int(round( vggish_params.EXAMPLE_WINDOW_SECONDS * features_sample_rate)) example_hop_length = int(round( vggish_params.EXAMPLE_HOP_SECONDS * features_sample_rate)) log_mel_examples = mel_features.frame( log_mel, window_length=example_window_length, hop_length=example_hop_length) return log_mel_examples
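A hedged usage sketch (requires numpy and resampy; the output shape depends on the constants in vggish_params, commonly 96 frames by 64 mel bands per example, but treat those numbers as assumptions):

import numpy as np

sr = 44100
t = np.arange(0, 3.0, 1.0 / sr)                  # 3 seconds of audio
waveform = 0.5 * np.sin(2 * np.pi * 440.0 * t)   # mono 440 Hz tone in [-1, 1]

examples = waveform_to_examples(waveform, sr)
# Typically (num_examples, 96, 64) with the stock VGGish parameters.
print(examples.shape)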
<SYSTEM_TASK:> Expand the given build request by combining all property_sets which don't <END_TASK> <USER_TASK:> Description: def expand_no_defaults (property_sets): """ Expand the given build request by combining all property_sets which don't specify conflicting non-free features. """
assert is_iterable_typed(property_sets, property_set.PropertySet) # First make all features and subfeatures explicit expanded_property_sets = [ps.expand_subfeatures() for ps in property_sets] # Now combine all of the expanded property_sets product = __x_product (expanded_property_sets) return [property_set.create(p) for p in product]
<SYSTEM_TASK:> Return the cross-product of all elements of property_sets, less any <END_TASK> <USER_TASK:> Description: def __x_product (property_sets): """ Return the cross-product of all elements of property_sets, less any that would contain conflicting values for single-valued features. """
assert is_iterable_typed(property_sets, property_set.PropertySet) x_product_seen = set() return __x_product_aux (property_sets, x_product_seen)[0]
<SYSTEM_TASK:> Returns non-conflicting combinations of property sets. <END_TASK> <USER_TASK:> Description: def __x_product_aux (property_sets, seen_features): """Returns non-conflicting combinations of property sets. property_sets is a list of PropertySet instances. seen_features is a set of Property instances. Returns a tuple of: - list of lists of Property instances, such that within each list, no two Property instance have the same feature, and no Property is for feature in seen_features. - set of features we saw in property_sets """
assert is_iterable_typed(property_sets, property_set.PropertySet) assert isinstance(seen_features, set) if not property_sets: return ([], set()) properties = property_sets[0].all() these_features = set() for p in property_sets[0].non_free(): these_features.add(p.feature) # Note: the algorithm as implemented here, as in original Jam code, appears to # detect conflicts based on features, not properties. For example, if command # line build request say: # # <a>1/<b>1 c<1>/<b>1 # # It will decide that those two property sets conflict, because they both specify # a value for 'b' and will not try building "<a>1 <c1> <b1>", but rather two # different property sets. This is a topic for future fixing, maybe. if these_features & seen_features: (inner_result, inner_seen) = __x_product_aux(property_sets[1:], seen_features) return (inner_result, inner_seen | these_features) else: result = [] (inner_result, inner_seen) = __x_product_aux(property_sets[1:], seen_features | these_features) if inner_result: for inner in inner_result: result.append(properties + inner) else: result.append(properties) if inner_seen & these_features: # Some of elements in property_sets[1:] conflict with elements of property_sets[0], # Try again, this time omitting elements of property_sets[0] (inner_result2, inner_seen2) = __x_product_aux(property_sets[1:], seen_features) result.extend(inner_result2) return (result, inner_seen | these_features)
<SYSTEM_TASK:>
Returns true if 'v' is either implicit value, or
<END_TASK>
<USER_TASK:>
Description:
def looks_like_implicit_value(v):
    """Returns true if 'v' is either implicit value, or
    the part before the first '-' symbol is implicit value."""

assert isinstance(v, basestring)
    if feature.is_implicit_value(v):
        return 1
    else:
        split = v.split("-")
        if feature.is_implicit_value(split[0]):
            return 1
    return 0
<SYSTEM_TASK:>
Format a human-readable error message from a regex
<END_TASK>
<USER_TASK:>
Description:
def regex_to_error_msg(regex):
    """Format a human-readable error message from a regex"""

return re.sub('([^\\\\])[()]', '\\1', regex) \
        .replace('[ \t]*$', '') \
        .replace('^', '') \
        .replace('$', '') \
        .replace('[ \t]*', ' ') \
        .replace('[ \t]+', ' ') \
        .replace('[0-9]+', 'X') \
        .replace('\\[', '[') \
        .replace('\\]', ']') \
        .replace('\\(', '(') \
        .replace('\\)', ')') \
        .replace('\\.', '.')
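Two expected reductions, as read from the substitution chain above:

assert regex_to_error_msg('^error: [0-9]+ issues$') == 'error: X issues'
assert regex_to_error_msg('^\\(some error\\)$') == '(some error)'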
<SYSTEM_TASK:> Enumerate the templates found in path <END_TASK> <USER_TASK:> Description: def templates_in(path): """Enumerate the templates found in path"""
ext = '.cpp' return ( Template(f[0:-len(ext)], load_file(os.path.join(path, f))) for f in os.listdir(path) if f.endswith(ext) )
<SYSTEM_TASK:>
Returns the nth character of a character->occurrence map
<END_TASK>
<USER_TASK:>
Description:
def nth_char(char_map, index):
    """Returns the nth character of a character->occurrence map"""

for char in char_map:
        if index < char_map[char]:
            return char
        index = index - char_map[char]
    return None
<SYSTEM_TASK:> Returns the C-formatting of the character <END_TASK> <USER_TASK:> Description: def format_character(char): """Returns the C-formatting of the character"""
if \ char in string.ascii_letters \ or char in string.digits \ or char in [ '_', '.', ':', ';', ' ', '!', '?', '+', '-', '/', '=', '<', '>', '$', '(', ')', '@', '~', '`', '|', '#', '[', ']', '{', '}', '&', '*', '^', '%']: return char elif char in ['"', '\'', '\\']: return '\\{0}'.format(char) elif char == '\n': return '\\n' elif char == '\r': return '\\r' elif char == '\t': return '\\t' else: return '\\x{:02x}'.format(ord(char))
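A few expected mappings, following the branches above:

assert format_character('a') == 'a'
assert format_character('"') == '\\"'      # quotes get a backslash escape
assert format_character('\n') == '\\n'     # newline becomes the two-char escape
assert format_character('\x01') == '\\x01' # everything else is hex-escaped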
<SYSTEM_TASK:> Create the file with the given content <END_TASK> <USER_TASK:> Description: def write_file(filename, content): """Create the file with the given content"""
print 'Generating {0}'.format(filename) with open(filename, 'wb') as out_f: out_f.write(content)
<SYSTEM_TASK:> Instantiates the template <END_TASK> <USER_TASK:> Description: def instantiate(self, value_of_n): """Instantiates the template"""
template = Cheetah.Template.Template( self.content, searchList={'n': value_of_n} ) template.random_string = random_string return str(template)
<SYSTEM_TASK:>
Find the first line matching regex and return the match object
<END_TASK>
<USER_TASK:>
Description:
def _match(self, regex):
    """Find the first line matching regex and return the match object"""

cregex = re.compile(regex)
        for line in self.content.splitlines():
            match = cregex.match(line)
            if match:
                return match
        raise Exception('No "{0}" line in {1}.cpp'.format(
            regex_to_error_msg(regex),
            self.name
        ))
<SYSTEM_TASK:> Invokes the Protocol Compiler to generate a _pb2.py from the given <END_TASK> <USER_TASK:> Description: def generate_proto(source, require = True): """Invokes the Protocol Compiler to generate a _pb2.py from the given .proto file. Does nothing if the output already exists and is newer than the input."""
if not require and not os.path.exists(source): return output = source.replace(".proto", "_pb2.py").replace("../src/", "") if (not os.path.exists(output) or (os.path.exists(source) and os.path.getmtime(source) > os.path.getmtime(output))): print("Generating %s..." % output) if not os.path.exists(source): sys.stderr.write("Can't find required file: %s\n" % source) sys.exit(-1) if protoc is None: sys.stderr.write( "protoc is not installed nor found in ../src. Please compile it " "or install the binary package.\n") sys.exit(-1) protoc_command = [ protoc, "-I../src", "-I.", "--python_out=.", source ] if subprocess.call(protoc_command) != 0: sys.exit(-1)
<SYSTEM_TASK:>
Validate a row label column.
<END_TASK>
<USER_TASK:>
Description:
def _validate_row_label(label, column_type_map):
    """
    Validate a row label column.

    Parameters
    ----------
    label : str
        Name of the row label column.

    column_type_map : dict[str, type]
        Dictionary mapping the name of each column in an SFrame to the type of
        the values in the column.
    """

if not isinstance(label, str):
        raise TypeError("The row label column name must be a string.")

    if not label in column_type_map.keys():
        raise ToolkitError("Row label column not found in the dataset.")

    if not column_type_map[label] in (str, int):
        raise TypeError("Row labels must be integers or strings.")
<SYSTEM_TASK:> Generate a new column name that is guaranteed not to conflict with an <END_TASK> <USER_TASK:> Description: def _robust_column_name(base_name, column_names): """ Generate a new column name that is guaranteed not to conflict with an existing set of column names. Parameters ---------- base_name : str The base of the new column name. Usually this does not conflict with the existing column names, in which case this function simply returns `base_name`. column_names : list[str] List of existing column names. Returns ------- robust_name : str The new column name. If `base_name` isn't in `column_names`, then `robust_name` is the same as `base_name`. If there are conflicts, a numeric suffix is added to `base_name` until it no longer conflicts with the column names. """
robust_name = base_name i = 1 while robust_name in column_names: robust_name = base_name + '.{}'.format(i) i += 1 return robust_name
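Expected behaviour of the suffixing loop:

assert _robust_column_name('id', ['a', 'b']) == 'id'         # no conflict
assert _robust_column_name('id', ['id', 'id.1']) == 'id.2'   # first free suffix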
<SYSTEM_TASK:> Utility function for selecting columns of only valid feature types. <END_TASK> <USER_TASK:> Description: def _select_valid_features(dataset, features, valid_feature_types, target_column=None): """ Utility function for selecting columns of only valid feature types. Parameters ---------- dataset: SFrame The input SFrame containing columns of potential features. features: list[str] List of feature column names. If None, the candidate feature set is taken to be all the columns in the dataset. valid_feature_types: list[type] List of Python types that represent valid features. If type is array.array, then an extra check is done to ensure that the individual elements of the array are of numeric type. If type is dict, then an extra check is done to ensure that dictionary values are numeric. target_column: str Name of the target column. If not None, the target column is excluded from the list of valid feature columns. Returns ------- out: list[str] List of valid feature column names. Warnings are given for each candidate feature column that is excluded. Examples -------- # Select all the columns of type `str` in sf, excluding the target column named # 'rating' >>> valid_columns = _select_valid_features(sf, None, [str], target_column='rating') # Select the subset of columns 'X1', 'X2', 'X3' that has dictionary type or defines # numeric array type >>> valid_columns = _select_valid_features(sf, ['X1', 'X2', 'X3'], [dict, array.array]) """
if features is not None: if not hasattr(features, '__iter__'): raise TypeError("Input 'features' must be an iterable type.") if not all([isinstance(x, str) for x in features]): raise TypeError("Input 'features' must contain only strings.") ## Extract the features and labels if features is None: features = dataset.column_names() col_type_map = { col_name: col_type for (col_name, col_type) in zip(dataset.column_names(), dataset.column_types())} valid_features = [] for col_name in features: if col_name not in dataset.column_names(): _logging.warning("Column '{}' is not in the input dataset.".format(col_name)) elif col_name == target_column: _logging.warning("Excluding target column " + target_column + " as a feature.") elif col_type_map[col_name] not in valid_feature_types: _logging.warning("Column '{}' is excluded as a ".format(col_name) + "feature due to invalid column type.") else: valid_features.append(col_name) if len(valid_features) == 0: raise ValueError("The dataset does not contain any valid feature columns. " + "Accepted feature types are " + str(valid_feature_types) + ".") return valid_features
<SYSTEM_TASK:>
Returns true if all of the elements in the list are equal.
<END_TASK>
<USER_TASK:>
Description:
def _check_elements_equal(lst):
    """
    Returns true if all of the elements in the list are equal.
    """

assert isinstance(lst, list), "Input value must be a list."
    return not lst or lst.count(lst[0]) == len(lst)
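Quick checks of the short-circuit logic:

assert _check_elements_equal([])          # an empty list counts as equal
assert _check_elements_equal([7, 7, 7])
assert not _check_elements_equal([7, 8])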