Dataset columns: docstring (string, lengths 52 to 499 characters), function (string, lengths 67 to 35.2k characters), __index_level_0__ (int64, values 52.6k to 1.16M). Each row below gives a docstring, the corresponding function, and its index.
Returns a vocabulary after eliminating words that appear fewer than `n` times. Args: n (integer): the minimum word frequency allowed.
def min_count(self, n=1):
    word_count = {w: c for w, c in iteritems(self.word_count) if c >= n}
    return CountedVocabulary(word_count=word_count)
165,544
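The same frequency cut-off can be demonstrated on a plain dict, as a minimal stand-in for the CountedVocabulary wrapper above (the wrapper class itself is assumed from the surrounding library):

# Keep only words that occur at least n times.
word_count = {'the': 10, 'of': 7, 'rare': 1}
n = 2
filtered = {w: c for w, c in word_count.items() if c >= n}
assert filtered == {'the': 10, 'of': 7}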
Apply `func` to lines of text, either in parallel or sequentially. Args: func: a function that takes a list of lines.
def apply(self, func, workers=1, job_size=10000):
    if workers == 1:
        for lines in self.iter_chunks(job_size):
            yield func(lines)
    else:
        with ProcessPoolExecutor(max_workers=workers) as executor:
            for result in executor.map(func, self.iter_chunks(job_size)):
                yield result
165,560
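Here is a self-contained sketch of the same chunked-map pattern, independent of the text-file class the method above belongs to (that class and its iter_chunks method are assumed from context):

from concurrent.futures import ProcessPoolExecutor

def iter_chunks(lines, job_size):
    # Yield fixed-size chunks of the input lines.
    for i in range(0, len(lines), job_size):
        yield lines[i:i + job_size]

def apply_chunks(lines, func, workers=1, job_size=10000):
    # Note: for the parallel branch, `func` must be picklable
    # (i.e. defined at module top level).
    if workers == 1:
        for chunk in iter_chunks(lines, job_size):
            yield func(chunk)
    else:
        with ProcessPoolExecutor(max_workers=workers) as executor:
            for result in executor.map(func, iter_chunks(lines, job_size)):
                yield result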
Annotate a sequence of words with entity tags. Args: sent: sequence of strings/words.
def annotate(self, sent):
    preds = []
    words = []
    for word, fv in self.sent2examples(sent):
        probs = self.predictor(fv)
        tags = probs.argsort()
        tag = self.ID_TAG[tags[-1]]
        words.append(word)
        preds.append(tag)
    # fix_chunks(preds)
    annotations = zip(words, preds)
    return annotations
165,566
Recurses a DynamicMap to find all DynamicMap inputs. Args: dmap: DynamicMap to recurse to look for DynamicMap inputs. Returns: List of DynamicMap instances that were found.
def get_nested_dmaps(dmap):
    if not isinstance(dmap, DynamicMap):
        return []
    dmaps = [dmap]
    for o in dmap.callback.inputs:
        dmaps.extend(get_nested_dmaps(o))
    return list(set(dmaps))
166,106
Recurses the supplied DynamicMap to find all streams. Args: dmap: DynamicMap to recurse to look for streams. Returns: List of streams that were found.
def get_nested_streams(dmap):
    return list({s for dmap in get_nested_dmaps(dmap) for s in dmap.streams})
166,107
Group by supplied dimension(s) and overlay each group. Groups data by supplied dimension(s), overlaying the groups along the dimension(s). Args: dimensions: Dimension(s) to group by. Returns: NdOverlay object(s) with supplied dimensions.
def overlay(self, dimensions=None, **kwargs):
    dimensions = self._valid_dimensions(dimensions)
    if len(dimensions) == self.ndims:
        with item_check(False):
            return NdOverlay(self, **kwargs).reindex(dimensions)
    else:
        dims = [d for d in self.kdims if d not in dimensions]
        return self.groupby(dims, group_type=NdOverlay, **kwargs)
166,110
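A hedged usage sketch (standard HoloViews API, assuming the library is installed): group a two-dimensional HoloMap by one key dimension and overlay the groups along it.

import numpy as np
import holoviews as hv

hmap = hv.HoloMap(
    {(amp, freq): hv.Curve(amp * np.sin(freq * np.linspace(0, np.pi, 50)))
     for amp in [1, 2] for freq in [1, 2]},
    kdims=['amp', 'freq'])
# Overlaying on 'amp' yields a HoloMap of NdOverlays indexed by 'freq'.
overlaid = hmap.overlay('amp')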
Group by supplied dimension(s) and lay out groups in a grid. Groups data by supplied dimension(s), laying the groups out in a GridSpace. Args: dimensions: Dimension/str or list. Dimension or list of dimensions to group by. Returns: GridSpace with supplied dimensions.
def grid(self, dimensions=None, **kwargs):
    dimensions = self._valid_dimensions(dimensions)
    if len(dimensions) == self.ndims:
        with item_check(False):
            return GridSpace(self, **kwargs).reindex(dimensions)
    return self.groupby(dimensions, container_type=GridSpace, **kwargs)
166,111
Group by supplied dimension(s) and lay out groups. Groups data by supplied dimension(s), laying the groups out in an NdLayout. Args: dimensions: Dimension(s) to group by. Returns: NdLayout with supplied dimensions.
def layout(self, dimensions=None, **kwargs):
    dimensions = self._valid_dimensions(dimensions)
    if len(dimensions) == self.ndims:
        with item_check(False):
            return NdLayout(self, **kwargs).reindex(dimensions)
    return self.groupby(dimensions, container_type=NdLayout, **kwargs)
166,112
Clones the Callable, optionally with new settings. Args: callable: New callable function to wrap. **overrides: Parameter overrides to apply. Returns: Cloned Callable object.
def clone(self, callable=None, **overrides):
    old = {k: v for k, v in self.get_param_values()
           if k not in ['callable', 'name']}
    params = dict(old, **overrides)
    callable = self.callable if callable is None else callable
    return self.__class__(callable, **params)
166,127
Calls the callable function with supplied args and kwargs. If enabled, uses memoization to avoid calling the function unnecessarily. Args: *args: Arguments passed to the callable function. **kwargs: Keyword arguments passed to the callable function. Returns: Return value of the wrapped callable function.
def __call__(self, *args, **kwargs):
    # Nothing to do for callbacks that accept no arguments
    kwarg_hash = kwargs.pop('_memoization_hash_', ())
    (self.args, self.kwargs) = (args, kwargs)
    if not args and not kwargs and not any(kwarg_hash):
        return self.callable()

    inputs = [i for i in self.inputs if isinstance(i, DynamicMap)]
    streams = []
    for stream in [s for i in inputs for s in get_nested_streams(i)]:
        if stream not in streams:
            streams.append(stream)

    memoize = self._stream_memoization and not any(
        s.transient and s._triggering for s in streams)
    values = tuple(tuple(sorted(s.hashkey.items())) for s in streams)
    key = args + kwarg_hash + values

    hashed_key = util.deephash(key) if self.memoize else None
    if hashed_key is not None and memoize and hashed_key in self._memoized:
        return self._memoized[hashed_key]

    if self.argspec.varargs is not None:
        # Missing information on positional argument names,
        # cannot promote to keywords
        pass
    elif len(args) != 0:
        # Turn positional arguments into keyword arguments
        pos_kwargs = {k: v for k, v in zip(self.argspec.args, args)}
        ignored = range(len(self.argspec.args), len(args))
        if len(ignored):
            self.param.warning('Ignoring extra positional argument %s'
                               % ', '.join('%s' % i for i in ignored))
        clashes = set(pos_kwargs.keys()) & set(kwargs.keys())
        if clashes:
            self.param.warning(
                'Positional arguments %r overriden by keywords'
                % list(clashes))
        args, kwargs = (), dict(pos_kwargs, **kwargs)

    try:
        ret = self.callable(*args, **kwargs)
    except KeyError:
        # KeyError is caught separately because it is used to signal
        # invalid keys on DynamicMap and should not warn
        raise
    except Exception as e:
        posstr = ', '.join(['%r' % el for el in self.args]) if self.args else ''
        kwstr = ', '.join('%s=%r' % (k, v) for k, v in self.kwargs.items())
        argstr = ', '.join([el for el in [posstr, kwstr] if el])
        message = ("Callable raised \"{e}\".\n"
                   "Invoked as {name}({argstr})")
        self.param.warning(message.format(name=self.name, argstr=argstr,
                                          e=repr(e)))
        raise

    if hashed_key is not None:
        self._memoized = {hashed_key: ret}
    return ret
166,128
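The memoization scheme above builds a key from the positional arguments, the kwarg hash and the stream state, deep-hashes it, and reuses the cached result on a hit. A minimal, self-contained sketch of the same idea (illustrative only; not HoloViews' actual util.deephash or Callable internals):

def deephash(obj):
    # Fall back to None for unhashable keys, which disables memoization.
    try:
        return hash(obj)
    except TypeError:
        return None

cache = {}

def call_memoized(fn, *args, **kwargs):
    key = deephash(args + tuple(sorted(kwargs.items())))
    if key is not None and key in cache:
        return cache[key]
    result = fn(*args, **kwargs)
    if key is not None:
        cache[key] = result  # Callable keeps only the most recent entry
    return result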
Updates attached streams and triggers events. Automatically finds the streams matching the supplied kwargs, updates them and triggers events on them. Args: **kwargs: Events to update streams with.
def event(self, **kwargs):
    if self.callback.noargs and self.streams == []:
        self.param.warning(
            'No streams declared. To update a DynamicMaps using '
            'generators (or callables without arguments) use streams=[Next()]')
        return
    if self.streams == []:
        self.param.warning('No streams on DynamicMap, calling event '
                           'will have no effect')
        return

    stream_params = set(util.stream_parameters(self.streams))
    invalid = [k for k in kwargs.keys() if k not in stream_params]
    if invalid:
        msg = 'Key(s) {invalid} do not correspond to stream parameters'
        raise KeyError(msg.format(invalid=', '.join('%r' % i for i in invalid)))

    streams = []
    for stream in self.streams:
        contents = stream.contents
        applicable_kws = {k: v for k, v in kwargs.items()
                          if k in set(contents.keys())}
        if not applicable_kws and contents:
            continue
        streams.append(stream)
        rkwargs = util.rename_stream_kwargs(stream, applicable_kws,
                                            reverse=True)
        stream.update(**rkwargs)

    Stream.trigger(streams)
166,135
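A hedged usage sketch of DynamicMap.event (assumes HoloViews is installed; Stream.define is the standard way to declare a stream with named parameters):

import holoviews as hv
from holoviews.streams import Stream

# Declare a stream with a single 'freq' parameter.
Freq = Stream.define('Freq', freq=1.0)
dmap = hv.DynamicMap(lambda freq: hv.Curve([(x, x * freq) for x in range(10)]),
                     streams=[Freq()])
dmap.event(freq=2.0)  # updates the 'freq' stream and triggers a redraw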
Groups data by supplied dimension(s), laying the groups out in a GridSpace. Args: dimensions: Dimension/str or list. Dimension or list of dimensions to group by. Returns: GridSpace with supplied dimensions.
def grid(self, dimensions=None, **kwargs):
    return self.groupby(dimensions, container_type=GridSpace, **kwargs)
166,150
Groups data by supplied dimension(s), laying the groups out in an NdLayout. Args: dimensions: Dimension/str or list. Dimension or list of dimensions to group by. Returns: NdLayout with supplied dimensions.
def layout(self, dimensions=None, **kwargs):
    return self.groupby(dimensions, container_type=NdLayout, **kwargs)
166,151
Group by supplied dimension(s) and overlay each group. Groups data by supplied dimension(s), overlaying the groups along the dimension(s). Args: dimensions: Dimension(s) to group by. Returns: NdOverlay object(s) with supplied dimensions.
def overlay(self, dimensions=None, **kwargs):
    if dimensions is None:
        dimensions = self.kdims
    else:
        if not isinstance(dimensions, (list, tuple)):
            dimensions = [dimensions]
        dimensions = [self.get_dimension(d, strict=True) for d in dimensions]
    dims = [d for d in self.kdims if d not in dimensions]
    return self.groupby(dims, group_type=NdOverlay)
166,152
Reorders key dimensions on DynamicMap Create a new object with a reordered set of key dimensions. Dropping dimensions is not allowed on a DynamicMap. Args: kdims: List of dimensions to reindex the mapping with force: Not applicable to a DynamicMap Returns: Reindexed DynamicMap
def reindex(self, kdims=[], force=False):
    if not isinstance(kdims, list):
        kdims = [kdims]
    kdims = [self.get_dimension(kd, strict=True) for kd in kdims]
    dropped = [kd for kd in self.kdims if kd not in kdims]
    if dropped:
        raise ValueError("DynamicMap does not allow dropping dimensions, "
                         "reindex may only be used to reorder dimensions.")
    return super(DynamicMap, self).reindex(kdims, force)
166,154
Snaps indices into the GridSpace to the closest coordinate. Args: key: Tuple index into the GridSpace Returns: Transformed key snapped to closest numeric coordinates
def _transform_indices(self, key):
    ndims = self.ndims
    if all(not (isinstance(el, slice) or callable(el)) for el in key):
        dim_inds = []
        for dim in self.kdims:
            dim_type = self.get_dimension_type(dim)
            if isinstance(dim_type, type) and issubclass(dim_type, Number):
                dim_inds.append(self.get_dimension_index(dim))
        str_keys = iter(key[i] for i in range(self.ndims)
                        if i not in dim_inds)
        num_keys = []
        if len(dim_inds):
            keys = list({tuple(k[i] if ndims > 1 else k for i in dim_inds)
                         for k in self.keys()})
            q = np.array([tuple(key[i] if ndims > 1 else key
                                for i in dim_inds)])
            idx = np.argmin([np.inner(q - np.array(x), q - np.array(x))
                             if len(dim_inds) == 2 else np.abs(q - x)
                             for x in keys])
            num_keys = iter(keys[idx])
        key = tuple(next(num_keys) if i in dim_inds else next(str_keys)
                    for i in range(self.ndims))
    elif any(not (isinstance(el, slice) or callable(el)) for el in key):
        keys = self.keys()
        for i, k in enumerate(key):
            if isinstance(k, slice):
                continue
            dim_keys = np.array([ke[i] for ke in keys])
            if dim_keys.dtype.kind in 'OSU':
                continue
            snapped_val = dim_keys[np.argmin(np.abs(dim_keys - k))]
            key = list(key)
            key[i] = snapped_val
            key = tuple(key)
    return key
166,156
Returns the keys of the GridSpace Args: full_grid (bool, optional): Return full cross-product of keys Returns: List of keys
def keys(self, full_grid=False):
    keys = super(GridSpace, self).keys()
    if self.ndims == 1 or not full_grid:
        return keys
    dim1_keys = sorted(set(k[0] for k in keys))
    dim2_keys = sorted(set(k[1] for k in keys))
    return [(d1, d2) for d1 in dim1_keys for d2 in dim2_keys]
166,157
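The densification performed by full_grid=True in miniature: the observed keys are expanded to the full cross-product of their per-dimension values.

keys = [(0, 'a'), (1, 'b')]
dim1 = sorted(set(k[0] for k in keys))
dim2 = sorted(set(k[1] for k in keys))
full = [(d1, d2) for d1 in dim1 for d2 in dim2]
assert full == [(0, 'a'), (0, 'b'), (1, 'a'), (1, 'b')]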
Get a layer in the Overlay. Get a particular layer in the Overlay using its path string or an integer index. Args: identifier: Index or path string of the item to return default: Value to return if no item is found Returns: The indexed layer of the Overlay
def get(self, identifier, default=None):
    if isinstance(identifier, int):
        values = list(self.data.values())
        if 0 <= identifier < len(values):
            return values[identifier]
        else:
            return default
    return super(Overlay, self).get(identifier, default)
166,176
Returns the current frame in a mapping given a key mapping. Args: obj: Nested Dimensioned object. key_map: Dictionary mapping between dimensions and key values. cached: Whether to allow looking up key in cache. Returns: The item in the mapping corresponding to the supplied key.
def get_plot_frame(map_obj, key_map, cached=False):
    if map_obj.kdims and len(map_obj.kdims) == 1 and map_obj.kdims[0] == 'Frame':
        # Special handling for static plots
        return map_obj.last
    key = tuple(key_map[kd.name] for kd in map_obj.kdims if kd.name in key_map)
    if key in map_obj.data and cached:
        return map_obj.data[key]
    else:
        try:
            return map_obj[key]
        except KeyError:
            return None
        except StopIteration as e:
            raise e
        except Exception:
            print(traceback.format_exc())
            return None
166,192
Extracts a single frame from a nested object. Replaces any HoloMap or DynamicMap in the nested data structure, with the item corresponding to the supplied key. Args: obj: Nested Dimensioned object key_map: Dictionary mapping between dimensions and key value cached: Whether to allow looking up key in cache Returns: Nested datastructure where maps are replaced with single frames
def get_nested_plot_frame(obj, key_map, cached=False):
    clone = obj.map(lambda x: x)
    # Ensure that DynamicMaps in the cloned frame have
    # identical callback inputs to allow memoization to work
    for it1, it2 in zip(obj.traverse(lambda x: x), clone.traverse(lambda x: x)):
        if isinstance(it1, DynamicMap):
            with disable_constant(it2.callback):
                it2.callback.inputs = it1.callback.inputs
    with item_check(False):
        return clone.map(lambda x: get_plot_frame(x, key_map, cached=cached),
                         [DynamicMap, HoloMap], clone=False)
166,193
Clones the NdLayout, overriding data and parameters. Args: data: New data replacing the existing data shared_data (bool, optional): Whether to use existing data new_type (optional): Type to cast object to *args: Additional arguments to pass to constructor **overrides: New keyword arguments to pass to constructor Returns: Cloned NdLayout object
def clone(self, *args, **overrides):
    clone = super(NdLayout, self).clone(*args, **overrides)
    clone._max_cols = self._max_cols
    clone.id = self.id
    return clone
166,480
Clones the Layout, overriding data and parameters. Args: data: New data replacing the existing data shared_data (bool, optional): Whether to use existing data new_type (optional): Type to cast object to *args: Additional arguments to pass to constructor **overrides: New keyword arguments to pass to constructor Returns: Cloned Layout object
def clone(self, *args, **overrides):
    clone = super(Layout, self).clone(*args, **overrides)
    clone._max_cols = self._max_cols
    return clone
166,484
Converts a cmap spec to a plotly colorscale Args: cmap: A recognized colormap by name or list of colors levels: A list or integer declaring the color-levels cmin: The lower bound of the color range cmax: The upper bound of the color range Returns: A valid plotly colorscale
def get_colorscale(cmap, levels=None, cmin=None, cmax=None):
    ncolors = levels if isinstance(levels, int) else None
    if isinstance(levels, list):
        ncolors = len(levels) - 1
        if isinstance(cmap, list) and len(cmap) != ncolors:
            raise ValueError('The number of colors in the colormap '
                             'must match the intervals defined in the '
                             'color_levels, expected %d colors found %d.'
                             % (ncolors, len(cmap)))
    try:
        palette = process_cmap(cmap, ncolors)
    except Exception as e:
        colorscale = colors.PLOTLY_SCALES.get(cmap)
        if colorscale is None:
            raise e
        return colorscale

    if isinstance(levels, int):
        colorscale = []
        scale = np.linspace(0, 1, levels + 1)
        for i in range(levels + 1):
            if i == 0:
                colorscale.append((scale[0], palette[i]))
            elif i == levels:
                colorscale.append((scale[-1], palette[-1]))
            else:
                colorscale.append((scale[i], palette[i - 1]))
                colorscale.append((scale[i], palette[i]))
        return colorscale
    elif isinstance(levels, list):
        palette, (cmin, cmax) = color_intervals(
            palette, levels, clip=(cmin, cmax))
    return colors.make_colorscale(palette)
166,532
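A standalone illustration of the piecewise scale construction above: each of `levels` intervals gets a flat color, so every interior breakpoint appears twice, once ending the previous color and once starting the next.

import numpy as np

palette = ['#440154', '#21918c', '#fde725']  # any 3 colors
levels = 3
scale = np.linspace(0, 1, levels + 1)
colorscale = []
for i in range(levels + 1):
    if i == 0:
        colorscale.append((scale[0], palette[0]))
    elif i == levels:
        colorscale.append((scale[-1], palette[-1]))
    else:
        colorscale.append((scale[i], palette[i - 1]))
        colorscale.append((scale[i], palette[i]))
# -> [(0.0, c0), (1/3, c0), (1/3, c1), (2/3, c1), (2/3, c2), (1.0, c2)]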
Convert dimension values to DataFrame. Returns a pandas dataframe of columns along each dimension, either completely flat or indexed by key dimensions. Args: dimensions: Dimensions to return as columns multi_index: Convert key dimensions to (multi-)index Returns: DataFrame of columns corresponding to each dimension
def dframe(self, dimensions=None, multi_index=False):
    import pandas as pd
    if dimensions is None:
        dimensions = [d.name for d in self.dimensions()]
    else:
        dimensions = [self.get_dimension(d, strict=True).name
                      for d in dimensions]
    column_names = dimensions
    dim_vals = OrderedDict([(dim, self.dimension_values(dim))
                            for dim in column_names])
    df = pd.DataFrame(dim_vals)
    if multi_index:
        df = df.set_index([d for d in dimensions if d in self.kdims])
    return df
166,589
Convert dimension values to columnar array. Args: dimensions: List of dimensions to return Returns: Array of columns corresponding to each dimension
def array(self, dimensions=None):
    if dimensions is None:
        dims = [d for d in self.kdims + self.vdims]
    else:
        dims = [self.get_dimension(d, strict=True) for d in dimensions]

    columns, types = [], []
    for dim in dims:
        column = self.dimension_values(dim)
        columns.append(column)
        types.append(column.dtype.kind)
    if len(set(types)) > 1:
        columns = [c.astype('object') for c in columns]
    return np.column_stack(columns)
166,590
Formatted contents of table cell. Args: row (int): Integer index of table row col (int): Integer index of table column Returns: Formatted table cell contents
def pprint_cell(self, row, col):
    ndims = self.ndims
    if col >= self.cols:
        raise Exception("Maximum column index is %d" % (self.cols - 1))
    elif row >= self.rows:
        raise Exception("Maximum row index is %d" % (self.rows - 1))
    elif row == 0:
        if col >= ndims:
            if self.vdims:
                return self.vdims[col - ndims].pprint_label
            else:
                return ''
        return self.kdims[col].pprint_label
    else:
        dim = self.get_dimension(col)
        return dim.pprint_value(self.iloc[row - 1, col])
166,593
Computes the maximal lower and upper bounds from a list of ranges. Args: ranges (list of tuples): A list of range tuples. combined (boolean, optional): Whether to combine bounds. Whether the range should be computed on the lower and upper bounds independently or on both at once. Returns: The maximum range as a single tuple.
def max_range(ranges, combined=True):
    try:
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore',
                                    r'All-NaN (slice|axis) encountered')
            values = [tuple(np.NaN if v is None else v for v in r)
                      for r in ranges]
            if pd and any(isinstance(v, datetime_types)
                          and not isinstance(v, cftime_types)
                          for r in values for v in r):
                converted = []
                for l, h in values:
                    if isinstance(l, datetime_types) and isinstance(h, datetime_types):
                        l, h = (pd.Timestamp(l).to_datetime64(),
                                pd.Timestamp(h).to_datetime64())
                    converted.append((l, h))
                values = converted
            arr = np.array(values)
            if not len(arr):
                return np.NaN, np.NaN
            elif arr.dtype.kind in 'OSU':
                arr = list(python2sort([v for r in values for v in r
                                        if not is_nan(v) and v is not None]))
                return arr[0], arr[-1]
            elif arr.dtype.kind in 'M':
                # Note: the upper bound must be a maximum; the original line
                # took the min of the upper bounds.
                return ((arr.min(), arr.max()) if combined
                        else (arr[:, 0].min(), arr[:, 1].max()))
            if combined:
                return (np.nanmin(arr), np.nanmax(arr))
            else:
                return (np.nanmin(arr[:, 0]), np.nanmax(arr[:, 1]))
    except:
        return (np.NaN, np.NaN)
166,763
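The combined behavior in miniature: None entries are treated as NaN and ignored by the nan-aware extrema, exactly as in max_range above with combined=True.

import numpy as np

ranges = [(0, 5), (2, 8), (None, None)]
arr = np.array([tuple(np.nan if v is None else v for v in r)
                for r in ranges], dtype=float)
lo, hi = np.nanmin(arr), np.nanmax(arr)
assert (lo, hi) == (0.0, 8.0)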
Returns an array of unique values in the input order. Args: arr (np.ndarray or list): The array to compute unique values on Returns: A new array of unique values
def unique_array(arr):
    if not len(arr):
        return np.asarray(arr)
    elif pd:
        if isinstance(arr, np.ndarray) and arr.dtype.kind not in 'MO':
            # Avoid expensive unpacking if not potentially datetime
            return pd.unique(arr)
        values = []
        for v in arr:
            if (isinstance(v, datetime_types) and
                    not isinstance(v, cftime_types)):
                v = pd.Timestamp(v).to_datetime64()
            values.append(v)
        return pd.unique(values)
    else:
        arr = np.asarray(arr)
        _, uniq_inds = np.unique(arr, return_index=True)
        return arr[np.sort(uniq_inds)]
166,770
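The non-pandas fallback preserves input order via np.unique's return_index, as this small demonstration shows:

import numpy as np

arr = np.array([3, 1, 3, 2, 1])
_, first_idx = np.unique(arr, return_index=True)
# Sorting the first-occurrence indices restores the original order.
assert list(arr[np.sort(first_idx)]) == [3, 1, 2]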
Whether the object is a method on a parameterized object. Args: obj: Object to check. has_deps (boolean, optional): Check for dependencies. Whether to also check whether the method has been annotated with param.depends. Returns: A boolean value indicating whether the object is a method on a Parameterized object and, if enabled, whether it has any dependencies.
def is_param_method(obj, has_deps=False):
    parameterized = (inspect.ismethod(obj) and
                     isinstance(get_method_owner(obj), param.Parameterized))
    if parameterized and has_deps:
        return getattr(obj, "_dinfo", {}).get('dependencies')
    return parameterized
166,788
Replaces dimensions in list with dictionary of overrides. Args: dimensions: List of dimensions overrides: Dictionary of dimension specs indexed by name Returns: list: List of dimensions with replacements applied
def replace_dimensions(cls, dimensions, overrides):
    from .dimension import Dimension
    replaced = []
    for d in dimensions:
        if d.name in overrides:
            override = overrides[d.name]
        elif d.label in overrides:
            override = overrides[d.label]
        else:
            override = None

        if override is None:
            replaced.append(d)
        elif isinstance(override, (util.basestring, tuple)):
            replaced.append(d.clone(override))
        elif isinstance(override, Dimension):
            replaced.append(override)
        elif isinstance(override, dict):
            replaced.append(d.clone(override.get('name', None),
                                    **{k: v for k, v in override.items()
                                       if k != 'name'}))
        else:
            raise ValueError('Dimension can only be overridden '
                             'with another dimension or a dictionary '
                             'of attributes')
    return replaced
166,957
Returns the corresponding Options object. Args: group: The options group. Flattens across groups if None. backend: Current backend if None otherwise chosen backend. Returns: Options object associated with the object containing the applied option keywords.
def get(self, group=None, backend=None):
    from .options import Store, Options
    keywords = {}
    groups = Options._option_groups if group is None else [group]
    backend = backend if backend else Store.current_backend
    for group in groups:
        optsobj = Store.lookup_options(backend, self._obj, group)
        keywords = dict(keywords, **optsobj.kwargs)
    return Options(**keywords)
166,962
Prints a repr of the object including any applied options. Args: show_defaults: Whether to include default options
def info(self, show_defaults=False):
    pprinter = PrettyPrinter(show_options=True, show_defaults=show_defaults)
    print(pprinter.pprint(self._obj))
166,965
Drops dimension(s) from keys. Args: dimensions: Dimension(s) to drop. Returns: Clone of object with dropped dimension(s).
def drop_dimension(self, dimensions):
    dimensions = [dimensions] if np.isscalar(dimensions) else dimensions
    dims = [d for d in self.kdims if d not in dimensions]
    dim_inds = [self.get_dimension_index(d) for d in dims]
    key_getter = itemgetter(*dim_inds)
    return self.clone([(key_getter(k), v) for k, v in self.data.items()],
                      kdims=dims)
167,035
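The key surgery above in miniature: keep the key positions of the remaining dimensions and rebuild each key tuple with itemgetter.

from operator import itemgetter

keys = [(0, 'a', 10), (1, 'b', 20)]   # key dimensions: x, y, z
keep_inds = [0, 2]                    # dropping 'y'
key_getter = itemgetter(*keep_inds)
assert [key_getter(k) for k in keys] == [(0, 10), (1, 20)]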
Merges items from another object into this object. Args: other: Object containing items to merge into this object. Must be a dictionary or NdMapping type.
def update(self, other):
    if isinstance(other, NdMapping):
        dims = [d for d in other.kdims if d not in self.kdims]
        if len(dims) == other.ndims:
            raise KeyError("Cannot update with NdMapping that has"
                           " a different set of key dimensions.")
        elif dims:
            other = other.drop_dimension(dims)
        other = other.data
    for key, data in other.items():
        self._add_item(key, data, sort=False)
    if self.sort:
        self._resort()
167,039
Convert dimension values to DataFrame. Returns a pandas dataframe of columns along each dimension, either completely flat or indexed by key dimensions. Args: dimensions: Dimensions to return as columns multi_index: Convert key dimensions to (multi-)index Returns: DataFrame of columns corresponding to each dimension
def dframe(self, dimensions=None, multi_index=False):
    import pandas as pd
    if dimensions is None:
        outer_dimensions = self.kdims
        inner_dimensions = None
    else:
        outer_dimensions = [self.get_dimension(d) for d in dimensions
                            if d in self.kdims]
        inner_dimensions = [d for d in dimensions
                            if d not in outer_dimensions]
    inds = [(d, self.get_dimension_index(d)) for d in outer_dimensions]

    dframes = []
    for key, element in self.data.items():
        df = element.dframe(inner_dimensions, multi_index)
        names = [d.name for d in outer_dimensions]
        key_dims = [(d.name, key[i]) for d, i in inds]
        if multi_index:
            length = len(df)
            indexes = [[v] * length for _, v in key_dims]
            if df.index.names != [None]:
                indexes += [df.index]
                names += list(df.index.names)
            df = df.set_index(indexes)
            df.index.names = names
        else:
            for dim, val in key_dims:
                dimn = 1
                while dim in df:
                    dim = dim + '_%d' % dimn
                    if dim in df:
                        dimn += 1
                df.insert(0, dim, val)
        dframes.append(df)
    return pd.concat(dframes)
167,056
Snaps coordinate(s) to closest coordinate in Dataset Args: coords: List of coordinates expressed as tuples **kwargs: Coordinates defined as keyword pairs Returns: List of tuples of the snapped coordinates Raises: NotImplementedError: Raised if snapping is not supported
def closest(self, coords=[], **kwargs):
    if self.ndims > 1:
        raise NotImplementedError("Closest method currently only "
                                  "implemented for 1D Elements")

    if kwargs:
        if len(kwargs) > 1:
            raise NotImplementedError("Closest method currently only "
                                      "supports 1D indexes")
        samples = list(kwargs.values())[0]
        coords = samples if isinstance(samples, list) else [samples]

    xs = self.dimension_values(0)
    if xs.dtype.kind in 'SO':
        raise NotImplementedError("Closest only supported for numeric types")
    idxs = [np.argmin(np.abs(xs - coord)) for coord in coords]
    return [xs[idx] for idx in idxs]
167,065
Sorts the data by the values along the supplied dimensions. Args: by: Dimension(s) to sort by reverse (bool, optional): Reverse sort order Returns: Sorted Dataset
def sort(self, by=None, reverse=False):
    if by is None:
        by = self.kdims
    elif not isinstance(by, list):
        by = [by]
    sorted_columns = self.interface.sort(self, by, reverse)
    return self.clone(sorted_columns)
167,066
Return the lower and upper bounds of values along dimension. Args: dimension: The dimension to compute the range on. data_range (bool): Compute range from data values. dimension_range (bool): Include Dimension ranges. Whether to include Dimension range and soft_range in range calculation. Returns: Tuple containing the lower and upper bound.
def range(self, dim, data_range=True, dimension_range=True):
    dim = self.get_dimension(dim)
    if dim is None or (not data_range and not dimension_range):
        return (None, None)
    elif all(util.isfinite(v) for v in dim.range) and dimension_range:
        return dim.range
    elif dim in self.dimensions() and data_range and bool(self):
        lower, upper = self.interface.range(self, dim)
    else:
        lower, upper = (np.NaN, np.NaN)
    if not dimension_range:
        return lower, upper
    return util.dimension_range(lower, upper, dim.range, dim.soft_range)
167,067
Reindexes Dataset, dropping static or supplied kdims. Creates a new object with a reordered or reduced set of key dimensions. By default drops all non-varying key dimensions. Args: kdims (optional): New list of key dimensions. vdims (optional): New list of value dimensions. Returns: Reindexed object.
def reindex(self, kdims=None, vdims=None):
    gridded = self.interface.gridded
    scalars = []
    if gridded:
        coords = [(d, self.interface.coords(self, d.name))
                  for d in self.kdims]
        scalars = [d for d, vs in coords if len(vs) == 1]

    if kdims is None:
        # If no key dimensions are defined and interface is gridded
        # drop all scalar key dimensions
        key_dims = [d for d in self.kdims
                    if (not vdims or d not in vdims) and d not in scalars]
    elif not isinstance(kdims, list):
        key_dims = [self.get_dimension(kdims, strict=True)]
    else:
        key_dims = [self.get_dimension(k, strict=True) for k in kdims]
    dropped = [d for d in self.kdims
               if d not in key_dims and d not in scalars]

    new_type = None
    if vdims is None:
        val_dims = [d for d in self.vdims if not kdims or d not in kdims]
    else:
        val_dims = [self.get_dimension(v, strict=True) for v in vdims]
        new_type = self._vdim_reductions.get(len(val_dims), type(self))

    data = self.interface.reindex(self, key_dims, val_dims)
    datatype = self.datatype
    if gridded and dropped:
        interfaces = self.interface.interfaces
        datatype = [dt for dt in datatype
                    if not getattr(interfaces.get(dt, None), 'gridded', True)]
    return self.clone(data, kdims=key_dims, vdims=val_dims,
                      new_type=new_type, datatype=datatype)
167,070
Get the type of the requested dimension. Type is determined by Dimension.type attribute or common type of the dimension values, otherwise None. Args: dimension: Dimension to look up by name or by index Returns: Declared type of values along the dimension
def get_dimension_type(self, dim):
    dim_obj = self.get_dimension(dim)
    if dim_obj and dim_obj.type is not None:
        return dim_obj.type
    return self.interface.dimension_type(self, dim_obj)
167,077
Convert dimension values to DataFrame. Returns a pandas dataframe of columns along each dimension, either completely flat or indexed by key dimensions. Args: dimensions: Dimensions to return as columns multi_index: Convert key dimensions to (multi-)index Returns: DataFrame of columns corresponding to each dimension
def dframe(self, dimensions=None, multi_index=False):
    if dimensions is None:
        dimensions = [d.name for d in self.dimensions()]
    else:
        dimensions = [self.get_dimension(d, strict=True).name
                      for d in dimensions]
    df = self.interface.dframe(self, dimensions)
    if multi_index:
        df = df.set_index([d for d in dimensions if d in self.kdims])
    return df
167,078
Convert dimension values to a dictionary. Returns a dictionary of column arrays along each dimension of the element. Args: dimensions: Dimensions to return as columns Returns: Dictionary of arrays for each dimension
def columns(self, dimensions=None):
    if dimensions is None:
        dimensions = self.dimensions()
    else:
        dimensions = [self.get_dimension(d, strict=True) for d in dimensions]
    return OrderedDict([(d.name, self.dimension_values(d))
                        for d in dimensions])
167,079
Clones the object, overriding data and parameters. Args: data: New data replacing the existing data shared_data (bool, optional): Whether to use existing data new_type (optional): Type to cast object to *args: Additional arguments to pass to constructor **overrides: New keyword arguments to pass to constructor Returns: Cloned object
def clone(self, data=None, shared_data=True, new_type=None, *args, **overrides):
    if 'datatype' not in overrides:
        datatypes = [self.interface.datatype] + self.datatype
        overrides['datatype'] = list(util.unique_iterator(datatypes))
    return super(Dataset, self).clone(data, shared_data, new_type,
                                      *args, **overrides)
167,080
Returns Params streams given a dictionary of parameters Args: params (dict): Dictionary of parameters Returns: List of Params streams
def from_params(cls, params):
    key_fn = lambda x: id(x[1].owner)
    streams = []
    for _, group in groupby(sorted(params.items(), key=key_fn), key_fn):
        group = list(group)
        inst = [p.owner for _, p in group][0]
        if not isinstance(inst, param.Parameterized):
            continue
        names = [p.name for _, p in group]
        rename = {p.name: n for n, p in group}
        streams.append(cls(inst, names, rename=rename))
    return streams
167,130
Converts supported date types to milliseconds since the epoch. Attempts highest-precision conversion of different datetime formats to milliseconds since the epoch (1970-01-01 00:00:00). If the datetime is a cftime with a non-standard calendar, the caveats described in hv.core.util.cftime_to_timestamp apply. Args: date: Date- or datetime-like object. Returns: Milliseconds since 1970-01-01 00:00:00.
def date_to_integer(date):
    if pd and isinstance(date, pd.Timestamp):
        try:
            date = date.to_datetime64()
        except:
            date = date.to_datetime()

    if isinstance(date, np.datetime64):
        return date.astype('datetime64[ms]').astype(float)
    elif isinstance(date, cftime_types):
        return cftime_to_timestamp(date, 'ms')

    if hasattr(date, 'timetuple'):
        dt_int = calendar.timegm(date.timetuple()) * 1000
    else:
        raise ValueError('Datetime type not recognized')
    return dt_int
167,183
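The np.datetime64 branch in action: one day after the epoch is 86,400,000 milliseconds.

import numpy as np

day1 = np.datetime64('1970-01-02')
assert day1.astype('datetime64[ms]').astype(float) == 86400000.0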
Convert the input to a Dimension. Args: dimension: tuple, dict or string type to convert to Dimension Returns: A Dimension object constructed from the dimension spec. No copy is performed if the input is already a Dimension.
def asdim(dimension):
    if isinstance(dimension, Dimension):
        return dimension
    elif isinstance(dimension, (tuple, dict, basestring)):
        return Dimension(dimension)
    else:
        raise ValueError('%s type could not be interpreted as Dimension. '
                         'Dimensions must be declared as a string, tuple, '
                         'dictionary or Dimension type.'
                         % type(dimension).__name__)
167,220
Return the Dimension.name for a dimension-like object. Args: dimension: Dimension or dimension string, tuple or dict. Returns: The name of the Dimension, or what the name would be if the input were converted to a Dimension.
def dimension_name(dimension):
    if isinstance(dimension, Dimension):
        return dimension.name
    elif isinstance(dimension, basestring):
        return dimension
    elif isinstance(dimension, tuple):
        return dimension[0]
    elif isinstance(dimension, dict):
        return dimension['name']
    elif dimension is None:
        return None
    else:
        raise ValueError('%s type could not be interpreted as Dimension. '
                         'Dimensions must be declared as a string, tuple, '
                         'dictionary or Dimension type.'
                         % type(dimension).__name__)
167,221
Converts kdims and vdims to Dimension objects. Args: kdims: List or single key dimension(s) specified as strings, tuples, dicts or Dimension objects. vdims: List or single value dimension(s) specified as strings, tuples, dicts or Dimension objects. Returns: Dictionary containing kdims and vdims converted to Dimension objects: {'kdims': [Dimension('x')], 'vdims': [Dimension('y')]}
def process_dimensions(kdims, vdims):
    dimensions = {}
    for group, dims in [('kdims', kdims), ('vdims', vdims)]:
        if dims is None:
            continue
        elif isinstance(dims, (tuple, basestring, Dimension, dict)):
            dims = [dims]
        elif not isinstance(dims, list):
            raise ValueError("%s argument expects a Dimension or list of dimensions, "
                             "specified as tuples, strings, dictionaries or Dimension "
                             "instances, not a %s type. Ensure you passed the data as the "
                             "first argument." % (group, type(dims).__name__))
        for dim in dims:
            if not isinstance(dim, (tuple, basestring, Dimension, dict)):
                raise ValueError('Dimensions must be defined as a tuple, '
                                 'string, dictionary or Dimension instance, '
                                 'found a %s type.' % type(dim).__name__)
        dimensions[group] = [asdim(d) for d in dims]
    return dimensions
167,222
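A hedged usage sketch (assumes HoloViews is installed; process_dimensions lives in holoviews.core.dimension in recent versions):

from holoviews.core.dimension import process_dimensions

# A single string and a (name, label) tuple are both valid dimension specs.
dims = process_dimensions(kdims='x', vdims=[('y', 'Amplitude')])
# -> {'kdims': [Dimension('x')], 'vdims': [Dimension('y', label='Amplitude')]}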
Clones the Dimension with new parameters Derive a new Dimension that inherits existing parameters except for the supplied, explicit overrides Args: spec (tuple, optional): Dimension tuple specification **overrides: Dimension parameter overrides Returns: Cloned Dimension object
def clone(self, spec=None, **overrides):
    settings = dict(self.get_param_values(), **overrides)
    if spec is None:
        spec = (self.name, overrides.get('label', self.label))
    if 'label' in overrides and isinstance(spec, basestring):
        spec = (spec, overrides['label'])
    elif 'label' in overrides and isinstance(spec, tuple):
        if overrides['label'] != spec[1]:
            self.param.warning(
                'Using label as supplied by keyword ({!r}), ignoring '
                'tuple value {!r}'.format(overrides['label'], spec[1]))
        spec = (spec[0], overrides['label'])
    return self.__class__(spec, **{k: v for k, v in settings.items()
                                   if k not in ['name', 'label']})
167,225
Applies the applicable formatter to the value. Args: value: Dimension value to format Returns: Formatted dimension value
def pprint_value(self, value):
    own_type = type(value) if self.type is None else self.type
    formatter = (self.value_format if self.value_format
                 else self.type_formatters.get(own_type))
    if formatter:
        if callable(formatter):
            return formatter(value)
        elif isinstance(formatter, basestring):
            if isinstance(value, (dt.datetime, dt.date)):
                return value.strftime(formatter)
            elif isinstance(value, np.datetime64):
                return util.dt64_to_dt(value).strftime(formatter)
            elif re.findall(r"\{(\w+)\}", formatter):
                return formatter.format(value)
            else:
                return formatter % value
    return unicode(bytes_to_unicode(value))
167,231
Pretty print the dimension value and unit. Args: value: Dimension value to format Returns: Formatted dimension value string with unit
def pprint_value_string(self, value):
    unit = '' if self.unit is None else ' ' + bytes_to_unicode(self.unit)
    value = self.pprint_value(value)
    return title_format.format(name=bytes_to_unicode(self.label),
                               val=value, unit=unit)
167,232
Whether the spec applies to this object. Args: spec: A function, spec or type to check for a match * A 'type[[.group].label]' string which is compared against the type, group and label of this object * A function which is given the object and returns a boolean. * An object type matched using isinstance. Returns: bool: Whether the spec matched this object.
def matches(self, spec):
    if callable(spec) and not isinstance(spec, type):
        return spec(self)
    elif isinstance(spec, type):
        return isinstance(self, spec)

    specification = (self.__class__.__name__, self.group, self.label)
    split_spec = tuple(spec.split('.')) if not isinstance(spec, tuple) else spec
    split_spec, nocompare = zip(*((None, True) if s == '*' or s is None
                                  else (s, False) for s in split_spec))
    if all(nocompare):
        return True
    match_fn = itemgetter(*(idx for idx, nc in enumerate(nocompare) if not nc))
    self_spec = match_fn(split_spec)
    unescaped_match = match_fn(specification[:len(split_spec)]) == self_spec
    if unescaped_match:
        return True
    sanitizers = [util.sanitize_identifier, util.group_sanitizer,
                  util.label_sanitizer]
    identifier_specification = tuple(fn(ident, escape=False)
                                     for ident, fn in zip(specification,
                                                          sanitizers))
    identifier_match = match_fn(
        identifier_specification[:len(split_spec)]) == self_spec
    return identifier_match
167,237
Get a Dimension object by name or index. Args: dimension: Dimension to look up by name or integer index default (optional): Value returned if Dimension not found strict (bool, optional): Raise a KeyError if not found Returns: Dimension object for the requested dimension or default
def get_dimension(self, dimension, default=None, strict=False):
    if dimension is not None and not isinstance(dimension,
                                                (int, basestring, Dimension)):
        raise TypeError('Dimension lookup supports int, string, '
                        'and Dimension instances, cannot lookup '
                        'Dimensions using %s type.' % type(dimension).__name__)
    all_dims = self.dimensions()
    if isinstance(dimension, int):
        if 0 <= dimension < len(all_dims):
            return all_dims[dimension]
        elif strict:
            raise KeyError("Dimension %r not found" % dimension)
        else:
            return default

    dimension = dimension_name(dimension)
    name_map = {dim.name: dim for dim in all_dims}
    name_map.update({dim.label: dim for dim in all_dims})
    name_map.update({util.dimension_sanitizer(dim.name): dim
                     for dim in all_dims})
    if strict and dimension not in name_map:
        raise KeyError("Dimension %r not found." % dimension)
    else:
        return name_map.get(dimension, default)
167,245
Get the index of the requested dimension. Args: dimension: Dimension to look up by name or by index Returns: Integer index of the requested dimension
def get_dimension_index(self, dimension):
    if isinstance(dimension, int):
        if (dimension < (self.ndims + len(self.vdims)) or
                dimension < len(self.dimensions())):
            return dimension
        else:
            # Raise (the original returned the exception instance)
            raise IndexError('Dimension index out of bounds')
    dim = dimension_name(dimension)
    try:
        dimensions = self.kdims + self.vdims
        return [i for i, d in enumerate(dimensions) if d == dim][0]
    except IndexError:
        raise Exception("Dimension %s not found in %s."
                        % (dim, self.__class__.__name__))
167,246
Get the type of the requested dimension. Type is determined by Dimension.type attribute or common type of the dimension values, otherwise None. Args: dimension: Dimension to look up by name or by index Returns: Declared type of values along the dimension
def get_dimension_type(self, dim):
    dim_obj = self.get_dimension(dim)
    if dim_obj and dim_obj.type is not None:
        return dim_obj.type
    dim_vals = [type(v) for v in self.dimension_values(dim)]
    if len(set(dim_vals)) == 1:
        return dim_vals[0]
    else:
        return None
167,247
Return the lower and upper bounds of values along dimension. Args: dimension: The dimension to compute the range on. data_range (bool): Compute range from data values. dimension_range (bool): Include Dimension ranges. Whether to include Dimension range and soft_range in range calculation. Returns: Tuple containing the lower and upper bound.
def range(self, dimension, data_range=True, dimension_range=True):
    dimension = self.get_dimension(dimension)
    if dimension is None or (not data_range and not dimension_range):
        return (None, None)
    elif all(util.isfinite(v) for v in dimension.range) and dimension_range:
        return dimension.range
    elif data_range:
        if dimension in self.kdims + self.vdims:
            dim_vals = self.dimension_values(dimension.name)
            lower, upper = util.find_range(dim_vals)
        else:
            dname = dimension.name
            match_fn = lambda x: dname in x.kdims + x.vdims
            range_fn = lambda x: x.range(dname)
            ranges = self.traverse(range_fn, [match_fn])
            lower, upper = util.max_range(ranges)
    else:
        lower, upper = (np.NaN, np.NaN)
    if not dimension_range:
        return lower, upper
    return util.dimension_range(lower, upper,
                                dimension.range, dimension.soft_range)
167,250
Set default options for a session, whether in a Python script or a Jupyter notebook. Args: *options: Option objects used to specify the defaults. backend: The plotting extension the options apply to.
def defaults(cls, *options, **kwargs):
    if kwargs and len(kwargs) != 1 and list(kwargs.keys())[0] != 'backend':
        raise Exception('opts.defaults only accepts "backend" keyword argument')
    cls._linemagic(cls._expand_options(merge_options_to_dict(options)),
                   backend=kwargs.get('backend'))
167,468
Unity-based normalization to scale data into 0-1 range. (values - min) / (max - min) Args: values: Array of values to be normalized min (float, optional): Lower bound of normalization range max (float, optional): Upper bound of normalization range Returns: Array of normalized values
def norm(values, min=None, max=None):
    min = np.min(values) if min is None else min
    max = np.max(values) if max is None else max
    return (values - min) / (max - min)
167,493
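Unity normalization in action; passing explicit bounds would rescale against a fixed range instead of the data extremes.

import numpy as np

values = np.array([0.0, 5.0, 10.0])
normed = (values - values.min()) / (values.max() - values.min())
assert list(normed) == [0.0, 0.5, 1.0]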
Bins data into declared bins. By default each bin is labelled with bin center values, but an explicit list of bin labels may be defined. Args: values: Array of values to be binned. bins: List or array containing the bin boundaries. labels: List of labels to assign to each bin. If the bins are length N the labels should be length N-1. Returns: Array of binned values.
def bin(values, bins, labels=None):
    bins = np.asarray(bins)
    if labels is None:
        labels = (bins[:-1] + np.diff(bins) / 2.)
    else:
        labels = np.asarray(labels)
    dtype = 'float' if labels.dtype.kind == 'f' else 'O'
    binned = np.full_like(values, (np.nan if dtype == 'float' else None),
                          dtype=dtype)
    for lower, upper, label in zip(bins[:-1], bins[1:], labels):
        condition = (values > lower) & (values <= upper)
        binned[np.where(condition)[0]] = label
    return binned
167,494
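Binning in miniature: values falling in a half-open interval are mapped to that bin's center label, exactly as in the loop above.

import numpy as np

values = np.array([1.0, 6.0])
bins = np.array([0.0, 5.0, 10.0])
labels = bins[:-1] + np.diff(bins) / 2.0   # -> [2.5, 7.5]
binned = np.full_like(values, np.nan)
for lower, upper, label in zip(bins[:-1], bins[1:], labels):
    binned[(values > lower) & (values <= upper)] = label
assert list(binned) == [2.5, 7.5]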
Maps discrete values to supplied categories. Replaces discrete values in input array with a fixed set of categories defined either as a list or dictionary. Args: values: Array of values to be categorized categories: List or dict of categories to map inputs to default: Default value to assign if value not in categories Returns: Array of categorized values
def categorize(values, categories, default=None):
    uniq_cats = list(unique_iterator(values))
    cats = []
    for c in values:
        if isinstance(categories, list):
            cat_ind = uniq_cats.index(c)
            if cat_ind < len(categories):
                cat = categories[cat_ind]
            else:
                cat = default
        else:
            cat = categories.get(c, default)
        cats.append(cat)
    return np.asarray(cats)
167,495
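The dictionary form of the mapping in miniature: unmapped values fall back to the default.

values = ['A', 'B', 'C', 'A']
categories = {'A': 1, 'B': 2}
assert [categories.get(v, 0) for v in values] == [1, 2, 0, 1]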
Bins continuous values. Bins continuous values using the provided bins and assigns labels, either computed from each bin's center point or from the supplied labels. Args: bins: List or array containing the bin boundaries. labels: List of labels to assign to each bin. If the bins are length N the labels should be length N-1.
def bin(self, bins, labels=None):
    return dim(self, bin, bins, labels=labels)
167,509
Replaces discrete values with supplied categories. Replaces discrete values in the input array with a fixed set of categories defined either as a list or dictionary. Args: categories: List or dict of categories to map inputs to. default: Default value to assign if value not in categories.
def categorize(self, categories, default=None):
    return dim(self, categorize, categories=categories, default=default)
167,510
Unity-based normalization to scale data into 0-1 range. (values - min) / (max - min) Args: limits: tuple of (min, max) defining the normalization range
def norm(self, limits=None):
    kwargs = {}
    if limits is not None:
        kwargs = {'min': limits[0], 'max': limits[1]}
    return dim(self, norm, **kwargs)
167,511
Get a node of the AttrTree using its path string. Args: identifier: Path string of the node to return default: Value to return if no node is found Returns: The indexed node of the AttrTree
def get(self, identifier, default=None):
    split_label = (tuple(identifier.split('.'))
                   if isinstance(identifier, str) else tuple(identifier))
    if len(split_label) == 1:
        identifier = split_label[0]
        return self.__dict__.get(identifier, default)
    path_item = self
    for identifier in split_label:
        if path_item == default or path_item is None:
            return default
        path_item = path_item.get(identifier, default)
    return path_item
167,648
Pop a node of the AttrTree using its path string. Args: identifier: Path string of the node to return default: Value to return if no node is found Returns: The node that was removed from the AttrTree
def pop(self, identifier, default=None):
    if identifier in self.children:
        item = self[identifier]
        self.__delitem__(identifier)
        return item
    else:
        return default
167,649
Return the values along the requested dimension. Args: dimension: The dimension to return values for expanded (bool, optional): Whether to expand values flat (bool, optional): Whether to flatten array Returns: NumPy array of values along the requested dimension
def dimension_values(self, dimension, expanded=True, flat=True):
    index = self.get_dimension_index(dimension)
    if index == 0:
        return np.array([self.data if np.isscalar(self.data)
                         else self.data[index]])
    elif index == 1:
        return [] if np.isscalar(self.data) else np.array([self.data[1]])
    else:
        return super(Annotation, self).dimension_values(dimension)
167,691
Clones the object, overriding data and parameters. Args: data: New data replacing the existing data shared_data (bool, optional): Whether to use existing data new_type (optional): Type to cast object to *args: Additional arguments to pass to constructor **overrides: New keyword arguments to pass to constructor Returns: Cloned Spline
def clone(self, data=None, shared_data=True, new_type=None, *args, **overrides):
    return Element2D.clone(self, data, shared_data, new_type,
                           *args, **overrides)
167,696
Return the values along the requested dimension. Args: dimension: The dimension to return values for expanded (bool, optional): Whether to expand values flat (bool, optional): Whether to flatten array Returns: NumPy array of values along the requested dimension
def dimension_values(self, dimension, expanded=True, flat=True):
    index = self.get_dimension_index(dimension)
    if index in [0, 1]:
        return np.array([point[index] for point in self.data[0]])
    else:
        return super(Spline, self).dimension_values(dimension)
167,697
Return the values along the requested dimension. Args: dimension: The dimension to return values for expanded (bool, optional): Whether to expand values flat (bool, optional): Whether to flatten array Returns: NumPy array of values along the requested dimension
def dimension_values(self, dimension, expanded=True, flat=True):
    index = self.get_dimension_index(dimension)
    if index == 0:
        return np.array([self.x])
    elif index == 1:
        return np.array([self.y])
    else:
        return super(Arrow, self).dimension_values(dimension)
167,700
Return the lower and upper bounds of values along dimension. Args: dimension: The dimension to compute the range on. data_range (bool): Compute range from data values. dimension_range (bool): Include Dimension ranges. Whether to include Dimension range and soft_range in range calculation. Returns: Tuple containing the lower and upper bound.
def range(self, dim, data_range=True, dimension_range=True):
    iskdim = self.get_dimension(dim) not in self.vdims
    return super(StatisticsElement, self).range(dim, iskdim, dimension_range)
167,753
Get the type of the requested dimension. Type is determined by Dimension.type attribute or common type of the dimension values, otherwise None. Args: dimension: Dimension to look up by name or by index Returns: Declared type of values along the dimension
def get_dimension_type(self, dim):
    dim = self.get_dimension(dim)
    if dim is None:
        return None
    elif dim.type is not None:
        return dim.type
    elif dim in self.vdims:
        return np.float64
    return self.interface.dimension_type(self, dim)
167,755
Convert dimension values to DataFrame. Returns a pandas dataframe of columns along each dimension, either completely flat or indexed by key dimensions. Args: dimensions: Dimensions to return as columns multi_index: Convert key dimensions to (multi-)index Returns: DataFrame of columns corresponding to each dimension
def dframe(self, dimensions=None, multi_index=False):
    if dimensions:
        dimensions = [self.get_dimension(d, strict=True) for d in dimensions]
    else:
        dimensions = self.kdims
    vdims = [d for d in dimensions if d in self.vdims]
    if vdims:
        raise ValueError('%s element does not hold data for value '
                         'dimensions. Could not return data for %s '
                         'dimension(s).' % (type(self).__name__,
                                            ', '.join([d.name for d in vdims])))
    return super(StatisticsElement, self).dframe(dimensions, False)
167,756
Convert dimension values to a dictionary. Returns a dictionary of column arrays along each dimension of the element. Args: dimensions: Dimensions to return as columns Returns: Dictionary of arrays for each dimension
def columns(self, dimensions=None):
    if dimensions is None:
        dimensions = self.kdims
    else:
        dimensions = [self.get_dimension(d, strict=True) for d in dimensions]
    vdims = [d for d in dimensions if d in self.vdims]
    if vdims:
        raise ValueError('%s element does not hold data for value '
                         'dimensions. Could not return data for %s '
                         'dimension(s).' % (type(self).__name__,
                                            ', '.join([d.name for d in vdims])))
    return OrderedDict([(d.name, self.dimension_values(d))
                        for d in dimensions])
167,757
Wraps a Weld element type in `dim` levels of WeldVec. Args: weld_type (WeldType): element type to wrap. dim (int): number of vector dimensions. Returns: WeldType: the resulting nested vector type.
def to_weld_type(weld_type, dim):
    for i in xrange(dim):
        weld_type = WeldVec(weld_type)
    return weld_type
168,028
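A dependency-free analogue of the nesting performed above (the WeldVec/WeldType classes are assumed from the Weld Python bindings): wrap an element-type description in `dim` vector layers.

def to_vec_type(element_type, dim):
    # Each level of dim adds one vector wrapper around the element type.
    ty = element_type
    for _ in range(dim):
        ty = ('vec', ty)
    return ty

assert to_vec_type('f64', 2) == ('vec', ('vec', 'f64'))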
Initializes a lazy Weld computation. Args: expr (WeldObject or value): Weld expression to evaluate lazily. weld_type (WeldType): type of the expression. dim (int): dimensionality of the result.
def __init__(self, expr, weld_type, dim):
    self.expr = expr
    self.weld_type = weld_type
    self.dim = dim
168,029
Evaluates the lazy Weld expression and returns the materialized result. Args: verbose (bool, optional): whether to emit verbose output during evaluation. decode (bool, optional): whether to decode the Weld result back into Python values. Returns: The evaluated result, or the raw expression if it is not a WeldObject.
def evaluate(self, verbose=True, decode=True, passes=None,
             num_threads=1, apply_experimental_transforms=False):
    if isinstance(self.expr, WeldObject):
        return self.expr.evaluate(
            to_weld_type(self.weld_type, self.dim),
            verbose,
            decode,
            passes=passes,
            num_threads=num_threads,
            apply_experimental_transforms=apply_experimental_transforms)
    return self.expr
168,030
Retrieves a column or filtered view of the DataFrame. Args: key (str, list or SeriesWeld): column name, list of column names, or predicate series. Returns: SeriesWeld or DataFrameWeld for the requested selection. Raises: Exception: if the key type is not supported.
def __getitem__(self, key):
    if isinstance(key, str):  # Single-key get
        # First check if key corresponds to an un-materialized column
        if key in self.unmaterialized_cols:
            return self.unmaterialized_cols[key]
        raw_column = self.df[key].values
        dtype = str(raw_column.dtype)
        # If column type is "object", then cast as "vec[char]" in Weld
        if dtype == 'object':
            raw_column = self.raw_columns[key]
            weld_type = WeldVec(WeldChar())
        else:
            weld_type = grizzly_impl.numpy_to_weld_type_mapping[dtype]
        if self.predicates is None:
            return SeriesWeld(raw_column, weld_type, self, key)
        return SeriesWeld(
            grizzly_impl.filter(
                raw_column,
                self.predicates.expr,
                weld_type
            ),
            weld_type,
            self,
            key
        )
    elif isinstance(key, list):
        # For multi-key get, return type is a dataframe
        return DataFrameWeld(self.df[key], self.predicates)
    elif isinstance(key, SeriesWeld):
        # Can also apply predicate to a dataframe
        if self.predicates is not None:
            return DataFrameWeld(self.df, key.per_element_and(self.predicates))
        return DataFrameWeld(self.df, key)
    raise Exception("Invalid type in __getitem__")
168,053
Lazily assigns a column to the DataFrame. Args: key (str): column name. value (np.ndarray, SeriesWeld or LazyOpResult): column values to assign.
def __setitem__(self, key, value):
    if isinstance(value, np.ndarray):
        dtype = str(value.dtype)
        weld_type = grizzly_impl.numpy_to_weld_type_mapping[dtype]
        self.unmaterialized_cols[key] = SeriesWeld(
            value,
            weld_type,
            self,
            key
        )
    elif isinstance(value, SeriesWeld):
        self.unmaterialized_cols[key] = value
    elif isinstance(value, LazyOpResult):
        self.unmaterialized_cols[key] = SeriesWeld(
            value.expr,
            value.weld_type,
            self,
            key
        )
168,054
Filters the DataFrame rows by the supplied predicates. Args: predicates (SeriesWeld or WeldObject): per-row boolean predicate. Returns: DataFrameWeldExpr containing only the rows where the predicate holds.
def filter(self, predicates):
    tys = []
    for col_name, raw_column in self.raw_columns.items():
        dtype = str(raw_column.dtype)
        if dtype == 'object' or dtype == '|S64':
            weld_type = WeldVec(WeldChar())
        else:
            weld_type = grizzly_impl.numpy_to_weld_type_mapping[dtype]
        tys.append(weld_type)

    if len(tys) == 1:
        weld_type = tys[0]
    else:
        weld_type = WeldStruct(tys)

    if isinstance(predicates, SeriesWeld):
        predicates = predicates.expr

    return DataFrameWeldExpr(
        grizzly_impl.filter(
            grizzly_impl.zip_columns(
                self.raw_columns.values(),
            ),
            predicates
        ),
        self.raw_columns.keys(),
        weld_type
    )
168,057
Initializes a lazy Weld series. TODO: Implement an actual Index object the way pandas does. Args: expr (WeldObject): Weld expression for the column values. weld_type (WeldType): element type. df (None, optional): parent DataFrameWeld. column_name (None, optional): name of the column. index_type (None, optional): type of the optional index. index_name (None, optional): name of the optional index.
def __init__(self, expr, weld_type, df=None, column_name=None,
             index_type=None, index_name=None):
    self.expr = expr
    self.weld_type = weld_type
    self.dim = 1
    self.df = df
    self.column_name = column_name
    self.index_type = index_type
    self.index_name = index_name
168,069
Slices the series or filters it by a predicate. Args: key (slice or SeriesWeld): slice to take, or predicates to filter by. Returns: SeriesWeld with the selected elements.
def __getitem__(self, key):
    if isinstance(key, slice):
        start = key.start
        # TODO : We currently do nothing with step
        step = key.step
        stop = key.stop
        if self.index_type is not None:
            index_expr = grizzly_impl.get_field(self.expr, 0)
            column_expr = grizzly_impl.get_field(self.expr, 1)
            zip_expr = grizzly_impl.zip_columns([index_expr, column_expr])
            sliced_expr = grizzly_impl.slice_vec(zip_expr, start, stop)
            unzip_expr = grizzly_impl.unzip_columns(
                sliced_expr,
                [self.index_type, self.weld_type]
            )
            return SeriesWeld(
                unzip_expr,
                self.weld_type,
                self.df,
                self.column_name,
                self.index_type,
                self.index_name
            )
        else:
            return SeriesWeld(
                grizzly_impl.slice_vec(
                    self.expr,
                    start,
                    stop
                )
            )
    else:
        # By default we return as if the key were predicates to filter by
        return self.filter(key)
168,070
Masked assignment on the parent DataFrame column. Args: predicates (SeriesWeld): boolean mask per element. new_value: value to assign where the predicate holds.
def __setitem__(self, predicates, new_value):
    if self.df is not None and self.column_name is not None:
        self.df[self.column_name] = self.mask(predicates, new_value)
168,071
Provides the `.str` accessor for string-typed series. Args: key (str): attribute name. Returns: StringSeriesWeld wrapping this series when `str` is requested on a string series. Raises: AttributeError: for any other attribute.
def __getattr__(self, key):
    if key == 'str' and self.weld_type == WeldVec(WeldChar()):
        return StringSeriesWeld(
            self.expr,
            self.weld_type,
            self.df,
            self.column_name
        )
    raise AttributeError("Attr %s does not exist" % key)
168,072
Replaces elements where the predicates hold with a new value. Args: predicates (SeriesWeld): boolean mask per element. new_value: replacement value. Returns: SeriesWeld with the masked values.
def mask(self, predicates, new_value):
    if isinstance(predicates, SeriesWeld):
        predicates = predicates.expr
    return SeriesWeld(
        grizzly_impl.mask(
            self.expr,
            predicates,
            new_value,
            self.weld_type
        ),
        self.weld_type,
        self.df,
        self.column_name
    )
168,083
Element-wise addition with another series or scalar. Args: other (SeriesWeld or scalar): value(s) to add. Returns: SeriesWeld with the element-wise sums.
def add(self, other):
    if isinstance(other, SeriesWeld):
        other = other.expr
    return SeriesWeld(
        grizzly_impl.element_wise_op(
            self.expr,
            other,
            "+",
            self.weld_type
        ),
        self.weld_type,
        self.df,
        self.column_name
    )
168,085
Element-wise equality comparison. Args: other: value(s) to compare against. Returns: Boolean SeriesWeld of the comparison.
def __eq__(self, other):
    return SeriesWeld(
        grizzly_impl.compare(
            self.expr,
            other,
            "==",
            self.weld_type
        ),
        WeldBit(),
        self.df,
        self.column_name
    )
168,087
Element-wise greater-or-equal comparison, ignoring any attached index. Args: other: value(s) to compare against. Returns: Boolean SeriesWeld of the comparison.
def __ge__(self, other):
    if self.index_type is not None:
        expr = grizzly_impl.get_field(self.expr, 1)
    else:
        expr = self.expr
    return SeriesWeld(
        grizzly_impl.compare(
            expr,
            other,
            ">=",
            self.weld_type
        ),
        WeldBit(),
        self.df,
        self.column_name
    )
168,088
Initializes a lazy string series. Args: expr (WeldObject): Weld expression for the column values. weld_type (WeldType): element type. df (None, optional): parent DataFrameWeld. column_name (None, optional): name of the column.
def __init__(self, expr, weld_type, df=None, column_name=None):
    self.expr = expr
    self.weld_type = weld_type
    self.dim = 1
    self.df = df
    self.column_name = column_name
168,089
Slices each value of the series starting at `start` for `size` elements. Args: start (int): start offset. size (int): slice length. Returns: SeriesWeld with the sliced values.
def slice(self, start, size):
    return SeriesWeld(
        grizzly_impl.slice(
            self.expr,
            start,
            size,
            self.weld_type
        ),
        self.weld_type,
        self.df,
        self.column_name
    )
168,090
Initializes a lazy group-by over the DataFrame. Args: df (DataFrameWeld): DataFrame to group. grouping_column_names (str or list): column name(s) to group by.
def __init__(self, df, grouping_column_names):
    self.df = df
    self.grouping_columns = []
    self.grouping_column_types = []
    if isinstance(grouping_column_names, str):
        grouping_column_names = [grouping_column_names]
    for column_name in grouping_column_names:
        column = df[column_name]
        if isinstance(column, LazyOpResult):
            self.grouping_column_types.append(column.weld_type)
            self.grouping_columns.append(column.expr)
        elif isinstance(column, np.ndarray):
            column_type = numpyImpl.numpy_to_weld_type_mapping[
                str(column.dtype)]
            self.grouping_column_types.append(column_type)
            self.grouping_columns.append(column)
    self.grouping_column_names = grouping_column_names

    self.column_names = []
    for x in df._get_column_names():
        if x not in self.grouping_column_names:
            self.column_names.append(x)

    self.columns = []
    self.column_types = []
    for column_name in self.column_names:
        column = df[column_name]
        column_type = None
        if isinstance(column, LazyOpResult):
            column_type = column.weld_type
            column = column.expr
        elif isinstance(column, np.ndarray):
            column_type = numpyImpl.numpy_to_weld_type_mapping[
                str(column.dtype)]
        self.columns.append(column)
        self.column_types.append(column_type)
168,097
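A sketch of reaching this initializer, assuming the dataframe exposes a pandas-style groupby entry point (not shown in this excerpt); `df` is a hypothetical DataFrameWeld.

by_city = df.groupby('city')             # single name, promoted to a list
by_pair = df.groupby(['city', 'year'])   # multi-column grouping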
Initializes a grouped result, recording its column metadata and
deriving the overall Weld struct type.

Args:
    expr (WeldObject): Weld expression for the grouped data.
    grouping_column_names (list of str): names of the grouping columns.
    column_names (list of str): names of the value columns.
    grouping_column_types (list of WeldType): types of the grouping columns.
    column_types (list of WeldType): types of the value columns.
def __init__(
        self,
        expr,
        grouping_column_names,
        column_names,
        grouping_column_types,
        column_types):
    self.expr = expr
    self.grouping_column_name = grouping_column_names
    self.column_names = column_names
    self.grouping_column_types = grouping_column_types
    self.column_types = column_types

    if isinstance(self.column_types, list):
        if len(self.column_types) == 1:
            column_types = self.column_types[0]
        else:
            column_types = WeldStruct(self.column_types)

    if len(self.grouping_column_types) == 1:
        grouping_column_types = self.grouping_column_types[0]
    else:
        grouping_column_types = WeldStruct(self.grouping_column_types)
    self.weld_type = WeldStruct([grouping_column_types, column_types])
168,102
Extracts a single column from the grouped result as a lazy value.

Args:
    column_name (str): name of the column (unused here).
    column_type (WeldType): type of each element in the column.
    index (int): position of the column in the underlying struct.
    verbose (bool, optional): verbosity flag (unused here).

Returns:
    LazyOpResult: a lazy handle to the requested column.
def get_column(self, column_name, column_type, index, verbose=True):
    return LazyOpResult(
        grizzly_impl.get_column(
            self.expr,
            self.weld_type,
            index
        ),
        column_type,
        1
    )
168,104
Computes the aggregate of elements in the array.

Args:
    array (WeldObject / Numpy.ndarray): Input array to aggregate
    op (str): Op string used to aggregate the array (+ / *)
    initial_value (int): Initial value for aggregation
    ty (WeldType): Type of each element in the input array

Returns:
    A WeldObject representing this computation
def aggr(array, op, initial_value, ty):
    weld_obj = WeldObject(encoder_, decoder_)

    array_var = weld_obj.update(array)
    if isinstance(array, WeldObject):
        array_var = array.obj_id
        weld_obj.dependencies[array_var] = array

    # The Weld IR template string was elided in the source; note that
    # initial_value is not substituted into it here.
    weld_template = """..."""

    weld_obj.weld_code = weld_template % {
        "array": array_var, "ty": ty, "op": op}
    return weld_obj
168,123
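A sketch of calling aggr directly to sum a NumPy array; WeldDouble is assumed to exist alongside the other Weld types used in this module, and the actual Weld IR comes from the template elided above.

import numpy as np

data = np.array([1.0, 2.0, 3.0], dtype=np.float64)
total_obj = aggr(data, "+", 0, WeldDouble())  # WeldObject computing the sum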
Computes the dot product between a matrix and a vector.

Args:
    matrix (WeldObject / Numpy.ndarray): 2-d input matrix
    vector (WeldObject / Numpy.ndarray): 1-d input vector
    matrix_ty (WeldType): Type of each element in the input matrix
    vector_ty (WeldType): Type of each element in the input vector

Returns:
    A WeldObject representing this computation
def dot(matrix, vector, matrix_ty, vector_ty):
    weld_obj = WeldObject(encoder_, decoder_)

    matrix_var = weld_obj.update(matrix)
    if isinstance(matrix, WeldObject):
        matrix_var = matrix.obj_id
        weld_obj.dependencies[matrix_var] = matrix

    vector_var = weld_obj.update(vector)
    loopsize_annotation = ""
    if isinstance(vector, WeldObject):
        vector_var = vector.obj_id
        weld_obj.dependencies[vector_var] = vector
    if isinstance(vector, np.ndarray):
        loopsize_annotation = "@(loopsize: %dL)" % len(vector)

    # The Weld IR template string was elided in the source.
    weld_template = """..."""

    weld_obj.weld_code = weld_template % {
        "matrix": matrix_var,
        "vector": vector_var,
        "matrix_ty": matrix_ty,
        "vector_ty": vector_ty,
        "loopsize_annotation": loopsize_annotation}
    return weld_obj
168,124
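A matrix-vector product sketch; note that the loopsize annotation is emitted only when the vector is a concrete ndarray, since only then is its length known up front. WeldDouble is assumed, as above.

import numpy as np

m = np.random.rand(4, 3)
v = np.random.rand(3)
mv_obj = dot(m, v, WeldDouble(), WeldDouble())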
Computes the per-element exponential of the passed-in array.

Args:
    array (WeldObject / Numpy.ndarray): Input array
    ty (WeldType): Type of each element in the input array

Returns:
    A WeldObject representing this computation
def exp(array, ty):
    weld_obj = WeldObject(encoder_, decoder_)

    array_var = weld_obj.update(array)
    if isinstance(array, WeldObject):
        array_var = array.obj_id
        weld_obj.dependencies[array_var] = array

    # The Weld IR template string was elided in the source.
    weld_template = """..."""

    weld_obj.weld_code = weld_template % {"array": array_var, "ty": ty}
    return weld_obj
168,125
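exp follows the same pattern; a minimal sketch, again assuming WeldDouble:

import numpy as np

e_obj = exp(np.array([0.0, 1.0]), WeldDouble())  # lazily computes e^x per element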
Initializes a struct type from its field types.

Args:
    field_types (list of WeldType): types of the struct's fields.
def __init__(self, field_types):
    assert False not in [isinstance(e, WeldType) for e in field_types]
    self.field_types = field_types
168,138
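A small sketch of composing struct types, mirroring how the grouped result above derives its weld_type; WeldBit, WeldVec, and WeldChar all appear elsewhere in this excerpt.

pair_ty = WeldStruct([WeldBit(), WeldVec(WeldChar())])  # {bool, vec[char]}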
Returns a new array, with each element in the original array satisfying
the passed-in predicate set to `new_value`.

Args:
    array (WeldObject / Numpy.ndarray): Input array
    predicates (WeldObject / Numpy.ndarray<bool>): Predicate set
    new_value (WeldObject / Numpy.ndarray / str): mask value
    ty (WeldType): Type of each element in the input array

Returns:
    A WeldObject representing this computation
def mask(array, predicates, new_value, ty):
    weld_obj = WeldObject(encoder_, decoder_)

    array_var = weld_obj.update(array)
    if isinstance(array, WeldObject):
        array_var = array.obj_id
        weld_obj.dependencies[array_var] = array

    predicates_var = weld_obj.update(predicates)
    if isinstance(predicates, WeldObject):
        predicates_var = predicates.obj_id
        weld_obj.dependencies[predicates_var] = predicates

    if str(ty).startswith("vec"):
        new_value_var = weld_obj.update(new_value)
        if isinstance(new_value, WeldObject):
            new_value_var = new_value.obj_id
            weld_obj.dependencies[new_value_var] = new_value
    else:
        new_value_var = "%s(%s)" % (ty, str(new_value))

    # The Weld IR template string was elided in the source.
    weld_template = """..."""

    weld_obj.weld_code = weld_template % {
        "array": array_var,
        "predicates": predicates_var,
        "new_value": new_value_var,
        "ty": ty}
    return weld_obj
168,143
Returns a new array containing only the elements of the original array
whose corresponding predicate is true.

Args:
    array (WeldObject / Numpy.ndarray): Input array
    predicates (WeldObject / Numpy.ndarray<bool>): Predicate set
    ty (WeldType, optional): Type of each element in the input array
        (unused here)

Returns:
    A WeldObject representing this computation
def filter(array, predicates, ty=None):
    weld_obj = WeldObject(encoder_, decoder_)

    array_var = weld_obj.update(array)
    if isinstance(array, WeldObject):
        array_var = array.obj_id
        weld_obj.dependencies[array_var] = array

    predicates_var = weld_obj.update(predicates)
    if isinstance(predicates, WeldObject):
        predicates_var = predicates.obj_id
        weld_obj.dependencies[predicates_var] = predicates

    # The Weld IR template string was elided in the source.
    weld_template = """..."""

    weld_obj.weld_code = weld_template % {
        "array": array_var,
        "predicates": predicates_var}
    return weld_obj
168,144
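A sketch of the implementation-level filter; the predicate array must align element-for-element with the input, and given the semantics described above the result should correspond to [1, 3].

import numpy as np

values = np.array([1, 2, 3, 4])
keep = np.array([True, False, True, False])
kept_obj = filter(values, keep)  # WeldObject for the filtered array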
Returns a new pivot table containing only the rows of the original
pivot table whose corresponding predicate is true.

Args:
    pivot_array (WeldObject): Input pivot table
    predicates (WeldObject / Numpy.ndarray<bool>): Predicate set
    ty (WeldType, optional): Type of each element in the input
        (unused here)

Returns:
    A WeldObject representing this computation
def pivot_filter(pivot_array, predicates, ty=None):
    weld_obj = WeldObject(encoder_, decoder_)

    pivot_array_var = weld_obj.update(pivot_array)
    if isinstance(pivot_array, WeldObject):
        pivot_array_var = pivot_array.obj_id
        weld_obj.dependencies[pivot_array_var] = pivot_array

    predicates_var = weld_obj.update(predicates)
    if isinstance(predicates, WeldObject):
        predicates_var = predicates.obj_id
        weld_obj.dependencies[predicates_var] = predicates

    # The Weld IR template string was elided in the source.
    weld_template = """..."""

    weld_obj.weld_code = weld_template % {
        "array": pivot_array_var,
        "predicates": predicates_var}
    return weld_obj
168,145
Element-wise operation between a series and another array, using the
given binary operator.

Args:
    array (WeldObject / Numpy.ndarray): Input array
    other (WeldObject / Numpy.ndarray): Second input array
    op (str): Op string used to compute the element-wise operation (+ / *)
    ty (WeldType): Type of each element in the input arrays

Returns:
    A WeldObject representing this computation
def element_wise_op(array, other, op, ty):
    weld_obj = WeldObject(encoder_, decoder_)

    array_var = weld_obj.update(array)
    if isinstance(array, WeldObject):
        array_var = array.obj_id
        weld_obj.dependencies[array_var] = array

    other_var = weld_obj.update(other)
    if isinstance(other, WeldObject):
        other_var = other.obj_id
        weld_obj.dependencies[other_var] = other

    # The Weld IR template string was elided in the source.
    weld_template = """..."""

    weld_obj.weld_code = weld_template % {
        "array": array_var,
        "other": other_var,
        "ty": ty,
        "op": op}
    return weld_obj
168,146
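Finally, a sketch of the generic element-wise helper that the series-level add above delegates to; WeldDouble is assumed, as before.

import numpy as np

a = np.array([1.0, 2.0])
b = np.array([3.0, 4.0])
prod_obj = element_wise_op(a, b, "*", WeldDouble())  # lazy [3.0, 8.0]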