Dataset columns:
  code        string, lengths 26 .. 870k
  docstring   string, lengths 1 .. 65.6k
  func_name   string, lengths 1 .. 194
  language    string, 1 class
  repo        string, lengths 8 .. 68
  path        string, lengths 5 .. 194
  url         string, lengths 46 .. 254
  license     string, 4 classes
def __init__(
    self,
    ratings_file: str,
    padding_length: int,
    ignore_last_n: int,  # used for creating train/valid/test sets
    shift_id_by: int = 0,
    chronological: bool = False,
    sample_ratio: float = 1.0,
) -> None:
    """
    Args:
        ratings_file (string): Path to the csv file.
    """
    super().__init__()

    self.ratings_frame: pd.DataFrame = pd.read_csv(
        ratings_file,
        delimiter=",",
        # iterator=True,
    )
    self._padding_length: int = padding_length
    self._ignore_last_n: int = ignore_last_n
    self._cache: Dict[int, Dict[str, torch.Tensor]] = dict()
    self._shift_id_by: int = shift_id_by
    self._chronological: bool = chronological
    self._sample_ratio: float = sample_ratio
Args: ratings_file (string): Path to the csv file.
__init__
python
facebookresearch/generative-recommenders
generative_recommenders/research/data/dataset.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/data/dataset.py
Apache-2.0
def create_mol_interaction_module(
    query_embedding_dim: int,
    item_embedding_dim: int,
    dot_product_dimension: int,
    query_dot_product_groups: int,
    item_dot_product_groups: int,
    temperature: float,
    query_dropout_rate: float,
    query_hidden_dim: int,
    item_dropout_rate: float,
    item_hidden_dim: int,
    gating_query_hidden_dim: int,
    gating_qi_hidden_dim: int,
    gating_item_hidden_dim: int,
    softmax_dropout_rate: float,
    bf16_training: bool,
    gating_query_fn: bool = True,
    gating_item_fn: bool = True,
    dot_product_l2_norm: bool = True,
    query_nonlinearity: str = "geglu",
    item_nonlinearity: str = "geglu",
    uid_dropout_rate: float = 0.5,
    uid_embedding_hash_sizes: Optional[List[int]] = None,
    uid_embedding_level_dropout: bool = False,
    gating_combination_type: str = "glu_silu",
    gating_item_dropout_rate: float = 0.0,
    gating_qi_dropout_rate: float = 0.0,
    eps: float = 1e-6,
) -> Tuple[MoLSimilarity, str]:
    """
    Gin wrapper for creating MoL learned similarity.
    """
    mol_module = MoLSimilarity(
        query_embedding_dim=query_embedding_dim,
        item_embedding_dim=item_embedding_dim,
        dot_product_dimension=dot_product_dimension,
        query_dot_product_groups=query_dot_product_groups,
        item_dot_product_groups=item_dot_product_groups,
        temperature=temperature,
        dot_product_l2_norm=dot_product_l2_norm,
        query_embeddings_fn=RecoMoLQueryEmbeddingsFn(
            query_embedding_dim=query_embedding_dim,
            query_dot_product_groups=query_dot_product_groups,
            dot_product_dimension=dot_product_dimension,
            dot_product_l2_norm=dot_product_l2_norm,
            proj_fn=lambda input_dim, output_dim: (
                torch.nn.Sequential(
                    torch.nn.Dropout(p=query_dropout_rate),
                    SwiGLU(
                        in_features=input_dim,
                        out_features=query_hidden_dim,
                    ),
                    torch.nn.Linear(
                        in_features=query_hidden_dim,
                        out_features=output_dim,
                    ),
                ).apply(init_mlp_xavier_weights_zero_bias)
            ),
            eps=eps,
        ),
        item_embeddings_fn=RecoMoLItemEmbeddingsFn(
            item_embedding_dim=item_embedding_dim,
            item_dot_product_groups=item_dot_product_groups,
            dot_product_dimension=dot_product_dimension,
            dot_product_l2_norm=dot_product_l2_norm,
            proj_fn=lambda input_dim, output_dim: (
                torch.nn.Sequential(
                    torch.nn.Dropout(p=item_dropout_rate),
                    SwiGLU(in_features=input_dim, out_features=item_hidden_dim),
                    torch.nn.Linear(
                        in_features=item_hidden_dim,
                        out_features=output_dim,
                    ),
                ).apply(init_mlp_xavier_weights_zero_bias)
            ),
            eps=eps,
        ),
        gating_query_only_partial_fn=lambda input_dim, output_dim: (  # pyre-ignore [6]
            torch.nn.Sequential(
                torch.nn.Linear(
                    in_features=input_dim,
                    out_features=gating_query_hidden_dim,
                ),
                torch.nn.SiLU(),
                torch.nn.Linear(
                    in_features=gating_query_hidden_dim,
                    out_features=output_dim,
                    bias=False,
                ),
            ).apply(init_mlp_xavier_weights_zero_bias)
            if gating_query_fn
            else None
        ),
        gating_item_only_partial_fn=lambda input_dim, output_dim: (  # pyre-ignore [6]
            torch.nn.Sequential(
                torch.nn.Dropout(p=gating_item_dropout_rate),
                torch.nn.Linear(
                    in_features=input_dim,
                    out_features=gating_item_hidden_dim,
                ),
                torch.nn.SiLU(),
                torch.nn.Linear(
                    in_features=gating_item_hidden_dim,
                    out_features=output_dim,
                    bias=False,
                ),
            ).apply(init_mlp_xavier_weights_zero_bias)
            if gating_item_fn
            else None
        ),
        gating_qi_partial_fn=lambda input_dim, output_dim: (  # pyre-ignore [6]
            torch.nn.Sequential(
                torch.nn.Dropout(p=gating_qi_dropout_rate),
                torch.nn.Linear(
                    in_features=input_dim,
                    out_features=gating_qi_hidden_dim,
                ),
                torch.nn.SiLU(),
                torch.nn.Linear(
                    in_features=gating_qi_hidden_dim,
                    out_features=output_dim,
                ),
            ).apply(init_mlp_xavier_weights_zero_bias)
            if gating_qi_hidden_dim > 0
            else torch.nn.Sequential(
                torch.nn.Dropout(p=gating_qi_dropout_rate),
                torch.nn.Linear(
                    in_features=input_dim,
                    out_features=output_dim,
                ),
            ).apply(init_mlp_xavier_weights_zero_bias)
        ),
        gating_combination_type=gating_combination_type,
        gating_normalization_fn=lambda _: SoftmaxDropoutCombiner(
            dropout_rate=softmax_dropout_rate, eps=1e-6
        ),
        eps=eps,
        autocast_bf16=bf16_training,
    )
    interaction_module_debug_str = (
        f"MoL-{query_dot_product_groups}x{item_dot_product_groups}x{dot_product_dimension}"
        + f"-t{temperature}-d{softmax_dropout_rate}"
        + f"{'-l2' if dot_product_l2_norm else ''}"
        + f"-q{query_hidden_dim}d{query_dropout_rate}{query_nonlinearity}"
        + f"-i{item_hidden_dim}d{item_dropout_rate}{item_nonlinearity}"
        + (f"-gq{gating_query_hidden_dim}" if gating_query_fn else "")
        + (
            f"-gi{gating_item_hidden_dim}d{gating_item_dropout_rate}"
            if gating_item_fn
            else ""
        )
        + f"-gqi{gating_qi_hidden_dim}d{gating_qi_dropout_rate}-x-{gating_combination_type}"
    )
    return mol_module, interaction_module_debug_str
Gin wrapper for creating MoL learned similarity.
create_mol_interaction_module
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/similarity_utils.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/similarity_utils.py
Apache-2.0
def batch_gather_embeddings(
    rowwise_indices: torch.Tensor,
    embeddings: torch.Tensor,
) -> torch.Tensor:
    """
    Args:
        rowwise_indices: (B, N) x int, where each entry is in [0, X).
        embeddings: (B, X, D,) x float.

    Returns:
        (B, N, D,) x float, embeddings corresponding to rowwise_indices.
    """
    _, N = rowwise_indices.size()
    B, X, D = embeddings.size()
    flattened_indices = (
        rowwise_indices
        + torch.arange(
            start=0,
            end=B,
            step=1,
            dtype=rowwise_indices.dtype,
            device=rowwise_indices.device,
        )
        .unsqueeze(1)
        .expand(-1, N)
        * X
    )
    return embeddings.view(-1, D)[flattened_indices, :].reshape(
        rowwise_indices.size() + (D,)
    )
Args: rowwise_indices: (B, N) x int, where each entry is in [0, X). embeddings: (B, X, D,) x float. Returns: (B, N, D,) x float, embeddings corresponding to rowwise_indices.
batch_gather_embeddings
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/utils.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/utils.py
Apache-2.0
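A minimal sanity check of the flattened-index gather above, assuming batch_gather_embeddings from this row is in scope; the tensor values are illustrative only:

import torch

# B=2 rows, X=3 candidate embeddings each, D=2 dims.
embeddings = torch.arange(12, dtype=torch.float32).reshape(2, 3, 2)
rowwise_indices = torch.tensor([[2, 0], [1, 1]])  # (B, N) with N=2

out = batch_gather_embeddings(rowwise_indices, embeddings)
# The function flattens (B, X, D) to (B * X, D) and offsets each row's
# indices by b * X before a single fancy-indexing gather.
assert out.shape == (2, 2, 2)
assert torch.equal(out[0, 0], embeddings[0, 2])
assert torch.equal(out[1, 0], embeddings[1, 1])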
def batch_scatter_embeddings(
    dst_embeddings: torch.Tensor,
    rowwise_indices: torch.Tensor,
    src_embeddings: torch.Tensor,
) -> None:
    """
    Args:
        dst_embeddings: (B, N, D,) x float.
        rowwise_indices: (B,) x int, where each entry is in [0, N - 1).
        src_embeddings: (B, D,) x float.
    """
    B, N, D = dst_embeddings.size()
    flattened_indices = rowwise_indices + torch.arange(
        start=0,
        end=B * N,
        step=N,
        dtype=rowwise_indices.dtype,
        device=rowwise_indices.device,
    )
    dst_embeddings.view(B * N, D)[flattened_indices, :] = src_embeddings
Args: dst_embeddings: (B, N, D,) x float. rowwise_indices: (B,) x int, where each entry is in [0, N - 1). src_embeddings: (B, D,) x float.
batch_scatter_embeddings
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/utils.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/utils.py
Apache-2.0
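A hedged usage sketch of the scatter counterpart, assuming batch_scatter_embeddings from this row is in scope; values are illustrative:

import torch

dst = torch.zeros(2, 3, 2)              # (B, N, D)
rowwise_indices = torch.tensor([0, 2])  # (B,), one target slot per row
src = torch.ones(2, 2)                  # (B, D)

batch_scatter_embeddings(dst, rowwise_indices, src)
# The in-place fancy-indexed assignment mirrors the gather in
# batch_gather_embeddings: flattened indices are [0 + 0*3, 2 + 1*3].
assert torch.equal(dst[0, 0], torch.ones(2))
assert torch.equal(dst[1, 2], torch.ones(2))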
def get_current_embeddings(
    lengths: torch.Tensor,
    encoded_embeddings: torch.Tensor,
) -> torch.Tensor:
    """
    Args:
        lengths: (B,) x int
        encoded_embeddings: (B, N, D,) x float

    Returns:
        (B, D,) x float, where [i, :] == encoded_embeddings[i, lengths[i] - 1, :]
    """
    B, N, D = encoded_embeddings.size()
    flattened_offsets = (lengths - 1) + torch.arange(
        start=0, end=B, step=1, dtype=lengths.dtype, device=lengths.device
    ) * N
    return encoded_embeddings.reshape(-1, D)[flattened_offsets, :].reshape(B, D)
Args: lengths: (B,) x int encoded_embeddings: (B, N, D,) x float Returns: (B, D,) x float, where [i, :] == encoded_embeddings[i, lengths[i] - 1, :]
get_current_embeddings
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/utils.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/utils.py
Apache-2.0
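A quick check of the last-valid-timestep selection, assuming get_current_embeddings from this row is in scope; values are illustrative:

import torch

encoded = torch.arange(12, dtype=torch.float32).reshape(2, 3, 2)  # (B, N, D)
lengths = torch.tensor([1, 3])  # row 0 has 1 valid step, row 1 has 3

current = get_current_embeddings(lengths, encoded)
assert torch.equal(current[0], encoded[0, 0])  # lengths[0] - 1 == 0
assert torch.equal(current[1], encoded[1, 2])  # lengths[1] - 1 == 2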
def generate_user_embeddings(
    self,
    past_lengths: torch.Tensor,
    past_ids: torch.Tensor,
    past_embeddings: torch.Tensor,
    past_payloads: Dict[str, torch.Tensor],
) -> torch.Tensor:
    """
    Args:
        past_ids: (B, N,) x int

    Returns:
        (B, N, D,) x float
    """
    past_lengths, user_embeddings, valid_mask = self._input_features_preproc(
        past_lengths=past_lengths,
        past_ids=past_ids,
        past_embeddings=past_embeddings,
        past_payloads=past_payloads,
    )

    for i in range(len(self.attention_layers)):
        if self._activation_checkpoint:
            user_embeddings = torch.utils.checkpoint.checkpoint(
                self._run_one_layer,
                i,
                user_embeddings,
                valid_mask,
                use_reentrant=False,
            )
        else:
            user_embeddings = self._run_one_layer(i, user_embeddings, valid_mask)

    return self._output_postproc(user_embeddings)
Args: past_ids: (B, N,) x int Returns: (B, N, D,) x float
generate_user_embeddings
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/sasrec.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/sasrec.py
Apache-2.0
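A minimal sketch of the activation-checkpointing pattern used above: the layer's activations are recomputed during backward instead of being stored, trading compute for memory. The Linear layer here is a stand-in, not the repository's attention layer:

import torch
import torch.utils.checkpoint

layer = torch.nn.Linear(8, 8)
x = torch.randn(4, 8, requires_grad=True)

y = torch.utils.checkpoint.checkpoint(layer, x, use_reentrant=False)
y.sum().backward()  # activations for `layer` are rematerialized here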
def forward(
    self,
    past_lengths: torch.Tensor,
    past_ids: torch.Tensor,
    past_embeddings: torch.Tensor,
    past_payloads: Dict[str, torch.Tensor],
    batch_id: Optional[int] = None,
) -> torch.Tensor:
    """
    Args:
        past_ids: [B, N] x int64 where the latest engaged ids come first. In
            particular, [:, 0] should correspond to the last engaged values.
        past_ratings: [B, N] x int64.
        past_timestamps: [B, N] x int64.

    Returns:
        encoded_embeddings of [B, N, D].
    """
    encoded_embeddings = self.generate_user_embeddings(
        past_lengths,
        past_ids,
        past_embeddings,
        past_payloads,
    )
    return encoded_embeddings
Args: past_ids: [B, N] x int64 where the latest engaged ids come first. In particular, [:, 0] should correspond to the last engaged values. past_ratings: [B, N] x int64. past_timestamps: [B, N] x int64. Returns: encoded_embeddings of [B, N, D].
forward
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/sasrec.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/sasrec.py
Apache-2.0
def forward(
    self,
    positive_ids: torch.Tensor,
    num_to_sample: int,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Returns:
        A tuple of (sampled_ids, sampled_negative_embeddings).
    """
    pass
Returns: A tuple of (sampled_ids, sampled_negative_embeddings).
forward
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/autoregressive_losses.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/autoregressive_losses.py
Apache-2.0
def forward(
    self,
    positive_ids: torch.Tensor,
    num_to_sample: int,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Returns:
        A tuple of (sampled_ids, sampled_negative_embeddings).
    """
    # assert torch.max(torch.abs(self._item_emb(positive_ids) - positive_embeddings)) < 1e-4
    output_shape = positive_ids.size() + (num_to_sample,)
    sampled_offsets = torch.randint(
        low=0,
        high=self._num_items,
        size=output_shape,
        dtype=positive_ids.dtype,
        device=positive_ids.device,
    )
    sampled_ids = self._all_item_ids[sampled_offsets.view(-1)].reshape(output_shape)
    return sampled_ids, self.normalize_embeddings(self._item_emb(sampled_ids))
Returns: A tuple of (sampled_ids, sampled_negative_embeddings).
forward
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/autoregressive_losses.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/autoregressive_losses.py
Apache-2.0
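A shape-level sketch of the uniform negative sampling above, with hypothetical stand-ins for self._all_item_ids and self._num_items; values are illustrative:

import torch

num_items = 100
all_item_ids = torch.arange(1, num_items + 1)       # ids are often 1-based
positive_ids = torch.zeros(2, 5, dtype=torch.long)  # (B, N)
num_to_sample = 3

output_shape = positive_ids.size() + (num_to_sample,)
sampled_offsets = torch.randint(0, num_items, size=output_shape)
sampled_ids = all_item_ids[sampled_offsets.view(-1)].reshape(output_shape)
assert sampled_ids.shape == (2, 5, 3)  # one negative set per position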
def process_batch(
    self,
    ids: torch.Tensor,
    presences: torch.Tensor,
    embeddings: torch.Tensor,
) -> None:
    """
    Args:
        ids: (N') or (B, N) x int64
        presences: (N') or (B, N) x bool
        embeddings: (N', D) or (B, N, D) x float
    """
    assert ids.size() == presences.size()
    assert ids.size() == embeddings.size()[:-1]
    if self._dedup_embeddings:
        valid_ids = ids[presences]
        unique_ids, unique_ids_inverse_indices = torch.unique(
            input=valid_ids, sorted=False, return_inverse=True
        )
        device = unique_ids.device
        unique_embedding_offsets = torch.empty(
            (unique_ids.numel(),),
            dtype=torch.int64,
            device=device,
        )
        unique_embedding_offsets[unique_ids_inverse_indices] = torch.arange(
            valid_ids.numel(), dtype=torch.int64, device=device
        )
        unique_embeddings = embeddings[presences][unique_embedding_offsets, :]
        self._cached_embeddings = self._maybe_l2_norm(  # pyre-ignore [16]
            unique_embeddings
        )
        self._cached_ids = unique_ids  # pyre-ignore [16]
    else:
        self._cached_embeddings = self._maybe_l2_norm(embeddings[presences])
        self._cached_ids = ids[presences]
Args: ids: (N') or (B, N) x int64 presences: (N') or (B, N) x bool embeddings: (N', D) or (B, N, D) x float
process_batch
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/autoregressive_losses.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/autoregressive_losses.py
Apache-2.0
def forward(
    self,
    positive_ids: torch.Tensor,
    num_to_sample: int,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Returns:
        A tuple of (sampled_ids, sampled_negative_embeddings,).
    """
    X = self._cached_ids.size(0)
    sampled_offsets = torch.randint(
        low=0,
        high=X,
        size=positive_ids.size() + (num_to_sample,),
        dtype=positive_ids.dtype,
        device=positive_ids.device,
    )
    return (
        self._cached_ids[sampled_offsets],  # pyre-ignore [29]
        self._cached_embeddings[sampled_offsets],  # pyre-ignore [29]
    )
Returns: A tuple of (sampled_ids, sampled_negative_embeddings,).
forward
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/autoregressive_losses.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/autoregressive_losses.py
Apache-2.0
def jagged_forward(
    self,
    output_embeddings: torch.Tensor,
    supervision_ids: torch.Tensor,
    supervision_embeddings: torch.Tensor,
    supervision_weights: torch.Tensor,
    negatives_sampler: NegativesSampler,
) -> torch.Tensor:
    """
    Variant of forward() when the tensors are already in jagged format.

    Args:
        output_embeddings: [N', D] x float, embeddings for the current
            input sequence.
        supervision_ids: [N'] x int64, (positive) supervision ids.
        supervision_embeddings: [N', D] x float.
        supervision_weights: Optional [N'] x float. Optional weights for
            masking out invalid positions, or reweighting supervision labels.
        negatives_sampler: sampler used to obtain negative examples paired
            with positives.

    Returns:
        (1), loss for the current engaged sequence.
    """
    pass
Variant of forward() when the tensors are already in jagged format. Args: output_embeddings: [N', D] x float, embeddings for the current input sequence. supervision_ids: [N'] x int64, (positive) supervision ids. supervision_embeddings: [N', D] x float. supervision_weights: Optional [N'] x float. Optional weights for masking out invalid positions, or reweighting supervision labels. negatives_sampler: sampler used to obtain negative examples paired with positives. Returns: (1), loss for the current engaged sequence.
jagged_forward
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/autoregressive_losses.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/autoregressive_losses.py
Apache-2.0
def forward(
    self,
    lengths: torch.Tensor,
    output_embeddings: torch.Tensor,
    supervision_ids: torch.Tensor,
    supervision_embeddings: torch.Tensor,
    supervision_weights: torch.Tensor,
    negatives_sampler: NegativesSampler,
) -> torch.Tensor:
    """
    Args:
        lengths: [B] x int32 representing number of non-zero elements per row.
        output_embeddings: [B, N, D] x float, embeddings for the current
            input sequence.
        supervision_ids: [B, N] x int64, (positive) supervision ids.
        supervision_embeddings: [B, N, D] x float.
        supervision_weights: Optional [B, N] x float. Optional weights for
            masking out invalid positions, or reweighting supervision labels.
        negatives_sampler: sampler used to obtain negative examples paired
            with positives.

    Returns:
        (1), loss for the current engaged sequence.
    """
    pass
Args: lengths: [B] x int32 representing number of non-zero elements per row. output_embeddings: [B, N, D] x float, embeddings for the current input sequence. supervision_ids: [B, N] x int64, (positive) supervision ids. supervision_embeddings: [B, N, D] x float. supervision_weights: Optional [B, N] x float. Optional weights for masking out invalid positions, or reweighting supervision labels. negatives_sampler: sampler used to obtain negative examples paired with positives. Returns: (1), loss for the current engaged sequence.
forward
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/autoregressive_losses.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/autoregressive_losses.py
Apache-2.0
def forward(
    self,
    lengths: torch.Tensor,
    output_embeddings: torch.Tensor,
    supervision_ids: torch.Tensor,
    supervision_embeddings: torch.Tensor,
    supervision_weights: torch.Tensor,
    negatives_sampler: NegativesSampler,
) -> torch.Tensor:
    """
    Args:
        lengths: [B] x int32 representing number of non-zero elements per row.
        output_embeddings: [B, N, D] x float, embeddings for the current
            input sequence.
        supervision_ids: [B, N] x int64, (positive) supervision ids.
        supervision_embeddings: [B, N, D] x float.
        supervision_weights: Optional [B, N] x float. Optional weights for
            masking out invalid positions, or reweighting supervision labels.
        negatives_sampler: sampler used to obtain negative examples paired
            with positives.

    Returns:
        (1), loss for the current engaged sequence.
    """
    assert output_embeddings.size() == supervision_embeddings.size()
    assert supervision_ids.size() == supervision_embeddings.size()[:-1]
    jagged_id_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
    jagged_supervision_ids = (
        torch.ops.fbgemm.dense_to_jagged(
            supervision_ids.unsqueeze(-1).float(), [jagged_id_offsets]
        )[0]
        .squeeze(1)
        .long()
    )
    jagged_supervision_weights = torch.ops.fbgemm.dense_to_jagged(
        supervision_weights.unsqueeze(-1),
        [jagged_id_offsets],
    )[0].squeeze(1)
    return self.jagged_forward(
        output_embeddings=torch.ops.fbgemm.dense_to_jagged(
            output_embeddings,
            [jagged_id_offsets],
        )[0],
        supervision_ids=jagged_supervision_ids,
        supervision_embeddings=torch.ops.fbgemm.dense_to_jagged(
            supervision_embeddings,
            [jagged_id_offsets],
        )[0],
        supervision_weights=jagged_supervision_weights,
        negatives_sampler=negatives_sampler,
    )
Args: lengths: [B] x int32 representing number of non-zero elements per row. output_embeddings: [B, N, D] x float, embeddings for the current input sequence. supervision_ids: [B, N] x int64, (positive) supervision ids. supervision_embeddings: [B, N, D] x float. supervision_weights: Optional [B, N] x float. Optional weights for masking out invalid positions, or reweighting supervision labels. negatives_sampler: sampler used to obtain negative examples paired with positives. Returns: (1), loss for the current engaged sequence.
forward
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/autoregressive_losses.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/autoregressive_losses.py
Apache-2.0
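A pure-torch sketch of what the fbgemm dense_to_jagged conversion above computes: rows of a padded (B, N, ...) tensor are concatenated, keeping only the first lengths[b] entries of each row. This reference helper is hypothetical, not the library's implementation:

import torch

def dense_to_jagged_reference(dense: torch.Tensor, lengths: torch.Tensor) -> torch.Tensor:
    # Keep the first lengths[b] rows of each batch entry and concatenate.
    return torch.cat([dense[b, : int(n)] for b, n in enumerate(lengths)], dim=0)

lengths = torch.tensor([2, 3])
dense = torch.arange(8, dtype=torch.float32).reshape(2, 4, 1)  # (B, N=4, D=1)
jagged = dense_to_jagged_reference(dense, lengths)
assert jagged.shape == (5, 1)  # sum(lengths) rows survive
# asynchronous_complete_cumsum(lengths) would give the offsets [0, 2, 5].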
def forward(
    self,
    lengths: torch.Tensor,
    output_embeddings: torch.Tensor,
    supervision_ids: torch.Tensor,
    supervision_embeddings: torch.Tensor,
    supervision_weights: torch.Tensor,
    supervision_ratings: torch.Tensor,
    negatives_sampler: NegativesSampler,
) -> torch.Tensor:
    """
    Args:
        lengths: [B] x int32 representing number of non-zero elements per row.
        output_embeddings: [B, N, D] x float, embeddings for the current
            input sequence.
        supervision_ids: [B, N] x int64, (positive) supervision ids.
        supervision_embeddings: [B, N, D] x float.
        supervision_weights: Optional [B, N] x float. Optional weights for
            masking out invalid positions, or reweighting supervision labels.
        negatives_sampler: sampler used to obtain negative examples paired
            with positives.

    Returns:
        (1), loss for the current engaged sequence.
    """
    assert output_embeddings.size() == supervision_embeddings.size()
    assert supervision_ids.size() == supervision_embeddings.size()[:-1]
    jagged_id_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
    jagged_supervision_ids = (
        torch.ops.fbgemm.dense_to_jagged(
            supervision_ids.unsqueeze(-1).float(), [jagged_id_offsets]
        )[0]
        .squeeze(1)
        .long()
    )
    jagged_supervision_weights = torch.ops.fbgemm.dense_to_jagged(
        supervision_weights.unsqueeze(-1),
        [jagged_id_offsets],
    )[0].squeeze(1)
    return self.jagged_forward(
        output_embeddings=torch.ops.fbgemm.dense_to_jagged(
            output_embeddings,
            [jagged_id_offsets],
        )[0],
        supervision_ids=jagged_supervision_ids,
        supervision_embeddings=torch.ops.fbgemm.dense_to_jagged(
            supervision_embeddings,
            [jagged_id_offsets],
        )[0],
        supervision_weights=jagged_supervision_weights,
        supervision_ratings=torch.ops.fbgemm.dense_to_jagged(
            supervision_ratings.unsqueeze(-1),
            [jagged_id_offsets],
        )[0].squeeze(1),
        negatives_sampler=negatives_sampler,
    )
Args: lengths: [B] x int32 representing number of non-zero elements per row. output_embeddings: [B, N, D] x float, embeddings for the current input sequence. supervision_ids: [B, N] x int64, (positive) supervision ids. supervision_embeddings: [B, N, D] x float. supervision_weights: Optional [B, N] x float. Optional weights for masking out invalid positions, or reweighting supervision labels. negatives_sampler: sampler used to obtain negative examples paired with positives. Returns: (1), loss for the current engaged sequence.
forward
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/autoregressive_losses.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/autoregressive_losses.py
Apache-2.0
def forward(
    self,
    all_timestamps: torch.Tensor,
) -> torch.Tensor:
    """
    Args:
        all_timestamps: [B, N] x int64

    Returns:
        torch.float tensor broadcastable to [B, N, N]
    """
    pass
Args: all_timestamps: [B, N] x int64 Returns: torch.float tensor broadcastable to [B, N, N]
forward
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/hstu.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/hstu.py
Apache-2.0
def forward(
    self,
    all_timestamps: torch.Tensor,
) -> torch.Tensor:
    """
    Args:
        all_timestamps: (B, N).

    Returns:
        (B, N, N).
    """
    B = all_timestamps.size(0)
    N = self._max_seq_len
    t = F.pad(self._pos_w[: 2 * N - 1], [0, N]).repeat(N)
    t = t[..., :-N].reshape(1, N, 3 * N - 2)
    r = (2 * N - 1) // 2

    # [B, N + 1] to simplify tensor manipulations.
    ext_timestamps = torch.cat(
        [all_timestamps, all_timestamps[:, N - 1 : N]], dim=1
    )
    # causal masking. Otherwise [:, :-1] - [:, 1:] works
    bucketed_timestamps = torch.clamp(
        self._bucketization_fn(
            ext_timestamps[:, 1:].unsqueeze(2) - ext_timestamps[:, :-1].unsqueeze(1)
        ),
        min=0,
        max=self._num_buckets,
    ).detach()
    rel_pos_bias = t[:, :, r:-r]
    rel_ts_bias = torch.index_select(
        self._ts_w, dim=0, index=bucketed_timestamps.view(-1)
    ).view(B, N, N)
    return rel_pos_bias + rel_ts_bias
Args: all_timestamps: (B, N). Returns: (B, N, N).
forward
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/hstu.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/hstu.py
Apache-2.0
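A hypothetical example of a _bucketization_fn compatible with the forward above: log-scale bucketing of timestamp deltas. The repository's actual choice may differ; this sketch only shows the shape contract:

import torch

num_buckets = 128

def log_bucketize(ts_delta: torch.Tensor) -> torch.Tensor:
    # Map raw second-level deltas to coarse integer buckets; clamp below
    # at 1 so log() is defined, then truncate to an int64 bucket id.
    return torch.log(ts_delta.clamp(min=1).float()).long()

deltas = torch.tensor([[0, 60, 3600], [86400, 1, 10]])
buckets = torch.clamp(log_bucketize(deltas), min=0, max=num_buckets)
# Each (query, key) pair then indexes a learned bias table of size
# (num_buckets + 1,), as self._ts_w does above.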
def forward(  # pyre-ignore [3]
    self,
    x: torch.Tensor,
    x_offsets: torch.Tensor,
    all_timestamps: Optional[torch.Tensor],
    invalid_attn_mask: torch.Tensor,
    delta_x_offsets: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    cache: Optional[HSTUCacheState] = None,
    return_cache_states: bool = False,
):
    """
    Args:
        x: (\sum_i N_i, D) x float.
        x_offsets: (B + 1) x int32.
        all_timestamps: optional (B, N) x int64.
        invalid_attn_mask: (B, N, N) x float, each element in {0, 1}.
        delta_x_offsets: optional 2-tuple ((B,) x int32, (B,) x int32).
            For the 1st element in the tuple, each element is in
            [0, x_offsets[-1]). For the 2nd element in the tuple, each
            element is in [0, N).
        cache: Optional 4-tuple of (v, padded_q, padded_k, output) from
            prior runs, where all except padded_q, padded_k are jagged.

    Returns:
        x' = f(x), (\sum_i N_i, D) x float.
    """
    n: int = invalid_attn_mask.size(-1)
    cached_q = None
    cached_k = None
    if delta_x_offsets is not None:
        # In this case, for all the following code, x, u, v, q, k become
        # restricted to [delta_x_offsets[0], :].
        assert cache is not None
        x = x[delta_x_offsets[0], :]
        cached_v, cached_q, cached_k, cached_outputs = cache

    normed_x = self._norm_input(x)

    if self._linear_config == "uvqk":
        batched_mm_output = torch.mm(normed_x, self._uvqk)
        if self._linear_activation == "silu":
            batched_mm_output = F.silu(batched_mm_output)
        elif self._linear_activation == "none":
            batched_mm_output = batched_mm_output
        u, v, q, k = torch.split(
            batched_mm_output,
            [
                self._linear_dim * self._num_heads,
                self._linear_dim * self._num_heads,
                self._attention_dim * self._num_heads,
                self._attention_dim * self._num_heads,
            ],
            dim=1,
        )
    else:
        raise ValueError(f"Unknown self._linear_config {self._linear_config}")

    if delta_x_offsets is not None:
        v = cached_v.index_copy_(dim=0, index=delta_x_offsets[0], source=v)

    B: int = x_offsets.size(0) - 1
    if self._normalization == "rel_bias" or self._normalization == "hstu_rel_bias":
        assert self._rel_attn_bias is not None
        attn_output, padded_q, padded_k = _hstu_attention_maybe_from_cache(
            num_heads=self._num_heads,
            attention_dim=self._attention_dim,
            linear_dim=self._linear_dim,
            q=q,
            k=k,
            v=v,
            cached_q=cached_q,
            cached_k=cached_k,
            delta_x_offsets=delta_x_offsets,
            x_offsets=x_offsets,
            all_timestamps=all_timestamps,
            invalid_attn_mask=invalid_attn_mask,
            rel_attn_bias=self._rel_attn_bias,
        )
    elif self._normalization == "softmax_rel_bias":
        if delta_x_offsets is not None:
            B = x_offsets.size(0) - 1
            padded_q, padded_k = cached_q, cached_k
            flattened_offsets = delta_x_offsets[1] + torch.arange(
                start=0,
                end=B * n,
                step=n,
                device=delta_x_offsets[1].device,
                dtype=delta_x_offsets[1].dtype,
            )
            assert padded_q is not None
            assert padded_k is not None
            padded_q = (
                padded_q.view(B * n, -1)
                .index_copy_(
                    dim=0,
                    index=flattened_offsets,
                    source=q,
                )
                .view(B, n, -1)
            )
            padded_k = (
                padded_k.view(B * n, -1)
                .index_copy_(
                    dim=0,
                    index=flattened_offsets,
                    source=k,
                )
                .view(B, n, -1)
            )
        else:
            padded_q = torch.ops.fbgemm.jagged_to_padded_dense(
                values=q, offsets=[x_offsets], max_lengths=[n], padding_value=0.0
            )
            padded_k = torch.ops.fbgemm.jagged_to_padded_dense(
                values=k, offsets=[x_offsets], max_lengths=[n], padding_value=0.0
            )

        qk_attn = torch.einsum("bnd,bmd->bnm", padded_q, padded_k)
        if self._rel_attn_bias is not None:
            qk_attn = qk_attn + self._rel_attn_bias(all_timestamps)
        qk_attn = F.softmax(qk_attn / math.sqrt(self._attention_dim), dim=-1)
        qk_attn = qk_attn * invalid_attn_mask
        attn_output = torch.ops.fbgemm.dense_to_jagged(
            torch.bmm(
                qk_attn,
                torch.ops.fbgemm.jagged_to_padded_dense(v, [x_offsets], [n]),
            ),
            [x_offsets],
        )[0]
    else:
        raise ValueError(f"Unknown normalization method {self._normalization}")

    attn_output = (
        attn_output
        if delta_x_offsets is None
        else attn_output[delta_x_offsets[0], :]
    )
    if self._concat_ua:
        a = self._norm_attn_output(attn_output)
        o_input = torch.cat([u, a, u * a], dim=-1)
    else:
        o_input = u * self._norm_attn_output(attn_output)

    new_outputs = (
        self._o(
            F.dropout(
                o_input,
                p=self._dropout_ratio,
                training=self.training,
            )
        )
        + x
    )

    if delta_x_offsets is not None:
        new_outputs = cached_outputs.index_copy_(
            dim=0, index=delta_x_offsets[0], source=new_outputs
        )

    if return_cache_states and delta_x_offsets is None:
        v = v.contiguous()

    return new_outputs, (v, padded_q, padded_k, new_outputs)
Args: x: (\sum_i N_i, D) x float. x_offsets: (B + 1) x int32. all_timestamps: optional (B, N) x int64. invalid_attn_mask: (B, N, N) x float, each element in {0, 1}. delta_x_offsets: optional 2-tuple ((B,) x int32, (B,) x int32). For the 1st element in the tuple, each element is in [0, x_offsets[-1]). For the 2nd element in the tuple, each element is in [0, N). cache: Optional 4-tuple of (v, padded_q, padded_k, output) from prior runs, where all except padded_q, padded_k are jagged. Returns: x' = f(x), (\sum_i N_i, D) x float.
forward
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/hstu.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/hstu.py
Apache-2.0
def jagged_forward(
    self,
    x: torch.Tensor,
    x_offsets: torch.Tensor,
    all_timestamps: Optional[torch.Tensor],
    invalid_attn_mask: torch.Tensor,
    delta_x_offsets: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    cache: Optional[List[HSTUCacheState]] = None,
    return_cache_states: bool = False,
) -> Tuple[torch.Tensor, List[HSTUCacheState]]:
    """
    Args:
        x: (\sum_i N_i, D) x float
        x_offsets: (B + 1) x int32
        all_timestamps: (B, 1 + N) x int64
        invalid_attn_mask: (B, N, N) x float, each element in {0, 1}
        return_cache_states: bool. True if we should return cache states.

    Returns:
        x' = f(x), (\sum_i N_i, D) x float
    """
    cache_states: List[HSTUCacheState] = []

    with torch.autocast(
        "cuda",
        enabled=self._autocast_dtype is not None,
        dtype=self._autocast_dtype or torch.float16,
    ):
        for i, layer in enumerate(self._attention_layers):
            x, cache_states_i = layer(
                x=x,
                x_offsets=x_offsets,
                all_timestamps=all_timestamps,
                invalid_attn_mask=invalid_attn_mask,
                delta_x_offsets=delta_x_offsets,
                cache=cache[i] if cache is not None else None,
                return_cache_states=return_cache_states,
            )
            if return_cache_states:
                cache_states.append(cache_states_i)

    return x, cache_states
Args: x: (\sum_i N_i, D) x float x_offsets: (B + 1) x int32 all_timestamps: (B, 1 + N) x int64 invalid_attn_mask: (B, N, N) x float, each element in {0, 1} return_cache_states: bool. True if we should return cache states. Returns: x' = f(x), (\sum_i N_i, D) x float
jagged_forward
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/hstu.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/hstu.py
Apache-2.0
def forward(
    self,
    x: torch.Tensor,
    x_offsets: torch.Tensor,
    all_timestamps: Optional[torch.Tensor],
    invalid_attn_mask: torch.Tensor,
    delta_x_offsets: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    cache: Optional[List[HSTUCacheState]] = None,
    return_cache_states: bool = False,
) -> Tuple[torch.Tensor, List[HSTUCacheState]]:
    """
    Args:
        x: (B, N, D) x float.
        x_offsets: (B + 1) x int32.
        all_timestamps: (B, 1 + N) x int64
        invalid_attn_mask: (B, N, N) x float, each element in {0, 1}.

    Returns:
        x' = f(x), (B, N, D) x float
    """
    if len(x.size()) == 3:
        x = torch.ops.fbgemm.dense_to_jagged(x, [x_offsets])[0]

    jagged_x, cache_states = self.jagged_forward(
        x=x,
        x_offsets=x_offsets,
        all_timestamps=all_timestamps,
        invalid_attn_mask=invalid_attn_mask,
        delta_x_offsets=delta_x_offsets,
        cache=cache,
        return_cache_states=return_cache_states,
    )
    y = torch.ops.fbgemm.jagged_to_padded_dense(
        values=jagged_x,
        offsets=[x_offsets],
        max_lengths=[invalid_attn_mask.size(1)],
        padding_value=0.0,
    )
    return y, cache_states
Args: x: (B, N, D) x float. x_offsets: (B + 1) x int32. all_timestamps: (B, 1 + N) x int64 invalid_attn_mask: (B, N, N) x float, each element in {0, 1}. Returns: x' = f(x), (B, N, D) x float
forward
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/hstu.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/hstu.py
Apache-2.0
def generate_user_embeddings(
    self,
    past_lengths: torch.Tensor,
    past_ids: torch.Tensor,
    past_embeddings: torch.Tensor,
    past_payloads: Dict[str, torch.Tensor],
    delta_x_offsets: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    cache: Optional[List[HSTUCacheState]] = None,
    return_cache_states: bool = False,
) -> Tuple[torch.Tensor, List[HSTUCacheState]]:
    """
    [B, N] -> [B, N, D].
    """
    device = past_lengths.device
    float_dtype = past_embeddings.dtype
    B, N, _ = past_embeddings.size()

    past_lengths, user_embeddings, _ = self._input_features_preproc(
        past_lengths=past_lengths,
        past_ids=past_ids,
        past_embeddings=past_embeddings,
        past_payloads=past_payloads,
    )
    float_dtype = user_embeddings.dtype

    user_embeddings, cached_states = self._hstu(
        x=user_embeddings,
        x_offsets=torch.ops.fbgemm.asynchronous_complete_cumsum(past_lengths),
        all_timestamps=(
            past_payloads[TIMESTAMPS_KEY]
            if TIMESTAMPS_KEY in past_payloads
            else None
        ),
        invalid_attn_mask=1.0 - self._attn_mask.to(float_dtype),
        delta_x_offsets=delta_x_offsets,
        cache=cache,
        return_cache_states=return_cache_states,
    )
    return self._output_postproc(user_embeddings), cached_states
[B, N] -> [B, N, D].
generate_user_embeddings
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/hstu.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/hstu.py
Apache-2.0
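A sketch of how an invalid_attn_mask like the one above can be built, assuming self._attn_mask marks disallowed (future) positions in a causal setting; this is an illustrative reconstruction, not the repository's code:

import torch

N = 4
attn_mask = torch.triu(torch.ones(N, N), diagonal=1)  # 1 above the diagonal
invalid_attn_mask = 1.0 - attn_mask  # 1 where attention is allowed
# invalid_attn_mask[i, j] == 1 iff position i may attend to position j <= i.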
def forward(
    self,
    past_lengths: torch.Tensor,
    past_ids: torch.Tensor,
    past_embeddings: torch.Tensor,
    past_payloads: Dict[str, torch.Tensor],
    batch_id: Optional[int] = None,
) -> torch.Tensor:
    """
    Runs the main encoder.

    Args:
        past_lengths: (B,) x int64
        past_ids: (B, N,) x int64 where the latest engaged ids come first. In
            particular, past_ids[i, past_lengths[i] - 1] should correspond to
            the latest engaged values.
        past_embeddings: (B, N, D) x float or (\sum_b N_b, D) x float.
        past_payloads: implementation-specific keyed tensors of shape (B, N, ...).

    Returns:
        encoded_embeddings of [B, N, D].
    """
    encoded_embeddings, _ = self.generate_user_embeddings(
        past_lengths=past_lengths,
        past_ids=past_ids,
        past_embeddings=past_embeddings,
        past_payloads=past_payloads,
    )
    return encoded_embeddings
Runs the main encoder. Args: past_lengths: (B,) x int64 past_ids: (B, N,) x int64 where the latest engaged ids come first. In particular, past_ids[i, past_lengths[i] - 1] should correspond to the latest engaged values. past_embeddings: (B, N, D) x float or (\sum_b N_b, D) x float. past_payloads: implementation-specific keyed tensors of shape (B, N, ...). Returns: encoded_embeddings of [B, N, D].
forward
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/hstu.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/hstu.py
Apache-2.0
def _encode(
    self,
    past_lengths: torch.Tensor,
    past_ids: torch.Tensor,
    past_embeddings: torch.Tensor,
    past_payloads: Dict[str, torch.Tensor],
    delta_x_offsets: Optional[Tuple[torch.Tensor, torch.Tensor]],
    cache: Optional[List[HSTUCacheState]],
    return_cache_states: bool,
) -> Union[torch.Tensor, Tuple[torch.Tensor, List[HSTUCacheState]]]:
    """
    Args:
        past_lengths: (B,) x int64.
        past_ids: (B, N,) x int64.
        past_embeddings: (B, N, D,) x float.
        past_payloads: implementation-specific keyed tensors of shape (B, N, ...).
        return_cache_states: bool.

    Returns:
        (B, D) x float, representing embeddings for the current state.
    """
    encoded_seq_embeddings, cache_states = self.generate_user_embeddings(
        past_lengths=past_lengths,
        past_ids=past_ids,
        past_embeddings=past_embeddings,
        past_payloads=past_payloads,
        delta_x_offsets=delta_x_offsets,
        cache=cache,
        return_cache_states=return_cache_states,
    )  # [B, N, D]
    current_embeddings = get_current_embeddings(
        lengths=past_lengths, encoded_embeddings=encoded_seq_embeddings
    )
    if return_cache_states:
        return current_embeddings, cache_states
    else:
        return current_embeddings
Args: past_lengths: (B,) x int64. past_ids: (B, N,) x int64. past_embeddings: (B, N, D,) x float. past_payloads: implementation-specific keyed tensors of shape (B, N, ...). return_cache_states: bool. Returns: (B, D) x float, representing embeddings for the current state.
_encode
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/hstu.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/hstu.py
Apache-2.0
def encode(
    self,
    past_lengths: torch.Tensor,
    past_ids: torch.Tensor,
    past_embeddings: torch.Tensor,
    past_payloads: Dict[str, torch.Tensor],
    delta_x_offsets: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    cache: Optional[List[HSTUCacheState]] = None,
    return_cache_states: bool = False,
) -> Union[torch.Tensor, Tuple[torch.Tensor, List[HSTUCacheState]]]:
    """
    Runs encoder to obtain the current hidden states.

    Args:
        past_lengths: (B,) x int.
        past_ids: (B, N,) x int.
        past_embeddings: (B, N, D) x float.
        past_payloads: implementation-specific keyed tensors of shape (B, N, ...).

    Returns:
        (B, D,) x float, representing encoded states at the most recent time step.
    """
    return self._encode(
        past_lengths=past_lengths,
        past_ids=past_ids,
        past_embeddings=past_embeddings,
        past_payloads=past_payloads,
        delta_x_offsets=delta_x_offsets,
        cache=cache,
        return_cache_states=return_cache_states,
    )
Runs encoder to obtain the current hidden states. Args: past_lengths: (B,) x int. past_ids: (B, N,) x int. past_embeddings: (B, N, D) x float. past_payloads: implementation-specific keyed tensors of shape (B, N, ...). Returns: (B, D,) x float, representing encoded states at the most recent time step.
encode
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/hstu.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/hstu.py
Apache-2.0
def get_preprocessed_ids(
    self,
    past_lengths: torch.Tensor,
    past_ids: torch.Tensor,
    past_embeddings: torch.Tensor,
    past_payloads: Dict[str, torch.Tensor],
) -> torch.Tensor:
    """
    Returns (B, N * 2,) x int64.
    """
    B, N = past_ids.size()
    return torch.cat(
        [
            past_ids.unsqueeze(2),  # (B, N, 1)
            past_payloads["ratings"].to(past_ids.dtype).unsqueeze(2),
        ],
        dim=2,
    ).reshape(B, N * 2)
Returns (B, N * 2,) x int64.
get_preprocessed_ids
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/input_features_preprocessors.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/input_features_preprocessors.py
Apache-2.0
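An illustration of the (id, rating) interleaving performed above: stacking on dim=2 and reshaping to (B, N * 2) alternates ids and ratings. Values are illustrative:

import torch

past_ids = torch.tensor([[10, 20, 30]])  # (B=1, N=3)
ratings = torch.tensor([[5, 3, 4]])
interleaved = torch.cat(
    [past_ids.unsqueeze(2), ratings.unsqueeze(2)], dim=2
).reshape(1, 6)
assert interleaved.tolist() == [[10, 5, 20, 3, 30, 4]]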
def get_preprocessed_masks(
    self,
    past_lengths: torch.Tensor,
    past_ids: torch.Tensor,
    past_embeddings: torch.Tensor,
    past_payloads: Dict[str, torch.Tensor],
) -> torch.Tensor:
    """
    Returns (B, N * 2,) x bool.
    """
    B, N = past_ids.size()
    return (past_ids != 0).unsqueeze(2).expand(-1, -1, 2).reshape(B, N * 2)
Returns (B, N * 2,) x bool.
get_preprocessed_masks
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/input_features_preprocessors.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/input_features_preprocessors.py
Apache-2.0
def forward(  # pyre-ignore [15]
    self,
    lengths: torch.Tensor,
    output_embeddings: torch.Tensor,
    supervision_ids: torch.Tensor,
    supervision_embeddings: torch.Tensor,
    supervision_weights: torch.Tensor,
    negatives_sampler: NegativesSampler,
    **kwargs,
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
    """
    Args:
        lengths: [B] x int32 representing number of non-zero elements per row.
        output_embeddings: [B, N, D] x float, embeddings for the current
            input sequence.
        supervision_ids: [B, N] x int64, (positive) supervision ids.
        supervision_embeddings: [B, N, D] x float.
        supervision_weights: Optional [B, N] x float. Optional weights for
            masking out invalid positions, or reweighting supervision labels.
        negatives_sampler: sampler used to obtain negative examples paired
            with positives.

    Returns:
        Tuple of (loss for the current engaged sequence, str-keyed aux_losses).
    """
    torch._assert(
        output_embeddings.size() == supervision_embeddings.size(),
        "Invalid supervision embeddings size.",
    )
    torch._assert(
        supervision_ids.size() == supervision_embeddings.size()[:-1],
        "Invalid supervision ids size.",
    )
    jagged_id_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
    jagged_supervision_ids = (
        torch.ops.fbgemm.dense_to_jagged(
            supervision_ids.unsqueeze(-1).float(), [jagged_id_offsets]
        )[0]
        .squeeze(1)
        .long()
    )
    if "user_ids" in kwargs:
        # expand to jagged.
        max_length: int = int(lengths.max())
        kwargs["user_ids"] = torch.ops.fbgemm.dense_to_jagged(
            kwargs["user_ids"]
            .unsqueeze(1)
            .expand(-1, max_length)
            .unsqueeze(2),  # (B, max_length, 1)
            [jagged_id_offsets],
        )[0].squeeze(1)
    args = OrderedDict(
        [
            (
                "output_embeddings",
                torch.ops.fbgemm.dense_to_jagged(
                    output_embeddings,
                    [jagged_id_offsets],
                )[0],
            ),
            ("supervision_ids", jagged_supervision_ids),
            (
                "supervision_embeddings",
                torch.ops.fbgemm.dense_to_jagged(
                    supervision_embeddings,
                    [jagged_id_offsets],
                )[0],
            ),
            (
                "supervision_weights",
                torch.ops.fbgemm.dense_to_jagged(
                    supervision_weights.unsqueeze(-1),
                    [jagged_id_offsets],
                )[0].squeeze(1),
            ),
            ("negatives_sampler", negatives_sampler),
        ]
    )
    args.update(kwargs)
    if self._activation_checkpoint:
        return checkpoint(
            self.jagged_forward,
            *args.values(),
            use_reentrant=False,
        )
    else:
        return self.jagged_forward(
            output_embeddings=torch.ops.fbgemm.dense_to_jagged(
                output_embeddings,
                [jagged_id_offsets],
            )[0],
            supervision_ids=jagged_supervision_ids,
            supervision_embeddings=torch.ops.fbgemm.dense_to_jagged(
                supervision_embeddings,
                [jagged_id_offsets],
            )[0],
            supervision_weights=torch.ops.fbgemm.dense_to_jagged(
                supervision_weights.unsqueeze(-1),
                [jagged_id_offsets],
            )[0].squeeze(1),
            negatives_sampler=negatives_sampler,
            **kwargs,
        )
Args: lengths: [B] x int32 representing number of non-zero elements per row. output_embeddings: [B, N, D] x float, embeddings for the current input sequence. supervision_ids: [B, N] x int64, (positive) supervision ids. supervision_embeddings: [B, N, D] x float. supervision_weights: Optional [B, N] x float. Optional weights for masking out invalid positions, or reweighting supervision labels. negatives_sampler: sampler used to obtain negative examples paired with positives. Returns: Tuple of (loss for the current engaged sequence, str-keyed aux_losses).
forward
python
facebookresearch/generative-recommenders
generative_recommenders/research/modeling/sequential/losses/sampled_softmax.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/research/modeling/sequential/losses/sampled_softmax.py
Apache-2.0
def get_args():  # pyre-ignore [3]
    """Parse commandline."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset", default="debug", choices=SUPPORTED_DATASETS, help="dataset"
    )
    args, unknown_args = parser.parse_known_args()
    logger.warning(f"unknown_args: {unknown_args}")
    return args
Parse commandline.
get_args
python
facebookresearch/generative-recommenders
generative_recommenders/dlrm_v3/inference/main.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/dlrm_v3/inference/main.py
Apache-2.0
def generate_loadgen_version_definitions_sha1(ofile, loadgen_root):
    """Writes definition for Sha1OfFiles."""
    sha1s = ""
    loadgen_files = (
        ["/bindings/" + s for s in os.listdir(loadgen_root + "/bindings")]
        + ["/" + s for s in os.listdir(loadgen_root)])
    for fn in sorted(loadgen_files):
        full_fn = loadgen_root + fn
        if not os.path.isfile(full_fn):
            continue
        file_data = open(full_fn, "rb").read()
        sha1s += hashlib.sha1(file_data).hexdigest() + " " + fn + "\n"

    ofile.write(func_def("Sha1OfFiles", make_raw_string(sha1s[0:-1])))
Writes definition for Sha1OfFiles.
generate_loadgen_version_definitions_sha1
python
facebookresearch/generative-recommenders
generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/version_generator.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/version_generator.py
Apache-2.0
def generate_loadgen_version_definitions(cc_filename, loadgen_root):
    """Generates the C++ source file with the loadgen version info."""
    try:
        os.makedirs(os.path.dirname(cc_filename))
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
    ofile = open(cc_filename, "w")
    ofile.write("// DO NOT EDIT: Autogenerated by version_generator.py.\n\n")
    ofile.write("#include <string>\n\n")
    ofile.write("namespace mlperf {\n\n")
    ofile.write(func_def("Version", "\"4.1\""))

    date_time_now_local = datetime.datetime.now().isoformat()
    date_time_now_utc = datetime.datetime.utcnow().isoformat()
    ofile.write(func_def("BuildDateLocal", "\"" + date_time_now_local + "\""))
    ofile.write(func_def("BuildDateUtc", "\"" + date_time_now_utc + "\""))

    git_dir = "--git-dir=\"" + loadgen_root + "/../.git\" "
    git_work_tree = "--work-tree=\"" + loadgen_root + "/..\" "
    git_command = "git " + git_dir + git_work_tree
    git_status = os.popen(git_command + "status")
    git_status.read()
    is_git_repo = git_status.close() is None
    if is_git_repo:
        generate_loadgen_version_definitions_git(ofile, git_command)
    else:
        generate_loadgen_verstion_definitions_git_stubs(ofile)
    generate_loadgen_version_definitions_sha1(ofile, loadgen_root)
    ofile.write("} // namespace mlperf\n")
    ofile.close()
Generates the C++ source file with the loadgen version info.
generate_loadgen_version_definitions
python
facebookresearch/generative-recommenders
generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/version_generator.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/version_generator.py
Apache-2.0
def process_query_async(query_samples):
    """Processes the list of queries."""
    time.sleep(.001)
    responses = []
    response_array = array.array(
        "f", [0, 1, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128, 254, 255])
    response_info = response_array.buffer_info()
    response_data = response_info[0]
    response_size = response_info[1] * response_array.itemsize
    for s in query_samples:
        responses.append(
            mlperf_loadgen.QuerySampleResponse(
                s.id, response_data, response_size))
    mlperf_loadgen.QuerySamplesComplete(responses)
Processes the list of queries.
process_query_async
python
facebookresearch/generative-recommenders
generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/py_demo_single_stream.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/py_demo_single_stream.py
Apache-2.0
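The buffer_info() idiom used above: loadgen wants a raw (pointer, size) pair for the response payload, which array.array exposes directly. A minimal sketch:

import array

payload = array.array("f", [0.0, 1.0])
addr, n_elems = payload.buffer_info()
size_bytes = n_elems * payload.itemsize
# QuerySampleResponse(sample_id, addr, size_bytes) then points loadgen at
# this buffer; the array must stay alive until QuerySamplesComplete runs.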
def process_query_async(query_samples):
    """Processes the list of queries."""
    query_responses = []
    for s in query_samples:
        response_array = np.array(responses[s.index], np.int32)
        time.sleep(.0002)
        token = response_array[:1]
        response_token = array.array("B", token.tobytes())
        response_token_info = response_token.buffer_info()
        response_token_data = response_token_info[0]
        response_token_size = response_token_info[1] * response_token.itemsize
        mlperf_loadgen.FirstTokenComplete(
            [mlperf_loadgen.QuerySampleResponse(
                s.id, response_token_data, response_token_size)])
        time.sleep(.02)
        n_tokens = len(response_array)
        response_array = array.array("B", response_array.tobytes())
        response_info = response_array.buffer_info()
        response_data = response_info[0]
        response_size = response_info[1] * response_array.itemsize
        query_responses.append(
            mlperf_loadgen.QuerySampleResponse(
                s.id, response_data, response_size, n_tokens))
    mlperf_loadgen.QuerySamplesComplete(query_responses)
Processes the list of queries.
process_query_async
python
facebookresearch/generative-recommenders
generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/token_metrics/py_demo_single_stream.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/token_metrics/py_demo_single_stream.py
Apache-2.0
def process_query_async(query_samples):
    """Processes the list of queries."""
    query_responses = []
    for s in query_samples:
        response_array = np.array(responses[s.index], np.int32)
        token = response_array[0]
        time.sleep(.0002)
        response_token = array.array("B", token.tobytes())
        response_token_info = response_token.buffer_info()
        response_token_data = response_token_info[0]
        response_token_size = response_token_info[1] * response_token.itemsize
        time.sleep(.02)
        n_tokens = len(response_array)
        response_array = array.array("B", response_array.tobytes())
        response_info = response_array.buffer_info()
        response_data = response_info[0]
        response_size = response_info[1] * response_array.itemsize
        # print(f"Reported size python: {n_tokens}")
        query_responses.append(
            mlperf_loadgen.QuerySampleResponse(
                s.id, response_data, response_size))
    mlperf_loadgen.QuerySamplesComplete(query_responses)
Processes the list of queries.
process_query_async
python
facebookresearch/generative-recommenders
generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/token_metrics/py_demo_server_inferred.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/token_metrics/py_demo_server_inferred.py
Apache-2.0
def process_query_async(query_samples):
    """Processes the list of queries."""
    query_responses = []
    for s in query_samples:
        response_array = np.array(responses[s.index], np.int32)
        token = response_array[0]
        time.sleep(.0002)
        response_token = array.array("B", token.tobytes())
        response_token_info = response_token.buffer_info()
        response_token_data = response_token_info[0]
        response_token_size = response_token_info[1] * response_token.itemsize
        mlperf_loadgen.FirstTokenComplete(
            [mlperf_loadgen.QuerySampleResponse(
                s.id, response_token_data, response_token_size)])
        time.sleep(.02)
        n_tokens = len(response_array)
        response_array = array.array("B", response_array.tobytes())
        response_info = response_array.buffer_info()
        response_data = response_info[0]
        response_size = response_info[1] * response_array.itemsize
        # print(f"Reported size python: {n_tokens}")
        query_responses.append(
            mlperf_loadgen.QuerySampleResponse(
                s.id, response_data, response_size, n_tokens))
    mlperf_loadgen.QuerySamplesComplete(query_responses)
Processes the list of queries.
process_query_async
python
facebookresearch/generative-recommenders
generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/token_metrics/py_demo_server.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/token_metrics/py_demo_server.py
Apache-2.0
def get_features(self, sample_id):
    """Returns the feature for a given sample id."""
    return self.eval_features[sample_id]
Returns the feature for a given sample id.
get_features
python
facebookresearch/generative-recommenders
generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/py_demo_server_lon.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/py_demo_server_lon.py
Apache-2.0
def load_samples_to_ram(self, query_samples):
    """Loads the features for the given query samples into RAM."""
    # Current implementation is not using this functionality.
    del query_samples
    return
Loads the features for the given query samples into RAM.
load_samples_to_ram
python
facebookresearch/generative-recommenders
generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/py_demo_server_lon.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/py_demo_server_lon.py
Apache-2.0
def unload_samples_from_ram(self, query_samples):
    """Unloads the features for the given query samples from RAM."""
    # Current implementation is not using this functionality.
    del query_samples
    return
Unloads the features for the given query samples from RAM.
unload_samples_from_ram
python
facebookresearch/generative-recommenders
generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/py_demo_server_lon.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/py_demo_server_lon.py
Apache-2.0
def __init__(self, qsl: QSL, sut_server_addr: list):
    """
    Constructor for the QDL.

    Args:
        qsl: The QSL to use.
        sut_server_addr: A list of addresses of the SUT.
    """
    self.qsl = qsl

    # Construct QDL from the python binding
    self.qdl = mlperf_loadgen.ConstructQDL(
        self.issue_query, self.flush_queries, self.client_get_name)
    self.sut_server_addr = sut_server_addr
    self.num_nodes = len(sut_server_addr)

    # For round robin between the SUTs:
    self.next_sut_id = 0
    self.lock = threading.Lock()
Constructor for the QDL. Args: qsl: The QSL to use. sut_server_addr: A list of addresses of the SUT.
__init__
python
facebookresearch/generative-recommenders
generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/py_demo_server_lon.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/py_demo_server_lon.py
Apache-2.0
def issue_query(self, query_samples):
    """Process the query to send to the SUT."""
    threading.Thread(
        target=self.process_query_async, args=[query_samples]).start()
Process the query to send to the SUT
issue_query
python
facebookresearch/generative-recommenders
generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/py_demo_server_lon.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/py_demo_server_lon.py
Apache-2.0
def flush_queries(self):
    """Flush the queries. Dummy implementation."""
    pass
Flush the queries. Dummy implementation.
flush_queries
python
facebookresearch/generative-recommenders
generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/py_demo_server_lon.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/py_demo_server_lon.py
Apache-2.0
def process_query_async(self, query_samples):
    """
    This function is called by the Loadgen in a separate thread.
    It is responsible for:
        1. Creating a query for the SUT, by reading the features from the QSL.
        2. Sending the query to the SUT.
        3. Waiting for the response from the SUT.
        4. Deserializing the response.
        5. Calling mlperf_loadgen.QuerySamplesComplete(query_samples, response)

    Args:
        query_samples: A list of QuerySample objects.
    """
    responses = []
    for s in query_samples:
        # Overall process:
        # QDL builds a real-world query and sends to SUT --> SUT processes
        # --> SUT sends back to QDL

        # Read features from the QSL
        features = self.qsl.get_features(s.index)
        time.sleep(.001)  # Ensure a maximal rate of queries to the SUT

        # Send the query to SUT in round robin and wait for a response
        sut_result = self.client_predict(features, s.index)
        response_array = array.array('B', sut_result.encode('utf-8'))
        bi = response_array.buffer_info()
        responses.append(mlperf_loadgen.QuerySampleResponse(
            s.id, bi[0], bi[1]))
    mlperf_loadgen.QuerySamplesComplete(responses)
This function is called by the Loadgen in a separate thread. It is responsible for 1. Creating a query for the SUT, by reading the features from the QSL. 2. Sending the query to the SUT. 3. Waiting for the response from the SUT. 4. Deserializing the response. 5. Calling mlperf_loadgen.QuerySamplesComplete(query_samples, response) Args: query_samples: A list of QuerySample objects.
process_query_async
python
facebookresearch/generative-recommenders
generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/py_demo_server_lon.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/py_demo_server_lon.py
Apache-2.0
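One detail worth calling out in step 5 above: `mlperf_loadgen.QuerySampleResponse` takes a raw buffer address and a byte length rather than a Python string, which is why the result is first copied into an `array.array`. A minimal standalone sketch of just that packing step (`sample_id` and `result_str` are placeholder names, not from the demo):

import array

def pack_response(sample_id, result_str):
    # LoadGen reads the response out of a raw buffer, so keep a reference to
    # `buf` alive until QuerySamplesComplete has been called.
    buf = array.array('B', result_str.encode('utf-8'))
    ptr, length = buf.buffer_info()  # (address of first byte, number of bytes)
    return buf, (sample_id, ptr, length)  # args for mlperf_loadgen.QuerySampleResponse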
def get_sut_id_round_robin(self): """Get the SUT id in round robin.""" with self.lock: res = self.next_sut_id self.next_sut_id = (self.next_sut_id + 1) % self.num_nodes return res
Get the SUT id in round robin.
get_sut_id_round_robin
python
facebookresearch/generative-recommenders
generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/py_demo_server_lon.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/py_demo_server_lon.py
Apache-2.0
def client_predict(self, query, id): """Serialize the query, send it to the SUT in round robin, and return the deserialized response.""" url = '{}/predict/'.format(self.sut_server_addr[self.get_sut_id_round_robin()]) response = requests.post(url, json={'query': query, 'id': id}) return response.json()['result']
Serialize the query, send it to the SUT in round robin, and return the deserialized response.
client_predict
python
facebookresearch/generative-recommenders
generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/py_demo_server_lon.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/py_demo_server_lon.py
Apache-2.0
def client_get_name(self): """Get the name of the SUT from ALL the SUTs.""" if len(self.sut_server_addr) == 1: return requests.post(f'{self.sut_server_addr[0]}/getname/').json()['name'] sut_names = [requests.post(f'{addr}/getname/').json()['name'] for addr in self.sut_server_addr] return "Multi-node SUT: " + ', '.join(sut_names)
Get the name of the SUT from ALL the SUTs.
client_get_name
python
facebookresearch/generative-recommenders
generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/py_demo_server_lon.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/py_demo_server_lon.py
Apache-2.0
def preprocess(query): """[SUT Node] A dummy preprocess.""" # Here may come for example batching, tokenization, resizing, normalization, etc. response = query return response
[SUT Node] A dummy preprocess.
preprocess
python
facebookresearch/generative-recommenders
generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/sut_over_network_demo.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/sut_over_network_demo.py
Apache-2.0
def dnn_model(query): """[SUT Node] A dummy DNN model.""" # Here may come for example a call to a dnn model such as resnet, bert, etc. response = query return response
[SUT Node] A dummy DNN model.
dnn_model
python
facebookresearch/generative-recommenders
generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/sut_over_network_demo.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/sut_over_network_demo.py
Apache-2.0
def postprocess(query): """[SUT Node] A dummy postprocess.""" # Here may come for example a postprocessing call, e.g., NMS, detokenization, etc. response = query return response
[SUT Node] A dummy postprocess.
postprocess
python
facebookresearch/generative-recommenders
generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/sut_over_network_demo.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/sut_over_network_demo.py
Apache-2.0
def predict(): """Receives a query (e.g., a text), runs inference, and returns a prediction.""" query = request.get_json(force=True)['query'] result = postprocess(dnn_model(preprocess(query))) return jsonify(result=result)
Receives a query (e.g., a text), runs inference, and returns a prediction.
predict
python
facebookresearch/generative-recommenders
generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/sut_over_network_demo.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/sut_over_network_demo.py
Apache-2.0
def getname(): """Returns the name of the SUT.""" return jsonify(name='Demo SUT (Network SUT) node' + ((' ' + node) if node else ''))
Returns the name of the SUT.
getname
python
facebookresearch/generative-recommenders
generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/sut_over_network_demo.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/dlrm_v3/inference/thirdparty/loadgen/demos/lon/sut_over_network_demo.py
Apache-2.0
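The dummy stages and the two handlers above come from sut_over_network_demo.py; a hedged sketch of how such a SUT node might expose them over HTTP with Flask follows. The route paths mirror the `/predict/` and `/getname/` URLs that the QDL posts to, but the app object, the port, and the `node` variable are assumptions, not the demo's actual wiring:

from flask import Flask, jsonify, request

app = Flask(__name__)
node = ''  # optional node label; assumed to come from the CLI in the real demo

@app.route('/predict/', methods=['POST'])
def predict():
    # Same flow as the handler above: preprocess -> model -> postprocess.
    query = request.get_json(force=True)['query']
    return jsonify(result=postprocess(dnn_model(preprocess(query))))

@app.route('/getname/', methods=['POST'])
def getname():
    return jsonify(name='Demo SUT (Network SUT) node' + ((' ' + node) if node else ''))

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8000)  # port is an assumption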
def jagged_dense_bmm_broadcast_add( max_seq_len: int, seq_offsets: torch.Tensor, jagged: torch.Tensor, dense: torch.Tensor, bias: torch.Tensor, kernel: HammerKernel = HammerKernel.PYTORCH, ) -> torch.Tensor: """ Computing out = jagged x dense + bias jagged has shape (sum_B(M_i), K), dense has shape (B, K, N), and bias has shape (B, N) out has shape (sum_B(M_i), N) """ if not is_fx_tracing(): _, K = jagged.shape B, _, N = dense.shape torch._assert(dense.shape[1] == K, "wrong dense shape[1]") torch._assert(seq_offsets.shape[0] == B + 1, "wrong seq_offsets shape[0]") torch._assert(bias.shape[0] == B, "wrong bias shape[0]") torch._assert(bias.shape[1] == N, "wrong bias shape[1]") if kernel == HammerKernel.TRITON: return triton_jagged_dense_bmm_broadcast_add( max_seq_len=max_seq_len, seq_offsets=seq_offsets, jagged=jagged, dense=dense, bias=bias, ) elif kernel == HammerKernel.TRITON_CC: return triton_cc_jagged_dense_bmm( max_seq_len=max_seq_len, seq_offsets=seq_offsets, jagged=jagged, dense=dense, bias=bias, ) else: return pytorch_jagged_dense_bmm_broadcast_add( max_seq_len=max_seq_len, seq_offsets=seq_offsets, jagged=jagged, dense=dense, bias=bias, )
Computing out = jagged x dense + bias jagged has shape (sum_B(M_i), K), dense has shape (B, K, N), and bias has shape (B, N) out has shape (sum_B(M_i), N)
jagged_dense_bmm_broadcast_add
python
facebookresearch/generative-recommenders
generative_recommenders/ops/jagged_tensors.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/ops/jagged_tensors.py
Apache-2.0
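As a sanity check on the shapes in the docstring above, here is a pure-PyTorch reference for the same computation: a plain loop over sequences, not the repo's `pytorch_jagged_dense_bmm_broadcast_add` backend.

import torch

def jagged_dense_bmm_broadcast_add_ref(seq_offsets, jagged, dense, bias):
    # out[off[i]:off[i+1]] = jagged[off[i]:off[i+1]] @ dense[i] + bias[i]
    return torch.cat([
        jagged[seq_offsets[i]:seq_offsets[i + 1]] @ dense[i] + bias[i]
        for i in range(dense.shape[0])
    ], dim=0)

# Example: two sequences of lengths 2 and 3, so sum_B(M_i) = 5, with K=4, N=5.
seq_offsets = torch.tensor([0, 2, 5])
out = jagged_dense_bmm_broadcast_add_ref(
    seq_offsets, torch.randn(5, 4), torch.randn(2, 4, 5), torch.randn(2, 5))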
def cuda_hstu_mha( max_seq_len: int, alpha: float, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, seq_offsets: Optional[torch.Tensor] = None, causal: bool = False, num_targets: Optional[torch.Tensor] = None, max_attn_len: int = 0, contextual_seq_len: int = 0, q_descale: Optional[torch.Tensor] = None, k_descale: Optional[torch.Tensor] = None, v_descale: Optional[torch.Tensor] = None, sort_by_length: bool = False, deterministic: bool = False, sm_margin: int = 0, ) -> torch.Tensor: """ Arguments: q, k, v: (batch_size, seqlen, nheads, headdim) or (total_seqlen, nheads, headdim) """ return HSTUFlashAttentionFunction.apply( max_seq_len, alpha, q, k, v, seq_offsets, causal, num_targets, max_attn_len, contextual_seq_len, q_descale, k_descale, v_descale, sort_by_length, deterministic, sm_margin, )
Arguments: q, k, v: (batch_size, seqlen, nheads, headdim) or (total_seqlen, nheads, headdim)
cuda_hstu_mha
python
facebookresearch/generative-recommenders
generative_recommenders/ops/cpp/cuda_hstu_attention.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/ops/cpp/cuda_hstu_attention.py
Apache-2.0
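A hedged example of calling the wrapper above with the jagged ("total_seqlen") layout. `seq_offsets` is the exclusive prefix sum of sequence lengths, as in the other jagged ops in this repo; the lengths, head count, bf16 dtype, and `alpha = 1/headdim` scaling are illustrative assumptions, not values prescribed by the wrapper:

import torch

lengths = torch.tensor([5, 3])
seq_offsets = torch.cat([torch.zeros(1, dtype=torch.long), lengths.cumsum(0)])
total, n_heads, head_dim = int(lengths.sum()), 4, 64
q = torch.randn(total, n_heads, head_dim, device='cuda', dtype=torch.bfloat16)
k, v = torch.randn_like(q), torch.randn_like(q)
out = cuda_hstu_mha(
    max_seq_len=int(lengths.max()),
    alpha=1.0 / head_dim,  # scaling choice is an assumption
    q=q, k=k, v=v,
    seq_offsets=seq_offsets,
    causal=True,
)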
def jagged_dense_bmm_broadcast_add_kernel( seq_offsets, Jagged, Dense, Bias, Out, AUTOTUNE_MAX_SEQ_LEN, N, K, stride_jm, stride_db, stride_dk, stride_dn, stride_bias_b, stride_om, HAS_BIAS: tl.constexpr, ALLOW_TF32: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr, ): """ Computing bmm Out = Jagged x Dense + Bias M is the jagged dimension Jagged has shape (sum_B(M_i), K), Dense has shape (B, K, N), Bias has shape (B, N), and Out has shape (sum_B(M_i), N) """ off_n = tl.program_id(0) off_m = tl.program_id(1) off_b = tl.program_id(2) seq_start = tl.load(seq_offsets + off_b).to(tl.int64) seq_end = tl.load(seq_offsets + off_b + 1) seq_len = seq_end - seq_start start_m = off_m * BLOCK_M start_n = off_n * BLOCK_N if start_m >= seq_len: return Jagged += seq_start * stride_jm Dense += off_b.to(tl.int64) * stride_db Out += seq_start * stride_om offs_m = start_m + tl.arange(0, BLOCK_M) offs_n = start_n + tl.arange(0, BLOCK_N) offs_k = tl.arange(0, BLOCK_K) jg_ptrs = Jagged + offs_m[:, None] * stride_jm + offs_k[None, :] dn_ptrs = Dense + offs_k[:, None] * stride_dk + offs_n[None, :] * stride_dn accumulator = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32) for k in range(0, K, BLOCK_K): jg = tl.load( jg_ptrs, # pyre-fixme[16]: `int` has no attribute `__getitem__`. mask=(offs_m[:, None] < seq_len) and ((k + offs_k)[None, :] < K), other=0.0, ) dn = tl.load( dn_ptrs, mask=((k + offs_k)[:, None] < K) and (offs_n[None, :] < N), other=0.0, ) accumulator += tl.dot(jg, dn, allow_tf32=ALLOW_TF32) jg_ptrs += BLOCK_K dn_ptrs += BLOCK_K * stride_dk if HAS_BIAS: bias_ptrs = Bias + off_b * stride_bias_b + offs_n bias = tl.load(bias_ptrs, mask=offs_n < N) accumulator += bias[None, :].to(tl.float32) out = accumulator.to(Out.dtype.element_ty) offs_m = start_m + tl.arange(0, BLOCK_M) offs_n = start_n + tl.arange(0, BLOCK_N) out_ptrs = Out + offs_m[:, None] * stride_om + offs_n[None, :] tl.store(out_ptrs, out, mask=(offs_m[:, None] < seq_len) & (offs_n[None, :] < N))
Computing bmm Out = Jagged x Dense + Bias M is the jagged dimension Jagged has shape (sum_B(M_i), K), Dense has shape (B, K, N), Bias has shape (B, N), and Out has shape (sum_B(M_i), N)
jagged_dense_bmm_broadcast_add_kernel
python
facebookresearch/generative-recommenders
generative_recommenders/ops/triton/triton_jagged.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/ops/triton/triton_jagged.py
Apache-2.0
def _jagged_jagged_bmm_reduce_sum( seq_offsets, JaggedA, JaggedB, Out, ReduceOut, M, N, AUTOTUNE_MAX_SEQ_LEN, stride_ak, stride_bk, stride_ob, stride_om, stride_on, stride_orb, stride_orn, REDUCE_JAGGEDB: tl.constexpr, ALLOW_TF32: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr, ): """ Computing bmm Out = Jagged x Jagged K is the jagged dimension JaggedA has shape (sum_B(K_i), M), JaggedB has shape (sum_B(K_i), N), and Out has shape (B, M, N) """ off_b = tl.program_id(0) off_m = tl.program_id(1) off_n = tl.program_id(2) seq_start = tl.load(seq_offsets + off_b).to(tl.int64) seq_end = tl.load(seq_offsets + off_b + 1) seq_len = seq_end - seq_start start_m = off_m * BLOCK_M start_n = off_n * BLOCK_N accumulator = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32) Out += off_b.to(tl.int64) * stride_ob offs_m = start_m + tl.arange(0, BLOCK_M) offs_n = start_n + tl.arange(0, BLOCK_N) out_ptrs = Out + offs_m[:, None] * stride_om + offs_n[None, :] * stride_on if REDUCE_JAGGEDB: out_reduce_ptrs = ReduceOut + off_b * stride_orb + offs_n * stride_orn acc_reduce = tl.zeros((BLOCK_N,), dtype=tl.float32) if seq_len == 0: out = accumulator.to(Out.dtype.element_ty) tl.store(out_ptrs, out, mask=(offs_m[:, None] < M) & (offs_n[None, :] < N)) if REDUCE_JAGGEDB: if off_m == 0: tl.store( out_reduce_ptrs, # pyre-ignore [61] acc_reduce.to(ReduceOut.dtype.element_ty), mask=(offs_n < N), ) return JaggedA += seq_start * stride_ak JaggedB += seq_start * stride_bk offs_k = tl.arange(0, BLOCK_K) jg_a_ptrs = JaggedA + offs_k[None, :] * stride_ak + offs_m[:, None] jg_b_ptrs = JaggedB + offs_k[:, None] * stride_bk + offs_n[None, :] for k in range(0, seq_len, BLOCK_K): jg_a = tl.load( jg_a_ptrs, # pyre-fixme[16]: `int` has no attribute `__getitem__`. mask=(offs_m[:, None] < M) and ((k + offs_k)[None, :] < seq_len), other=0.0, ) jg_b = tl.load( jg_b_ptrs, mask=(offs_n[None, :] < N) and ((k + offs_k)[:, None] < seq_len), other=0.0, ) accumulator += tl.dot(jg_a, jg_b, allow_tf32=ALLOW_TF32) if REDUCE_JAGGEDB: if off_m == 0: acc_reduce += tl.sum(jg_b.to(tl.float32), axis=0) jg_a_ptrs += BLOCK_K * stride_ak jg_b_ptrs += BLOCK_K * stride_bk out = accumulator.to(Out.dtype.element_ty) tl.store(out_ptrs, out, mask=(offs_m[:, None] < M) & (offs_n[None, :] < N)) if REDUCE_JAGGEDB: if off_m == 0: tl.store( out_reduce_ptrs, # pyre-ignore [61] acc_reduce.to(ReduceOut.dtype.element_ty), mask=(offs_n < N), )
Computing bmm Out = Jagged x Jagged K is the jagged dimension JaggedA has shape (sum_B(K_i), M), JaggedB has shape (sum_B(K_i), N), and Out has shape (B, M, N)
_jagged_jagged_bmm_reduce_sum
python
facebookresearch/generative-recommenders
generative_recommenders/ops/triton/triton_jagged.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/ops/triton/triton_jagged.py
Apache-2.0
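A hedged eager-mode reference for the kernel's semantics (a loop per batch element; not the repo's actual wrapper). `ReduceOut` corresponds to summing JaggedB over the jagged dimension when `REDUCE_JAGGEDB` is set:

import torch

def jagged_jagged_bmm_reduce_sum_ref(seq_offsets, jagged_a, jagged_b):
    # Out[b] = A_b.T @ B_b over the jagged (K) dimension;
    # ReduceOut[b] = B_b summed over that same dimension.
    B = seq_offsets.numel() - 1
    M, N = jagged_a.shape[1], jagged_b.shape[1]
    out = jagged_a.new_zeros(B, M, N)
    reduce_out = jagged_b.new_zeros(B, N)
    for b in range(B):
        s, e = int(seq_offsets[b]), int(seq_offsets[b + 1])
        out[b] = jagged_a[s:e].t() @ jagged_b[s:e]
        reduce_out[b] = jagged_b[s:e].sum(dim=0)
    return out, reduce_out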
def jagged_dense_broadcast_add_kernel( seq_offsets, Jagged, Dense, Out, AUTOTUNE_MAX_SEQ_LEN, D, stride_jn, stride_db, stride_on, BLOCK_N: tl.constexpr, BLOCK_D: tl.constexpr, ): """ Computing Out = Jagged + Dense Jagged has shape (sum_B(N_i), D), Dense has shape (B, D), and Out has shape (sum_B(N_i), D) """ off_b = tl.program_id(0) off_n = tl.program_id(1) seq_start = tl.load(seq_offsets + off_b) seq_end = tl.load(seq_offsets + off_b + 1) seq_len = seq_end - seq_start start_n = off_n * BLOCK_N if start_n >= seq_len: return Jagged += seq_start * stride_jn Dense += off_b * stride_db Out += seq_start * stride_on offs_n = start_n + tl.arange(0, BLOCK_N) offs_d = tl.arange(0, BLOCK_D) jagged_ptrs = Jagged + offs_n[:, None] * stride_jn + offs_d[None, :] dense_ptrs = Dense + offs_d out_ptrs = Out + offs_n[:, None] * stride_on + offs_d[None, :] for d in range(0, D, BLOCK_D): jg = tl.load( jagged_ptrs, # pyre-fixme[16]: `int` has no attribute `__getitem__`. mask=(offs_n[:, None] < seq_len) and (d + offs_d)[None, :] < D, ) dn = tl.load(dense_ptrs, mask=d + offs_d < D) out = jg + dn[None, :] tl.store( out_ptrs, out, mask=(offs_n[:, None] < seq_len) and (d + offs_d)[None, :] < D, ) dense_ptrs += BLOCK_D jagged_ptrs += BLOCK_D out_ptrs += BLOCK_D
Computing Out = Jagged + Dense Jagged has shape (sum_B(N_i), D), Dense has shape (B, D), and Out has shape (sum_B(N_i), D)
jagged_dense_broadcast_add_kernel
python
facebookresearch/generative-recommenders
generative_recommenders/ops/triton/triton_jagged.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/ops/triton/triton_jagged.py
Apache-2.0
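The same op in two lines of eager PyTorch, as a hedged reference: `torch.repeat_interleave` expands `dense` to one row per jagged row before the add.

import torch

def jagged_dense_broadcast_add_ref(seq_offsets, jagged, dense):
    # Adds dense[b] (one row per batch element) to every row of sequence b.
    lengths = seq_offsets[1:] - seq_offsets[:-1]
    return jagged + torch.repeat_interleave(dense, lengths, dim=0)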
def jagged_reduce_sum( seq_offsets, Jagged, Out, D, stride_jn, stride_ob, BLOCK_D: tl.constexpr, ): """ Computing Out = sum of Jagged over the jagged dimension Jagged has shape (sum_B(N_i), D) and Out has shape (B, D) """ off_b = tl.program_id(0) off_d = tl.program_id(1) * BLOCK_D seq_start = tl.load(seq_offsets + off_b) seq_end = tl.load(seq_offsets + off_b + 1) seq_len = seq_end - seq_start Jagged += seq_start * stride_jn Out += off_b * stride_ob offs_d = off_d + tl.arange(0, BLOCK_D) jagged_ptrs = Jagged + offs_d out_ptrs = Out + offs_d accumulator = tl.zeros((BLOCK_D,), dtype=tl.float32) for _ in range(0, seq_len): jg = tl.load( jagged_ptrs, mask=offs_d < D, ) accumulator += jg jagged_ptrs += stride_jn out = accumulator.to(Out.dtype.element_ty) tl.store( out_ptrs, out, mask=offs_d < D, )
Computing Out = sum of Jagged over the jagged dimension Jagged has shape (sum_B(N_i), D) and Out has shape (B, D)
jagged_reduce_sum
python
facebookresearch/generative-recommenders
generative_recommenders/ops/triton/triton_jagged.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/ops/triton/triton_jagged.py
Apache-2.0
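And a hedged eager reference for the corrected docstring above, segment-summing with `index_add_`:

import torch

def jagged_reduce_sum_ref(seq_offsets, jagged):
    # Out[b] = jagged[off[b]:off[b+1]].sum(0)
    lengths = seq_offsets[1:] - seq_offsets[:-1]
    batch_ids = torch.repeat_interleave(
        torch.arange(lengths.numel(), device=jagged.device), lengths)
    out = jagged.new_zeros(lengths.numel(), jagged.shape[1])
    return out.index_add_(0, batch_ids, jagged)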
def _add_position_embeddings_kernel( Jagged, seq_offsets, high_inds, Dense, Out, AUTOTUNE_MAX_SEQ_LEN, D, scale, stride_jn, stride_dk, stride_on, SCALE_JAGGED: tl.constexpr, BLOCK_D: tl.constexpr, BLOCK_N: tl.constexpr, ): """ Jagged has shape (sum_B(N_i), D), Dense has shape (K, D), Out has shape (sum_B(N_i), D) """ off_b = tl.program_id(0) off_n = tl.program_id(1) seq_start = tl.load(seq_offsets + off_b) seq_end = tl.load(seq_offsets + off_b + 1) max_ind = tl.load(high_inds + off_b) seq_len = seq_end - seq_start start_n = off_n * BLOCK_N if start_n >= seq_len: return offs_n = start_n + tl.arange(0, BLOCK_N) clamped_offs_n = tl.where(offs_n >= max_ind, max_ind, offs_n) offs_d = tl.arange(0, BLOCK_D) Jagged += seq_start.to(tl.int64) * stride_jn jagged_ptr_offsets = offs_n[:, None] * stride_jn + offs_d[None, :] Out += seq_start.to(tl.int64) * stride_on out_ptrs = Out + offs_n[:, None] * stride_on + offs_d[None, :] dense_ptrs = Dense + clamped_offs_n[:, None] * stride_dk + offs_d[None, :] for _d in range(0, D, BLOCK_D): mask = (offs_n[:, None] < seq_len) and offs_d[None, :] < D jg = tl.load(Jagged + jagged_ptr_offsets, mask=mask) if SCALE_JAGGED: jg = jg * scale dn = tl.load(dense_ptrs, mask=mask) jg += dn tl.store(out_ptrs, jg, mask=mask) dense_ptrs += BLOCK_D out_ptrs += BLOCK_D offs_d += BLOCK_D jagged_ptr_offsets += BLOCK_D
Jagged has shape (sum_B(N_i), D), Dense has shape (K, D), Out has shape (sum_B(N_i), D)
_add_position_embeddings_kernel
python
facebookresearch/generative-recommenders
generative_recommenders/ops/triton/triton_position.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/ops/triton/triton_position.py
Apache-2.0
def _add_timestamp_position_embeddings_kernel( SeqEmb, Offsets, Lengths, PosEmb, TsEmb, Out, TS, PosInds, TsInds, NumTargets, AUTOTUNE_MAX_SEQ_LEN, D, num_time_buckets, time_bucket_increments, time_bucket_scale, time_delta, max_contextual_seq_len, max_pos_ind, stride_sn, stride_pn, stride_tn, stride_on, TRAINING: tl.constexpr, HAS_MULTIPLE_TARGETS: tl.constexpr, INTERLEAVE_TARGETS: tl.constexpr, TIME_BUCKET_FN: tl.constexpr, BLOCK_D: tl.constexpr, BLOCK_N: tl.constexpr, ): """ SeqEmb has shape (sum_B(N_i), D), PosEmb has shape (N_p, D), TsEmb has shape (N_t, D), Out has shape (sum_B(N_i), D) """ off_b = tl.program_id(0) off_n = tl.program_id(1) seq_start = tl.load(Offsets + off_b) seq_end = tl.load(Offsets + off_b + 1) seq_len = seq_end - seq_start start_n = off_n * BLOCK_N if start_n >= seq_len: return offs_n = start_n + tl.arange(0, BLOCK_N) offs_d = tl.arange(0, BLOCK_D) seq_emb_offsets = offs_n[:, None] * stride_sn + offs_d[None, :] SeqEmb += seq_start.to(tl.int64) * stride_sn mask_n = offs_n < seq_len # position encoding seq_len = tl.load(Lengths + off_b) if HAS_MULTIPLE_TARGETS: num_targets = tl.load(NumTargets + off_b) if INTERLEAVE_TARGETS: high_ind = seq_len - num_targets * 2 else: high_ind = seq_len - num_targets else: high_ind = seq_len pos_inds = tl.where(offs_n < high_ind, offs_n, high_ind) pos_inds = high_ind - pos_inds + max_contextual_seq_len pos_inds = tl.where(pos_inds < max_pos_ind - 1, pos_inds, max_pos_ind - 1) pos_inds = tl.where(offs_n < max_contextual_seq_len, offs_n, pos_inds) if TRAINING: tl.store(PosInds + seq_start + offs_n, pos_inds, mask=mask_n) pos_emb_offsets = pos_inds[:, None] * stride_pn + offs_d[None, :] # timestamp encoding ts = tl.load(TS + seq_start + offs_n, mask=mask_n) query_time = tl.load(TS + seq_end - 1) ts = query_time - ts + time_delta ts = tl.where(ts > 1e-6, ts, 1e-6) / time_bucket_increments if TIME_BUCKET_FN == "log": ts = tl.log(ts) else: ts = tl.sqrt(ts) ts = ts * time_bucket_scale ts = ts.to(tl.int32) ts = tl.where(ts > 0, ts, 0) ts = tl.where(ts < num_time_buckets, ts, num_time_buckets) if TRAINING: tl.store(TsInds + seq_start + offs_n, ts, mask=mask_n) ts_emb_offsets = ts[:, None] * stride_tn + offs_d[None, :] Out += seq_start.to(tl.int64) * stride_on out_offsets = Out + offs_n[:, None] * stride_on + offs_d[None, :] for _d in range(0, D, BLOCK_D): mask = (offs_n[:, None] < seq_len) and offs_d[None, :] < D seq_emb = tl.load(SeqEmb + seq_emb_offsets, mask=mask) pos_emb = tl.load(PosEmb + pos_emb_offsets, mask=mask) ts_emb = tl.load(TsEmb + ts_emb_offsets, mask=mask) tl.store(out_offsets, seq_emb + (pos_emb + ts_emb).to(seq_emb.dtype), mask=mask) seq_emb_offsets += BLOCK_D pos_emb_offsets += BLOCK_D ts_emb_offsets += BLOCK_D out_offsets += BLOCK_D offs_d += BLOCK_D
SeqEmb has shape (sum_B(N_i), D), PosEmb has shape (N_p, D), TsEmb has shape (N_t, D), Out has shape (sum_B(N_i), D)
_add_timestamp_position_embeddings_kernel
python
facebookresearch/generative-recommenders
generative_recommenders/ops/triton/triton_position.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/ops/triton/triton_position.py
Apache-2.0
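The timestamp-bucketing arithmetic in the kernel above is easy to mirror on the host side for debugging. A hedged sketch (the function name and the float host tensors are assumptions; the clamping bounds follow the kernel exactly):

import torch

def bucketize_timestamps(ts, query_time, time_delta, time_bucket_increments,
                         time_bucket_scale, num_time_buckets, fn='sqrt'):
    # Mirrors the kernel: shift by the query time, clamp, rescale, bucketize.
    delta = (query_time - ts + time_delta).clamp(min=1e-6) / time_bucket_increments
    delta = delta.log() if fn == 'log' else delta.sqrt()
    inds = (delta * time_bucket_scale).int()
    return inds.clamp(min=0, max=num_time_buckets)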
def forward( self, max_seq_len: int, seq_embeddings: torch.Tensor, seq_offsets: torch.Tensor, contextual_embeddings: Optional[torch.Tensor], ) -> torch.Tensor: """ Args: seq_embeddings: (L, D) seq_offsets: (B + 1,) max_seq_len: int contextual_embeddings: (B, D') """ pass
Args: seq_embeddings: (L, D) seq_offsets: (B + 1,) max_seq_len: int contextual_embeddings: (B, D')
forward
python
facebookresearch/generative-recommenders
generative_recommenders/modules/contextualize_mlps.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/modules/contextualize_mlps.py
Apache-2.0
def __init__( self, stu: STU, is_inference: bool, dropout_ratio: float = 0.5, seed: int = 0, ) -> None: """ Stochastic Depth STU """ super().__init__(stu=stu, is_inference=is_inference) self._dropout_ratio: float = dropout_ratio self._iter: int = 0 self._seed: int = seed self._skip_x: Optional[torch.Tensor] = None
Stochastic Depth STU
__init__
python
facebookresearch/generative-recommenders
generative_recommenders/modules/dynamic_stu.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/modules/dynamic_stu.py
Apache-2.0
def __init__( self, stu: STU, max_l2_len: int, is_inference: bool, contextual_seq_len: int = 0, ) -> None: """ Stochastic Depth STU """ super().__init__(stu=stu, is_inference=is_inference) self._max_l2_len: int = max_l2_len self._contextual_seq_len: int = contextual_seq_len self._saved_tensors: Optional[ Tuple[torch.Tensor, torch.Tensor, torch.Tensor] ] = None self._runtime_max_l2_len: int = 0 self._runtime_minus_l2_len: int = 0
Stochastic Depth STU
__init__
python
facebookresearch/generative-recommenders
generative_recommenders/modules/dynamic_stu.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/modules/dynamic_stu.py
Apache-2.0
def forward( self, seq_embeddings: torch.Tensor, seq_timestamps: torch.Tensor, seq_payloads: Dict[str, torch.Tensor], ) -> torch.Tensor: """ Args: seq_embeddings: (L, D) seq_timestamps: (L, ) seq_payloads: str-keyed tensors. Implementation specific. Returns: postprocessed seq_embeddings, (L, D) """ pass
Args: seq_embeddings: (L, D) seq_timestamps: (L, ) seq_payloads: str-keyed tensors. Implementation specific. Returns: postprocessed seq_embeddings, (L, D)
forward
python
facebookresearch/generative-recommenders
generative_recommenders/modules/postprocessors.py
https://github.com/facebookresearch/generative-recommenders/blob/master/generative_recommenders/modules/postprocessors.py
Apache-2.0
def __init__( self, feature_layer=21, overfeat_bin='overfeat', # or 'overfeat_cuda' pretrained_params=None, network_size=0, merge='maxmean', batch_size=200, verbose=0, ): """ :param feature_layer: The ConvNet layer that's used for feature extraction. Defaults to layer `21`. :param overfeat_bin: The path to the `overfeat` binary. :param pretrained_params: The path to the pretrained parameters file. These files come with the overfeat distribution and can be found in `overfeat/data`. :param network_size: Use the small (0) or large network (1). :param merge: How spatial features are merged. May be one of 'maxmean', 'meanmax' or a callable. """ self.feature_layer = feature_layer self.overfeat_bin = overfeat_bin self.pretrained_params = pretrained_params self.network_size = network_size self.merge = merge self.batch_size = batch_size self.verbose = verbose
:param feature_layer: The ConvNet layer that's used for feature extraction. Defaults to layer `21`. :param overfeat_bin: The path to the `overfeat` binary. :param pretrained_params: The path to the pretrained parameters file. These files come with the overfeat distribution and can be found in `overfeat/data`. :param network_size: Use the small (0) or large network (1). :param merge: How spatial features are merged. May be one of 'maxmean', 'meanmax' or a callable.
__init__
python
dnouri/nolearn
nolearn/overfeat.py
https://github.com/dnouri/nolearn/blob/master/nolearn/overfeat.py
MIT
def __init__( self, feature_layer=21, pretrained_params='net_weight_0', network_size=None, merge='maxmean', batch_size=200, verbose=0, ): """ :param feature_layer: The ConvNet layer that's used for feature extraction. Defaults to layer `21`. Please refer to `this post <https://groups.google.com/forum/#!topic/overfeat/hQeI5hcw8f0>`_ to find out which layers are available for the two different networks. :param pretrained_params: The path to the pretrained parameters file. These files come with the overfeat distribution and can be found in `overfeat/data`. :param merge: How spatial features are merged. May be one of 'maxmean', 'meanmax' or a callable. """ if network_size is None: network_size = int(pretrained_params[-1]) self.feature_layer = feature_layer self.pretrained_params = pretrained_params self.network_size = network_size self.merge = merge self.batch_size = batch_size self.verbose = verbose
:param feature_layer: The ConvNet layer that's used for feature extraction. Defaults to layer `21`. Please refer to `this post <https://groups.google.com/forum/#!topic/overfeat/hQeI5hcw8f0>`_ to find out which layers are available for the two different networks. :param pretrained_params: The path to the pretrained parameters file. These files come with the overfeat distribution and can be found in `overfeat/data`. :param merge: How spatial features are merged. May be one of 'maxmean', 'meanmax' or a callable.
__init__
python
dnouri/nolearn
nolearn/overfeat.py
https://github.com/dnouri/nolearn/blob/master/nolearn/overfeat.py
MIT
def __init__( self, layer_sizes=None, scales=0.05, fan_outs=None, output_act_funct=None, real_valued_vis=True, use_re_lu=True, uniforms=False, learn_rates=0.1, learn_rate_decays=1.0, learn_rate_minimums=0.0, momentum=0.9, l2_costs=0.0001, dropouts=0, nesterov=True, nest_compare=True, rms_lims=None, learn_rates_pretrain=None, momentum_pretrain=None, l2_costs_pretrain=None, nest_compare_pretrain=None, epochs=10, epochs_pretrain=0, loss_funct=None, minibatch_size=64, minibatches_per_epoch=None, pretrain_callback=None, fine_tune_callback=None, random_state=None, verbose=0, ): """ Many parameters such as `learn_rates`, `dropouts` etc. will also accept a single value, in which case that value will be used for all layers. To control the value per layer, pass a list of values instead; see examples below. Parameters ending with `_pretrain` may be provided to override the given parameter for pretraining. Consider an example where you want the pre-training to use a lower learning rate than the fine tuning (the backprop), then you'd maybe pass something like:: DBN([783, 300, 10], learn_rates=0.1, learn_rates_pretrain=0.005) If you don't pass the `learn_rates_pretrain` parameter, the value of `learn_rates` will be used for both pre-training and fine tuning. (Which seems to not work very well.) :param layer_sizes: A list of integers of the form ``[n_vis_units, n_hid_units1, n_hid_units2, ..., n_out_units]``. An example: ``[784, 300, 10]`` The number of units in the input layer and the output layer will be set automatically if you set them to -1. Thus, the above example is equivalent to ``[-1, 300, -1]`` if you pass an ``X`` with 784 features, and a ``y`` with 10 classes. :param scales: Scale of the randomly initialized weights. A list of floating point values. When you find good values for the scale of the weights you can speed up training a lot, and also improve performance. Defaults to `0.05`. :param fan_outs: Number of nonzero incoming connections to a hidden unit. Defaults to `None`, which means that all connections have non-zero weights. :param output_act_funct: Output activation function. Instance of type :class:`~gdbn.activationFunctions.Sigmoid`, :class:`~.gdbn.activationFunctions.Linear`, :class:`~.gdbn.activationFunctions.Softmax` from the :mod:`gdbn.activationFunctions` module. Defaults to :class:`~.gdbn.activationFunctions.Softmax`. :param real_valued_vis: Set `True` (the default) if visible units are real-valued. :param use_re_lu: Set `True` to use rectified linear units. Defaults to `False`. :param uniforms: Not documented at this time. :param learn_rates: A list of learning rates, one entry per weight layer. An example: ``[0.1, 0.1]`` :param learn_rate_decays: The number with which the `learn_rate` is multiplied after each epoch of fine-tuning. :param learn_rate_minimums: The minimum `learn_rates`; after the learn rate reaches the minimum learn rate, the `learn_rate_decays` no longer has any effect. :param momentum: Momentum :param l2_costs: L2 costs per weight layer. :param dropouts: Dropouts per weight layer. :param nesterov: Not documented at this time. :param nest_compare: Not documented at this time. :param rms_lims: Not documented at this time. :param learn_rates_pretrain: A list of learning rates similar to `learn_rates_pretrain`, but used for pretraining. Defaults to value of `learn_rates` parameter. :param momentum_pretrain: Momentum for pre-training. Defaults to value of `momentum` parameter. :param l2_costs_pretrain: L2 costs per weight layer, for pre-training. 
Defaults to the value of `l2_costs` parameter. :param nest_compare_pretrain: Not documented at this time. :param epochs: Number of epochs to train (with backprop). :param epochs_pretrain: Number of epochs to pre-train (with CDN). :param loss_funct: A function that calculates the loss. Used for displaying learning progress and for :meth:`score`. :param minibatch_size: Size of a minibatch. :param minibatches_per_epoch: Number of minibatches per epoch. The default is to use as many as fit into our training set. :param pretrain_callback: An optional function that takes as arguments the :class:`DBN` instance, the epoch and the layer index as its argument, and is called for each epoch of pretraining. :param fine_tune_callback: An optional function that takes as arguments the :class:`DBN` instance and the epoch, and is called for each epoch of fine tuning. :param random_state: An optional int used as the seed by the random number generator. :param verbose: Debugging output. """ if layer_sizes is None: layer_sizes = [-1, -1] if output_act_funct is None: output_act_funct = activationFunctions.Softmax() elif isinstance(output_act_funct, str): output_act_funct = getattr(activationFunctions, output_act_funct)() if random_state is not None and not isinstance(random_state, int): raise ValueError("random_state must be an int") self.layer_sizes = layer_sizes self.scales = scales self.fan_outs = fan_outs self.output_act_funct = output_act_funct self.real_valued_vis = real_valued_vis self.use_re_lu = use_re_lu self.uniforms = uniforms self.learn_rates = learn_rates self.learn_rate_decays = learn_rate_decays self.learn_rate_minimums = learn_rate_minimums self.momentum = momentum self.l2_costs = l2_costs self.dropouts = dropouts self.nesterov = nesterov self.nest_compare = nest_compare self.rms_lims = rms_lims self.learn_rates_pretrain = learn_rates_pretrain self.momentum_pretrain = momentum_pretrain self.l2_costs_pretrain = l2_costs_pretrain self.nest_compare_pretrain = nest_compare_pretrain self.epochs = epochs self.epochs_pretrain = epochs_pretrain self.loss_funct = loss_funct self.use_dropout = True if dropouts else False self.minibatch_size = minibatch_size self.minibatches_per_epoch = minibatches_per_epoch self.pretrain_callback = pretrain_callback self.fine_tune_callback = fine_tune_callback self.random_state = random_state self.verbose = verbose
Many parameters such as `learn_rates`, `dropouts` etc. will also accept a single value, in which case that value will be used for all layers. To control the value per layer, pass a list of values instead; see examples below. Parameters ending with `_pretrain` may be provided to override the given parameter for pretraining. Consider an example where you want the pre-training to use a lower learning rate than the fine tuning (the backprop), then you'd maybe pass something like:: DBN([783, 300, 10], learn_rates=0.1, learn_rates_pretrain=0.005) If you don't pass the `learn_rates_pretrain` parameter, the value of `learn_rates` will be used for both pre-training and fine tuning. (Which seems to not work very well.) :param layer_sizes: A list of integers of the form ``[n_vis_units, n_hid_units1, n_hid_units2, ..., n_out_units]``. An example: ``[784, 300, 10]`` The number of units in the input layer and the output layer will be set automatically if you set them to -1. Thus, the above example is equivalent to ``[-1, 300, -1]`` if you pass an ``X`` with 784 features, and a ``y`` with 10 classes. :param scales: Scale of the randomly initialized weights. A list of floating point values. When you find good values for the scale of the weights you can speed up training a lot, and also improve performance. Defaults to `0.05`. :param fan_outs: Number of nonzero incoming connections to a hidden unit. Defaults to `None`, which means that all connections have non-zero weights. :param output_act_funct: Output activation function. Instance of type :class:`~gdbn.activationFunctions.Sigmoid`, :class:`~.gdbn.activationFunctions.Linear`, :class:`~.gdbn.activationFunctions.Softmax` from the :mod:`gdbn.activationFunctions` module. Defaults to :class:`~.gdbn.activationFunctions.Softmax`. :param real_valued_vis: Set `True` (the default) if visible units are real-valued. :param use_re_lu: Set `True` to use rectified linear units. Defaults to `False`. :param uniforms: Not documented at this time. :param learn_rates: A list of learning rates, one entry per weight layer. An example: ``[0.1, 0.1]`` :param learn_rate_decays: The number with which the `learn_rate` is multiplied after each epoch of fine-tuning. :param learn_rate_minimums: The minimum `learn_rates`; after the learn rate reaches the minimum learn rate, the `learn_rate_decays` no longer has any effect. :param momentum: Momentum :param l2_costs: L2 costs per weight layer. :param dropouts: Dropouts per weight layer. :param nesterov: Not documented at this time. :param nest_compare: Not documented at this time. :param rms_lims: Not documented at this time. :param learn_rates_pretrain: A list of learning rates similar to `learn_rates_pretrain`, but used for pretraining. Defaults to value of `learn_rates` parameter. :param momentum_pretrain: Momentum for pre-training. Defaults to value of `momentum` parameter. :param l2_costs_pretrain: L2 costs per weight layer, for pre-training. Defaults to the value of `l2_costs` parameter. :param nest_compare_pretrain: Not documented at this time. :param epochs: Number of epochs to train (with backprop). :param epochs_pretrain: Number of epochs to pre-train (with CDN). :param loss_funct: A function that calculates the loss. Used for displaying learning progress and for :meth:`score`. :param minibatch_size: Size of a minibatch. :param minibatches_per_epoch: Number of minibatches per epoch. The default is to use as many as fit into our training set. 
:param pretrain_callback: An optional function that takes as arguments the :class:`DBN` instance, the epoch and the layer index as its argument, and is called for each epoch of pretraining. :param fine_tune_callback: An optional function that takes as arguments the :class:`DBN` instance and the epoch, and is called for each epoch of fine tuning. :param random_state: An optional int used as the seed by the random number generator. :param verbose: Debugging output.
__init__
python
dnouri/nolearn
nolearn/dbn.py
https://github.com/dnouri/nolearn/blob/master/nolearn/dbn.py
MIT
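A usage sketch drawn directly from the docstring above: `-1` layer sizes are inferred from the data, and pre-training gets its own, slower learning rate. `X_train` and `y_train` are placeholders.

from nolearn.dbn import DBN

clf = DBN(
    [-1, 300, -1],               # input/output sizes inferred from X and y
    learn_rates=0.1,
    learn_rates_pretrain=0.005,  # slower pre-training, as suggested above
    epochs_pretrain=10,
    epochs=10,
    verbose=1,
)
clf.fit(X_train, y_train)  # X_train/y_train are placeholders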
def __init__( self, feature_layer='fc7_cudanet_out', pretrained_params='imagenet.decafnet.epoch90', pretrained_meta='imagenet.decafnet.meta', center_only=True, classify_direct=False, verbose=0, ): """ :param feature_layer: The ConvNet layer that's used for feature extraction. Defaults to `fc7_cudanet_out`. A description of all available layers for the ImageNet-1k-pretrained ConvNet is found in the DeCAF wiki. They are: - `pool5_cudanet_out` - `fc6_cudanet_out` - `fc6_neuron_cudanet_out` - `fc7_cudanet_out` - `fc7_neuron_cudanet_out` - `probs_cudanet_out` :param pretrained_params: This must point to the file with the pretrained parameters. Defaults to `imagenet.decafnet.epoch90`. For the ImageNet-1k-pretrained ConvNet this file can be obtained from here: http://www.eecs.berkeley.edu/~jiayq/decaf_pretrained/ :param pretrained_meta: Similar to `pretrained_params`, this must point to the file with the pretrained parameters' metadata. Defaults to `imagenet.decafnet.meta`. :param center_only: Use the center patch of the image only when extracting features. If `False`, use four corners, the image center and flipped variants and average a total of 10 feature vectors, which will usually yield better results. Defaults to `True`. :param classify_direct: When `True`, assume that input X is an array of shape (num x 256 x 256 x 3) as returned by `prepare_image`. """ self.feature_layer = feature_layer self.pretrained_params = pretrained_params self.pretrained_meta = pretrained_meta self.center_only = center_only self.classify_direct = classify_direct self.verbose = verbose self.net_ = None if (not os.path.exists(pretrained_params) or not os.path.exists(pretrained_meta)): raise ValueError( "Pre-trained ConvNet parameters not found. You may " "need to download the files from " "http://www.eecs.berkeley.edu/~jiayq/decaf_pretrained/ and " "pass the path to the two files as `pretrained_params` and " "`pretrained_meta` to the `{}` estimator.".format( self.__class__.__name__))
:param feature_layer: The ConvNet layer that's used for feature extraction. Defaults to `fc7_cudanet_out`. A description of all available layers for the ImageNet-1k-pretrained ConvNet is found in the DeCAF wiki. They are: - `pool5_cudanet_out` - `fc6_cudanet_out` - `fc6_neuron_cudanet_out` - `fc7_cudanet_out` - `fc7_neuron_cudanet_out` - `probs_cudanet_out` :param pretrained_params: This must point to the file with the pretrained parameters. Defaults to `imagenet.decafnet.epoch90`. For the ImageNet-1k-pretrained ConvNet this file can be obtained from here: http://www.eecs.berkeley.edu/~jiayq/decaf_pretrained/ :param pretrained_meta: Similar to `pretrained_params`, this must point to the file with the pretrained parameters' metadata. Defaults to `imagenet.decafnet.meta`. :param center_only: Use the center patch of the image only when extracting features. If `False`, use four corners, the image center and flipped variants and average a total of 10 feature vectors, which will usually yield better results. Defaults to `True`. :param classify_direct: When `True`, assume that input X is an array of shape (num x 256 x 256 x 3) as returned by `prepare_image`.
__init__
python
dnouri/nolearn
nolearn/decaf.py
https://github.com/dnouri/nolearn/blob/master/nolearn/decaf.py
MIT
def prepare_image(self, image): """Returns image of shape `(256, 256, 3)`, as expected by `transform` when `classify_direct = True`. """ from decaf.util import transform # soft dep _JEFFNET_FLIP = True # first, extract the 256x256 center. image = transform.scale_and_extract(transform.as_rgb(image), 256) # convert to [0,255] float32 image = image.astype(np.float32) * 255. if _JEFFNET_FLIP: # Flip the image if necessary, maintaining the c_contiguous order image = image[::-1, :].copy() # subtract the mean image -= self.net_._data_mean return image
Returns image of shape `(256, 256, 3)`, as expected by `transform` when `classify_direct = True`.
prepare_image
python
dnouri/nolearn
nolearn/decaf.py
https://github.com/dnouri/nolearn/blob/master/nolearn/decaf.py
MIT
def chunks(l, n): """ Yield successive n-sized chunks from l. """ for i in xrange(0, len(l), n): yield l[i:i + n]
Yield successive n-sized chunks from l.
chunks
python
dnouri/nolearn
nolearn/util.py
https://github.com/dnouri/nolearn/blob/master/nolearn/util.py
MIT
def multiclass_logloss(actual, predicted, eps=1e-15): """Multi class version of Logarithmic Loss metric. :param actual: Array containing the actual target classes :param predicted: Matrix with class predictions, one probability per class """ # Convert 'actual' to a binary array if it's not already: if len(actual.shape) == 1: actual2 = np.zeros((actual.shape[0], predicted.shape[1])) for i, val in enumerate(actual): actual2[i, val] = 1 actual = actual2 clip = np.clip(predicted, eps, 1 - eps) rows = actual.shape[0] vsota = np.sum(actual * np.log(clip)) return -1.0 / rows * vsota
Multi class version of Logarithmic Loss metric. :param actual: Array containing the actual target classes :param predicted: Matrix with class predictions, one probability per class
multiclass_logloss
python
dnouri/nolearn
nolearn/metrics.py
https://github.com/dnouri/nolearn/blob/master/nolearn/metrics.py
MIT
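A quick worked example of the metric above, with values chosen by hand:

import numpy as np

actual = np.array([0, 2, 1])                     # integer class labels
predicted = np.array([[0.8, 0.1, 0.1],
                      [0.2, 0.2, 0.6],
                      [0.3, 0.4, 0.3]])
# -(ln 0.8 + ln 0.6 + ln 0.4) / 3 ~= 0.550
print(multiclass_logloss(actual, predicted))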
def __call__(self, dataset, classifier, steps=10, verbose=0, random_state=42): """Create a learning curve that uses more training cases with each step. :param dataset: Dataset to work with :type dataset: :class:`~nolearn.dataset.Dataset` :param classifier: Classifier for fitting and making predictions. :type classifier: :class:`~sklearn.base.BaseEstimator` :param steps: Number of steps in the learning curve. :type steps: int :result: 3-tuple with lists `scores_train`, `scores_test`, `sizes` Drawing the resulting learning curve can be done like this: .. code-block:: python dataset = Dataset() clf = LogisticRegression() scores_train, scores_test, sizes = learning_curve(dataset, clf) pl.plot(sizes, scores_train, 'b', label='training set') pl.plot(sizes, scores_test, 'r', label='test set') pl.legend(loc='lower right') pl.show() """ X_train, X_test, y_train, y_test = dataset.train_test_split() scores_train = [] scores_test = [] sizes = [] if verbose: print(" n train test") for frac in np.linspace(0.1, 1.0, num=steps): frac_size = int(X_train.shape[0] * frac) sizes.append(frac_size) X_train1 = X_train[:frac_size] y_train1 = y_train[:frac_size] clf = clone(classifier) clf.fit(X_train1, y_train1) predict_train = self.predict(clf, X_train1) predict_test = self.predict(clf, X_test) score_train = self.score_func(y_train1, predict_train) score_test = self.score_func(y_test, predict_test) scores_train.append(score_train) scores_test.append(score_test) if verbose: print(" %8d %0.4f %0.4f" % ( frac_size, score_train, score_test)) return scores_train, scores_test, sizes
Create a learning curve that uses more training cases with each step. :param dataset: Dataset to work with :type dataset: :class:`~nolearn.dataset.Dataset` :param classifier: Classifier for fitting and making predictions. :type classifier: :class:`~sklearn.base.BaseEstimator` :param steps: Number of steps in the learning curve. :type steps: int :result: 3-tuple with lists `scores_train`, `scores_test`, `sizes` Drawing the resulting learning curve can be done like this: .. code-block:: python dataset = Dataset() clf = LogisticRegression() scores_train, scores_test, sizes = learning_curve(dataset, clf) pl.plot(sizes, scores_train, 'b', label='training set') pl.plot(sizes, scores_test, 'r', label='test set') pl.legend(loc='lower right') pl.show()
__call__
python
dnouri/nolearn
nolearn/metrics.py
https://github.com/dnouri/nolearn/blob/master/nolearn/metrics.py
MIT
def __init__(self, save_to=None, write_every=8): """ :param save_to: If given, `save_to` must be a path into which I will write weight statistics in CSV format. """ self.last_weights = None self.history = [] self.save_to = save_to self.write_every = write_every self._dictwriter = None self._save_to_file = None
:param save_to: If given, `save_to` must be a path into which I will write weight statistics in CSV format.
__init__
python
dnouri/nolearn
nolearn/lasagne/handlers.py
https://github.com/dnouri/nolearn/blob/master/nolearn/lasagne/handlers.py
MIT
def objective(layers, loss_function, target, aggregate=aggregate, deterministic=False, l1=0, l2=0, get_output_kw=None): """ Default implementation of the NeuralNet objective. :param layers: The underlying layers of the NeuralNetwork :param loss_function: The callable loss function to use :param target: the expected output :param aggregate: the aggregation function to use :param deterministic: Whether or not to get a deterministic output :param l1: Optional l1 regularization parameter :param l2: Optional l2 regularization parameter :param get_output_kw: optional kwargs to pass to :meth:`NeuralNetwork.get_output` :return: The total calculated loss """ if get_output_kw is None: get_output_kw = {} output_layer = layers[-1] network_output = get_output( output_layer, deterministic=deterministic, **get_output_kw) loss = aggregate(loss_function(network_output, target)) if l1: loss += regularization.regularize_layer_params( layers.values(), regularization.l1) * l1 if l2: loss += regularization.regularize_layer_params( layers.values(), regularization.l2) * l2 return loss
Default implementation of the NeuralNet objective. :param layers: The underlying layers of the NeuralNetwork :param loss_function: The callable loss function to use :param target: the expected output :param aggregate: the aggregation function to use :param deterministic: Whether or not to get a deterministic output :param l1: Optional l1 regularization parameter :param l2: Optional l2 regularization parameter :param get_output_kw: optional kwargs to pass to :meth:`NeuralNetwork.get_output` :return: The total calculated loss
objective
python
dnouri/nolearn
nolearn/lasagne/base.py
https://github.com/dnouri/nolearn/blob/master/nolearn/lasagne/base.py
MIT
def __init__( self, layers, update=nesterov_momentum, loss=None, # BBB objective=objective, objective_loss_function=None, batch_iterator_train=BatchIterator(batch_size=128), batch_iterator_test=BatchIterator(batch_size=128), regression=False, max_epochs=100, train_split=TrainSplit(eval_size=0.2), custom_scores=None, scores_train=None, scores_valid=None, X_tensor_type=None, y_tensor_type=None, use_label_encoder=False, on_batch_finished=None, on_epoch_finished=None, on_training_started=None, on_training_finished=None, more_params=None, check_input=True, verbose=0, **kwargs ): """ Initialize a Neural Network Parameters ---------- layers: A list of lasagne layers to compose into the final neural net. See :ref:`layer-def` update: The update function to use when training. Uses the form provided by the :mod:`lasagne.updates` implementations. objective: The objective function to use when training. The callable will be passed the NeuralNetwork's :attr:`.layers_` attribute as the first argument, and the output target as the second argument. max_epochs: The number of epochs to train. This is used as the default when calling the :meth:`.fit` method without an epochs argument. Other Parameters ---------------- batch_iterator_train: The sample iterator to use while training the network. batch_iterator_test: The sample Iterator to use while testing and validating the network. regression: Whether or not this is a regressor network. Determines the default objective and scoring functions. train_split: The method used to separate training and validation samples. See :class:`TrainSplit` for the default implementation. y_tensor_type: The type of tensor to use to hold the network's output. Typically ``T.ivector`` (the default) for classification tasks. on_training_started, on_batch_finished, on_epoch_finished, on_training_finished: A list of functions which are called during training at the corresponding times. The functions will be passed the NeuralNet as the first parameter and its :attr:`.train_history_` attribute as the second parameter. custom_scores: A list of callable custom scoring functions. The functions will be passed the expected y values as the first argument, and the predicted y_values as the second argument. use_label_encoder: If true, all y_values will be encoded using a :class:`sklearn.preprocessing.LabelEncoder` instance. verbose: The verbosity level of the network. Any non-zero value will cause the network to print the layer info at the start of training, as well as print a log of the training history after each epoch. Larger values will increase the amount of info shown. more_params: A set of more parameters to use when initializing layers defined using the dictionary method. Note ---- * Extra arguments can be passed to the call to the *update* function by prepending the string ``update_`` to the corresponding argument name, e.g. ``update_learning_rate=0.01`` will define the ``learning_rate`` parameter of the update function. * Extra arguments can be provided to the objective call through the Neural Network by prepending the string ``objective_`` to the corresponding argument name. 
""" if loss is not None: raise ValueError( "The 'loss' parameter was removed, please use " "'objective_loss_function' instead.") # BBB if hasattr(objective, 'get_loss'): raise ValueError( "The 'Objective' class is no longer supported, please " "use 'nolearn.lasagne.objective' or similar.") # BBB if objective_loss_function is None: objective_loss_function = ( squared_error if regression else categorical_crossentropy) if hasattr(self, 'train_test_split'): # BBB warn("The 'train_test_split' method has been deprecated, please " "use the 'train_split' parameter instead.") train_split = LegacyTrainTestSplit( eval_size=kwargs.pop('eval_size', 0.2)) if 'eval_size' in kwargs: # BBB warn("The 'eval_size' argument has been deprecated, please use " "the 'train_split' parameter instead, e.g.\n" "train_split=TrainSplit(eval_size=0.4)") train_split.eval_size = kwargs.pop('eval_size') if y_tensor_type is None: if regression: y_tensor_type = T.TensorType( theano.config.floatX, (False, False)) else: y_tensor_type = T.ivector if X_tensor_type is not None: raise ValueError( "The 'X_tensor_type' parameter has been removed. " "It's unnecessary.") # BBB if 'custom_score' in kwargs: warn("The 'custom_score' argument has been deprecated, please use " "the 'custom_scores' parameter instead, which is just " "a list of custom scores e.g.\n" "custom_scores=[('first output', lambda y1, y2: abs(y1[0,0]-y2[0,0])), ('second output', lambda y1,y2: abs(y1[0,1]-y2[0,1]))]") # add it to custom_scores if custom_scores is None: custom_scores = [kwargs.pop('custom_score')] else: custom_scores.append(kwargs.pop('custom_score')) if isinstance(layers, Layer): layers = _list([layers]) elif isinstance(layers, Iterable): layers = _list(layers) self.layers = layers self.update = update self.objective = objective self.objective_loss_function = objective_loss_function self.batch_iterator_train = batch_iterator_train self.batch_iterator_test = batch_iterator_test self.regression = regression self.max_epochs = max_epochs self.train_split = train_split self.custom_scores = custom_scores self.scores_train = scores_train or [] self.scores_valid = scores_valid or [] self.y_tensor_type = y_tensor_type self.use_label_encoder = use_label_encoder self.on_batch_finished = on_batch_finished or [] self.on_epoch_finished = on_epoch_finished or [] self.on_training_started = on_training_started or [] self.on_training_finished = on_training_finished or [] self.more_params = more_params or {} self.check_input = check_input self.verbose = verbose if self.verbose: # XXX: PrintLog should come before any other handlers, # because early stopping will otherwise cause the last # line not to be printed self.on_epoch_finished.append(PrintLog()) self.on_training_started.append(PrintLayerInfo()) for key in kwargs.keys(): assert not hasattr(self, key) vars(self).update(kwargs) self._kwarg_keys = list(kwargs.keys()) self.train_history_ = [] if 'batch_iterator' in kwargs: # BBB raise ValueError( "The 'batch_iterator' argument has been replaced. " "Use 'batch_iterator_train' and 'batch_iterator_test' instead." )
Initialize a Neural Network Parameters ---------- layers: A list of lasagne layers to compose into the final neural net. See :ref:`layer-def` update: The update function to use when training. Uses the form provided by the :mod:`lasagne.updates` implementations. objective: The objective function to use when training. The callable will be passed the NeuralNetwork's :attr:`.layers_` attribute as the first argument, and the output target as the second argument. max_epochs: The number of epochs to train. This is used as the default when calling the :meth:`.fit` method without an epochs argument. Other Parameters ---------------- batch_iterator_train: The sample iterator to use while training the network. batch_iterator_test: The sample Iterator to use while testing and validating the network. regression: Whether or not this is a regressor network. Determines the default objective and scoring functions. train_split: The method used to separate training and validation samples. See :class:`TrainSplit` for the default implementation. y_tensor_type: The type of tensor to use to hold the network's output. Typically ``T.ivector`` (the default) for classification tasks. on_training_started, on_batch_finished, on_epoch_finished, on_training_finished: A list of functions which are called during training at the corresponding times. The functions will be passed the NeuralNet as the first parameter and its :attr:`.train_history_` attribute as the second parameter. custom_scores: A list of callable custom scoring functions. The functions will be passed the expected y values as the first argument, and the predicted y_values as the second argument. use_label_encoder: If true, all y_values will be encoded using a :class:`sklearn.preprocessing.LabelEncoder` instance. verbose: The verbosity level of the network. Any non-zero value will cause the network to print the layer info at the start of training, as well as print a log of the training history after each epoch. Larger values will increase the amount of info shown. more_params: A set of more parameters to use when initializing layers defined using the dictionary method. Note ---- * Extra arguments can be passed to the call to the *update* function by prepending the string ``update_`` to the corresponding argument name, e.g. ``update_learning_rate=0.01`` will define the ``learning_rate`` parameter of the update function. * Extra arguments can be provided to the objective call through the Neural Network by prepending the string ``objective_`` to the corresponding argument name.
__init__
python
dnouri/nolearn
nolearn/lasagne/base.py
https://github.com/dnouri/nolearn/blob/master/nolearn/lasagne/base.py
MIT
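A sketch illustrating the prefixed-kwargs convention from the Note above: `update_*` arguments are forwarded to the update function and `objective_*` arguments to the objective (here `objective_l2`, matching the `l2` parameter of the default objective shown earlier). The layer sizes are placeholders for an MNIST-like task.

from nolearn.lasagne import NeuralNet
from lasagne.layers import InputLayer, DenseLayer
from lasagne.nonlinearities import softmax
from lasagne.updates import nesterov_momentum

net = NeuralNet(
    layers=[
        (InputLayer, {'shape': (None, 784)}),
        (DenseLayer, {'num_units': 300}),
        (DenseLayer, {'num_units': 10, 'nonlinearity': softmax}),
    ],
    update=nesterov_momentum,
    update_learning_rate=0.01,  # forwarded to nesterov_momentum
    update_momentum=0.9,
    objective_l2=1e-4,          # forwarded to the objective
    max_epochs=20,
    verbose=1,
)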
def initialize(self): """Initializes the network. Checks that no extra kwargs were passed to the constructor, and compiles the train, predict, and evaluation functions. Subsequent calls to this function will return without any action. """ if getattr(self, '_initialized', False): return out = getattr(self, '_output_layers', None) if out is None: self.initialize_layers() self._check_for_unused_kwargs() iter_funcs = self._create_iter_funcs( self.layers_, self.objective, self.update, self.y_tensor_type, ) self.train_iter_, self.eval_iter_, self.predict_iter_ = iter_funcs self._initialized = True
Initializes the network. Checks that no extra kwargs were passed to the constructor, and compiles the train, predict, and evaluation functions. Subsequent calls to this function will return without any action.
initialize
python
dnouri/nolearn
nolearn/lasagne/base.py
https://github.com/dnouri/nolearn/blob/master/nolearn/lasagne/base.py
MIT
def initialize_layers(self, layers=None): """Sets up the Lasagne layers :param layers: The dictionary of layers, or a :class:`lasagne.Layers` instance, describing the underlying network :return: the output layer of the underlying lasagne network. :seealso: :ref:`layer-def` """ if layers is not None: self.layers = layers self.layers_ = Layers() # If a Layer, or a list of Layers, was passed in if isinstance(self.layers[0], Layer): for out_layer in self.layers: for i, layer in enumerate(get_all_layers(out_layer)): if layer not in self.layers_.values(): name = layer.name or self._layer_name(layer.__class__, i) self.layers_[name] = layer if self._get_params_for(name) != {}: raise ValueError( "You can't use keyword params when passing a Lasagne " "instance object as the 'layers' parameter of " "'NeuralNet'." ) self._output_layers = self.layers return self.layers # 'self.layers' is a list of '(Layer class, kwargs)', so # we'll have to actually instantiate the layers given the # arguments: layer = None for i, layer_def in enumerate(self.layers): if isinstance(layer_def[1], dict): # Newer format: (Layer, {'layer': 'kwargs'}) layer_factory, layer_kw = layer_def layer_kw = layer_kw.copy() else: # The legacy format: ('name', Layer) layer_name, layer_factory = layer_def layer_kw = {'name': layer_name} if isinstance(layer_factory, str): layer_factory = locate(layer_factory) assert layer_factory is not None if 'name' not in layer_kw: layer_kw['name'] = self._layer_name(layer_factory, i) more_params = self._get_params_for(layer_kw['name']) layer_kw.update(more_params) if layer_kw['name'] in self.layers_: raise ValueError( "Two layers with name {}.".format(layer_kw['name'])) # Any layers that aren't subclasses of InputLayer are # assumed to require an 'incoming' parameter. By default, # we'll use the previous layer as input: try: is_input_layer = issubclass(layer_factory, InputLayer) except TypeError: is_input_layer = False if not is_input_layer: if 'incoming' in layer_kw: layer_kw['incoming'] = self.layers_[ layer_kw['incoming']] elif 'incomings' in layer_kw: layer_kw['incomings'] = [ self.layers_[name] for name in layer_kw['incomings']] else: layer_kw['incoming'] = layer # Deal with additional string parameters that may # reference other layers; currently only 'mask_input'. for param in self.layer_reference_params: if param in layer_kw: val = layer_kw[param] if isinstance(val, basestring): layer_kw[param] = self.layers_[val] for attr in ('W', 'b'): if isinstance(layer_kw.get(attr), str): name = layer_kw[attr] layer_kw[attr] = getattr(self.layers_[name], attr, None) try: layer_wrapper = layer_kw.pop('layer_wrapper', None) layer = layer_factory(**layer_kw) except TypeError as e: msg = ("Failed to instantiate {} with args {}.\n" "Maybe parameter names have changed?".format( layer_factory, layer_kw)) chain_exception(TypeError(msg), e) self.layers_[layer_kw['name']] = layer if layer_wrapper is not None: layer = layer_wrapper(layer) self.layers_["LW_%s" % layer_kw['name']] = layer self._output_layers = [layer] return [layer]
Sets up the Lasagne layers :param layers: The dictionary of layers, or a :class:`lasagne.Layers` instance, describing the underlying network :return: the output layer of the underlying lasagne network. :seealso: :ref:`layer-def`
initialize_layers
python
dnouri/nolearn
nolearn/lasagne/base.py
https://github.com/dnouri/nolearn/blob/master/nolearn/lasagne/base.py
MIT
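As a minimal sketch of the two layer-definition formats that initialize_layers accepts (the layer classes come from lasagne; the names and sizes here are hypothetical):

from lasagne.layers import InputLayer, DenseLayer

# Newer format: a list of (Layer class, kwargs) tuples
layers_new = [
    (InputLayer, {'shape': (None, 784)}),
    (DenseLayer, {'name': 'hidden', 'num_units': 100}),
    (DenseLayer, {'name': 'output', 'num_units': 10}),
]

# Legacy format: ('name', Layer class) tuples; layer kwargs are then
# passed to NeuralNet as e.g. hidden_num_units=100 instead
layers_legacy = [
    ('input', InputLayer),
    ('hidden', DenseLayer),
    ('output', DenseLayer),
]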
def fit(self, X, y, epochs=None):
    """
    Runs the training loop for a given number of epochs

    :param X: The input data
    :param y: The ground truth
    :param epochs: The number of epochs to run, if `None` runs for the
        network's :attr:`max_epochs`
    :return: This instance
    """
    if self.check_input:
        X, y = self._check_good_input(X, y)

    if self.use_label_encoder:
        self.enc_ = LabelEncoder()
        y = self.enc_.fit_transform(y).astype(np.int32)
        self.classes_ = self.enc_.classes_
    self.initialize()

    try:
        self.train_loop(X, y, epochs=epochs)
    except KeyboardInterrupt:
        pass
    return self
Runs the training loop for a given number of epochs :param X: The input data :param y: The ground truth :param epochs: The number of epochs to run, if `None` runs for the network's :attr:`max_epochs` :return: This instance
fit
python
dnouri/nolearn
nolearn/lasagne/base.py
https://github.com/dnouri/nolearn/blob/master/nolearn/lasagne/base.py
MIT
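A usage sketch, assuming the layer definitions above and random placeholder data (the hyperparameters are illustrative, not recommendations):

import numpy as np
from lasagne.layers import InputLayer, DenseLayer
from lasagne.nonlinearities import softmax
from nolearn.lasagne import NeuralNet

X = np.random.rand(100, 784).astype(np.float32)   # hypothetical data
y = np.random.randint(0, 10, size=100).astype(np.int32)

net = NeuralNet(
    layers=[(InputLayer, {'shape': (None, 784)}),
            (DenseLayer, {'num_units': 64}),
            (DenseLayer, {'num_units': 10, 'nonlinearity': softmax})],
    update_learning_rate=0.01,
    max_epochs=10,
)
net.fit(X, y)            # runs up to max_epochs
net.fit(X, y, epochs=5)  # or an explicit number of epochs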
def partial_fit(self, X, y, classes=None):
    """
    Runs a single epoch using the provided data

    :return: This instance
    """
    return self.fit(X, y, epochs=1)
Runs a single epoch using the provided data :return: This instance
partial_fit
python
dnouri/nolearn
nolearn/lasagne/base.py
https://github.com/dnouri/nolearn/blob/master/nolearn/lasagne/base.py
MIT
def plot_conv_weights(layer, figsize=(6, 6)):
    """Plot the weights of a specific layer.

    Only really makes sense with convolutional layers.

    Parameters
    ----------
    layer : lasagne.layers.Layer
    """
    W = layer.W.get_value()
    shape = W.shape
    nrows = np.ceil(np.sqrt(shape[0])).astype(int)
    ncols = nrows

    for feature_map in range(shape[1]):
        figs, axes = plt.subplots(nrows, ncols, figsize=figsize, squeeze=False)

        for ax in axes.flatten():
            ax.set_xticks([])
            ax.set_yticks([])
            ax.axis('off')

        for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
            if i >= shape[0]:
                break
            axes[r, c].imshow(W[i, feature_map], cmap='gray',
                              interpolation='none')
    return plt
Plot the weights of a specific layer. Only really makes sense with convolutional layers. Parameters ---------- layer : lasagne.layers.Layer
plot_conv_weights
python
dnouri/nolearn
nolearn/lasagne/visualize.py
https://github.com/dnouri/nolearn/blob/master/nolearn/lasagne/visualize.py
MIT
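A usage sketch, assuming a trained `net` with a convolutional layer named 'conv1' (the layer name is hypothetical):

from nolearn.lasagne.visualize import plot_conv_weights

plt = plot_conv_weights(net.layers_['conv1'], figsize=(6, 6))
plt.show()  # one figure per input feature map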
def plot_conv_activity(layer, x, figsize=(6, 8)):
    """Plot the activities of a specific layer.

    Only really makes sense with layers that work on 2D data (2D
    convolutional layers, 2D pooling layers ...).

    Parameters
    ----------
    layer : lasagne.layers.Layer
    x : numpy.ndarray
        Only takes one sample at a time, i.e. x.shape[0] == 1.
    """
    if x.shape[0] != 1:
        raise ValueError("Only one sample can be plotted at a time.")

    # compile theano function
    xs = T.tensor4('xs').astype(theano.config.floatX)
    get_activity = theano.function([xs], get_output(layer, xs))

    activity = get_activity(x)
    shape = activity.shape
    nrows = np.ceil(np.sqrt(shape[1])).astype(int)
    ncols = nrows

    figs, axes = plt.subplots(nrows + 1, ncols, figsize=figsize, squeeze=False)
    axes[0, ncols // 2].imshow(1 - x[0][0], cmap='gray',
                               interpolation='none')
    axes[0, ncols // 2].set_title('original')

    for ax in axes.flatten():
        ax.set_xticks([])
        ax.set_yticks([])
        ax.axis('off')

    for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
        if i >= shape[1]:
            break
        ndim = activity[0][i].ndim
        if ndim != 2:
            raise ValueError("Wrong number of dimensions, image data should "
                             "have 2, instead got {}".format(ndim))
        axes[r + 1, c].imshow(-activity[0][i], cmap='gray',
                              interpolation='none')
    return plt
Plot the activities of a specific layer. Only really makes sense with layers that work on 2D data (2D convolutional layers, 2D pooling layers ...). Parameters ---------- layer : lasagne.layers.Layer x : numpy.ndarray Only takes one sample at a time, i.e. x.shape[0] == 1.
plot_conv_activity
python
dnouri/nolearn
nolearn/lasagne/visualize.py
https://github.com/dnouri/nolearn/blob/master/nolearn/lasagne/visualize.py
MIT
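A usage sketch, again assuming the hypothetical `net` and `X` from above; note the function takes exactly one sample:

from nolearn.lasagne.visualize import plot_conv_activity

x = X[0:1]  # exactly one sample, shape (1, c, h, w)
plt = plot_conv_activity(net.layers_['conv1'], x)
plt.show()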
def occlusion_heatmap(net, x, target, square_length=7):
    """An occlusion test that checks an image for its critical parts.

    In this function, a square part of the image is occluded (i.e. set
    to 0) and then the net is tested for its propensity to predict the
    correct label. One should expect that this propensity shrinks if
    critical parts of the image are occluded. If not, this indicates
    overfitting.

    Depending on the depth of the net and the size of the image, this
    function may take a while to finish, since one prediction for each
    pixel of the image is made.

    Currently, all color channels are occluded at the same time. Also,
    this does not really work if images are randomly distorted by the
    batch iterator.

    See paper: Zeiler, Fergus 2013

    Parameters
    ----------
    net : NeuralNet instance
      The neural net to test.
    x : np.array
      The input data, should be of shape (1, c, x, y). Only makes
      sense with image data.
    target : int
      The true value of the image. If the net makes several
      predictions, say 10 classes, this indicates which one to look
      at.
    square_length : int (default=7)
      The length of the side of the square that occludes the image.
      Must be an odd number.

    Returns
    -------
    heat_array : np.array (with same size as image)
      A 2D np.array that at each point (i, j) contains the predicted
      probability of the correct class if the image is occluded by a
      square with center (i, j).
    """
    if (x.ndim != 4) or x.shape[0] != 1:
        raise ValueError("This function requires the input data to be of "
                         "shape (1, c, x, y), instead got {}".format(x.shape))
    if square_length % 2 == 0:
        raise ValueError("Square length has to be an odd number, instead "
                         "got {}.".format(square_length))

    num_classes = get_output_shape(net.layers_[-1])[1]
    img = x[0].copy()
    bs, col, s0, s1 = x.shape

    heat_array = np.zeros((s0, s1))
    pad = square_length // 2 + 1
    x_occluded = np.zeros((s1, col, s0, s1), dtype=img.dtype)
    probs = np.zeros((s0, s1, num_classes))

    # generate occluded images
    for i in range(s0):
        # batch s1 occluded images for faster prediction
        for j in range(s1):
            x_pad = np.pad(img, ((0, 0), (pad, pad), (pad, pad)), 'constant')
            x_pad[:, i:i + square_length, j:j + square_length] = 0.
            x_occluded[j] = x_pad[:, pad:-pad, pad:-pad]
        y_proba = net.predict_proba(x_occluded)
        probs[i] = y_proba.reshape(s1, num_classes)

    # from predicted probabilities, pick only those of target class
    for i in range(s0):
        for j in range(s1):
            heat_array[i, j] = probs[i, j, target]
    return heat_array
An occlusion test that checks an image for its critical parts. In this function, a square part of the image is occluded (i.e. set to 0) and then the net is tested for its propensity to predict the correct label. One should expect that this propensity shrinks if critical parts of the image are occluded. If not, this indicates overfitting. Depending on the depth of the net and the size of the image, this function may take a while to finish, since one prediction for each pixel of the image is made. Currently, all color channels are occluded at the same time. Also, this does not really work if images are randomly distorted by the batch iterator. See paper: Zeiler, Fergus 2013 Parameters ---------- net : NeuralNet instance The neural net to test. x : np.array The input data, should be of shape (1, c, x, y). Only makes sense with image data. target : int The true value of the image. If the net makes several predictions, say 10 classes, this indicates which one to look at. square_length : int (default=7) The length of the side of the square that occludes the image. Must be an odd number. Returns ------- heat_array : np.array (with same size as image) A 2D np.array that at each point (i, j) contains the predicted probability of the correct class if the image is occluded by a square with center (i, j).
occlusion_heatmap
python
dnouri/nolearn
nolearn/lasagne/visualize.py
https://github.com/dnouri/nolearn/blob/master/nolearn/lasagne/visualize.py
MIT
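A usage sketch, continuing with the hypothetical `net`, `X`, and `y` from earlier:

from nolearn.lasagne.visualize import occlusion_heatmap

heat = occlusion_heatmap(net, X[0:1], target=int(y[0]), square_length=7)
# heat[i, j] is the predicted probability of the true class when a
# 7x7 square centred on pixel (i, j) is blanked out; low values mark
# the regions the net relies on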
def plot_occlusion(net, X, target, square_length=7, figsize=(9, None)):
    """Plot which parts of an image are particularly important for the
    net to classify the image correctly.

    See paper: Zeiler, Fergus 2013

    Parameters
    ----------
    net : NeuralNet instance
      The neural net to test.
    X : numpy.array
      The input data, should be of shape (b, c, 0, 1). Only makes
      sense with image data.
    target : list or numpy.array of ints
      The true values of the image. If the net makes several
      predictions, say 10 classes, this indicates which one to look
      at. If more than one sample is passed to X, each of them needs
      its own target.
    square_length : int (default=7)
      The length of the side of the square that occludes the image.
      Must be an odd number.
    figsize : tuple (int, int)
      Size of the figure.

    Plots
    -----
    Figure with 3 subplots: the original image, the occlusion heatmap,
    and both images super-imposed.
    """
    return _plot_heat_map(
        net, X, figsize, lambda net, X, n: occlusion_heatmap(
            net, X, target[n], square_length))
Plot which parts of an image are particularly important for the net to classify the image correctly. See paper: Zeiler, Fergus 2013 Parameters ---------- net : NeuralNet instance The neural net to test. X : numpy.array The input data, should be of shape (b, c, 0, 1). Only makes sense with image data. target : list or numpy.array of ints The true values of the image. If the net makes several predictions, say 10 classes, this indicates which one to look at. If more than one sample is passed to X, each of them needs its own target. square_length : int (default=7) The length of the side of the square that occludes the image. Must be an odd number. figsize : tuple (int, int) Size of the figure. Plots ----- Figure with 3 subplots: the original image, the occlusion heatmap, and both images super-imposed.
plot_occlusion
python
dnouri/nolearn
nolearn/lasagne/visualize.py
https://github.com/dnouri/nolearn/blob/master/nolearn/lasagne/visualize.py
MIT
def get_hex_color(layer_type):
    """
    Determines the hex color for a layer.

    :parameters:
        - layer_type : string
            Class name of the layer

    :returns:
        - color : string containing a hex color for filling block.
    """
    COLORS = ['#4A88B3', '#98C1DE', '#6CA2C8', '#3173A2', '#17649B',
              '#FFBB60', '#FFDAA9', '#FFC981', '#FCAC41', '#F29416',
              '#C54AAA', '#E698D4', '#D56CBE', '#B72F99', '#B0108D',
              '#75DF54', '#B3F1A0', '#91E875', '#5DD637', '#3FCD12']
    hashed = int(hash(layer_type)) % 5
    if "conv" in layer_type.lower():
        return COLORS[:5][hashed]
    if layer_type in lasagne.layers.pool.__all__:
        return COLORS[5:10][hashed]
    if layer_type in lasagne.layers.recurrent.__all__:
        return COLORS[10:15][hashed]
    else:
        return COLORS[15:20][hashed]
Determines the hex color for a layer. :parameters: - layer_type : string Class name of the layer :returns: - color : string containing a hex color for filling block.
get_hex_color
python
dnouri/nolearn
nolearn/lasagne/visualize.py
https://github.com/dnouri/nolearn/blob/master/nolearn/lasagne/visualize.py
MIT
def make_pydot_graph(layers, output_shape=True, verbose=False):
    """
    :parameters:
        - layers : list
            List of the layers, as obtained from lasagne.layers.get_all_layers
        - output_shape : (default `True`)
            If `True`, the output shape of each layer will be displayed.
        - verbose : (default `False`)
            If `True`, layer attributes like filter shape, stride, etc.
            will be displayed.

    :returns:
        - pydot_graph : PyDot object containing the graph
    """
    import pydotplus as pydot
    pydot_graph = pydot.Dot('Network', graph_type='digraph')
    pydot_nodes = {}
    pydot_edges = []
    for i, layer in enumerate(layers):
        layer_name = getattr(layer, 'name', None)
        if layer_name is None:
            layer_name = layer.__class__.__name__
        layer_type = '{0}'.format(layer_name)
        key = repr(layer)
        label = layer_type
        color = get_hex_color(layer_type)
        if verbose:
            for attr in ['num_filters', 'num_units', 'ds',
                         'filter_shape', 'stride', 'strides', 'p']:
                if hasattr(layer, attr):
                    label += '\n{0}: {1}'.format(attr, getattr(layer, attr))
            if hasattr(layer, 'nonlinearity'):
                try:
                    nonlinearity = layer.nonlinearity.__name__
                except AttributeError:
                    nonlinearity = layer.nonlinearity.__class__.__name__
                label += '\nnonlinearity: {0}'.format(nonlinearity)

        if output_shape:
            label += '\nOutput shape: {0}'.format(layer.output_shape)

        pydot_nodes[key] = pydot.Node(
            key, label=label, shape='record', fillcolor=color, style='filled')

        if hasattr(layer, 'input_layers'):
            for input_layer in layer.input_layers:
                pydot_edges.append([repr(input_layer), key])

        if hasattr(layer, 'input_layer'):
            pydot_edges.append([repr(layer.input_layer), key])

    for node in pydot_nodes.values():
        pydot_graph.add_node(node)
    for edges in pydot_edges:
        pydot_graph.add_edge(
            pydot.Edge(pydot_nodes[edges[0]], pydot_nodes[edges[1]]))
    return pydot_graph
:parameters: - layers : list List of the layers, as obtained from lasagne.layers.get_all_layers - output_shape: (default `True`) If `True`, the output shape of each layer will be displayed. - verbose: (default `False`) If `True`, layer attributes like filter shape, stride, etc. will be displayed. :returns: - pydot_graph : PyDot object containing the graph
make_pydot_graph
python
dnouri/nolearn
nolearn/lasagne/visualize.py
https://github.com/dnouri/nolearn/blob/master/nolearn/lasagne/visualize.py
MIT
def draw_to_file(layers, filename, **kwargs):
    """
    Draws a network diagram to a file

    :parameters:
        - layers : list or NeuralNet instance
            List of layers or the neural net to draw.
        - filename : string
            The filename to save output to
        - **kwargs : see docstring of make_pydot_graph for other options
    """
    layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers')
              else layers)
    dot = make_pydot_graph(layers, **kwargs)
    ext = filename[filename.rfind('.') + 1:]
    with io.open(filename, 'wb') as fid:
        fid.write(dot.create(format=ext))
Draws a network diagram to a file :parameters: - layers : list or NeuralNet instance List of layers or the neural net to draw. - filename : string The filename to save output to - **kwargs: see docstring of make_pydot_graph for other options
draw_to_file
python
dnouri/nolearn
nolearn/lasagne/visualize.py
https://github.com/dnouri/nolearn/blob/master/nolearn/lasagne/visualize.py
MIT
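A usage sketch, assuming the hypothetical trained `net` from earlier; the output format is inferred from the file extension:

from nolearn.lasagne.visualize import draw_to_file

draw_to_file(net, 'network.png', output_shape=True, verbose=True)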
def draw_to_notebook(layers, **kwargs):
    """
    Draws a network diagram in an IPython notebook

    :parameters:
        - layers : list or NeuralNet instance
            List of layers or the neural net to draw.
        - **kwargs : see the docstring of make_pydot_graph for other options
    """
    from IPython.display import Image
    layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers')
              else layers)
    dot = make_pydot_graph(layers, **kwargs)
    return Image(dot.create_png())
Draws a network diagram in an IPython notebook :parameters: - layers : list or NeuralNet instance List of layers or the neural net to draw. - **kwargs : see the docstring of make_pydot_graph for other options
draw_to_notebook
python
dnouri/nolearn
nolearn/lasagne/visualize.py
https://github.com/dnouri/nolearn/blob/master/nolearn/lasagne/visualize.py
MIT
def get_real_filter(layers, img_size):
    """Get the real filter sizes of each layer involved in
    convolution. See Xudong Cao:
    https://www.kaggle.com/c/datasciencebowl/forums/t/13166/happy-lantern-festival-report-and-code
    This does not yet take into consideration feature pooling,
    padding, striding and similar gimmicks.
    """
    real_filter = np.zeros((len(layers), 2))
    conv_mode = True
    expon = np.ones((1, 2))

    for i, layer in enumerate(layers[1:]):
        j = i + 1
        if not conv_mode:
            real_filter[j] = img_size
            continue

        if is_conv2d(layer):
            # the effective filter size is the nominal filter size
            # scaled by the accumulated pooling factor
            real_filter[j] = np.array(layer.filter_size) * expon
        elif is_maxpool2d(layer):
            real_filter[j] = real_filter[i]
            expon *= np.array(layer.pool_size)
        else:
            conv_mode = False
            real_filter[j] = img_size

    real_filter[0] = img_size
    return real_filter
Get the real filter sizes of each layer involved in convolution. See Xudong Cao: https://www.kaggle.com/c/datasciencebowl/forums/t/13166/happy-lantern-festival-report-and-code This does not yet take into consideration feature pooling, padding, striding and similar gimmicks.
get_real_filter
python
dnouri/nolearn
nolearn/lasagne/util.py
https://github.com/dnouri/nolearn/blob/master/nolearn/lasagne/util.py
MIT
def get_receptive_field(layers, img_size):
    """Get the receptive field size of each layer involved in
    convolution. See Xudong Cao:
    https://www.kaggle.com/c/datasciencebowl/forums/t/13166/happy-lantern-festival-report-and-code
    This does not yet take into consideration feature pooling,
    padding, striding and similar gimmicks.
    """
    receptive_field = np.zeros((len(layers), 2))
    conv_mode = True
    first_conv_layer = True
    expon = np.ones((1, 2))

    for i, layer in enumerate(layers[1:]):
        j = i + 1
        if not conv_mode:
            receptive_field[j] = img_size
            continue

        if is_conv2d(layer):
            if not first_conv_layer:
                last_field = receptive_field[i]
                new_field = (last_field + expon *
                             (np.array(layer.filter_size) - 1))
                receptive_field[j] = new_field
            else:
                receptive_field[j] = layer.filter_size
                first_conv_layer = False
        elif is_maxpool2d(layer):
            receptive_field[j] = receptive_field[i]
            expon *= np.array(layer.pool_size)
        else:
            conv_mode = False
            receptive_field[j] = img_size

    receptive_field[0] = img_size
    return receptive_field
Get the receptive field size of each layer involved in convolution. See Xudong Cao: https://www.kaggle.com/c/datasciencebowl/forums/t/13166/happy-lantern-festival-report-and-code This does not yet take into consideration feature pooling, padding, striding and similar gimmicks.
get_receptive_field
python
dnouri/nolearn
nolearn/lasagne/util.py
https://github.com/dnouri/nolearn/blob/master/nolearn/lasagne/util.py
MIT
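A worked sketch under stated assumptions (layer names are hypothetical): for a stack [Input, Conv(3x3), MaxPool(2x2), Conv(3x3)] on a 28x28 input, the second conv layer sees 3 + 2 * (3 - 1) = 7 pixels per side, because the pooling doubles the step ('expon') of later filters.

from lasagne.layers import get_all_layers
from nolearn.lasagne.util import get_receptive_field

layers = get_all_layers(net.layers_['conv2'])  # 'conv2' is hypothetical
rf = get_receptive_field(layers, img_size=(28, 28))
# rf[j] holds the (height, width) receptive field of layer j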
def __init__(
    self,
    nlp: "Language",
    separator: str | None = "\n\n",
    attrs: dict[str, str] = {},
    headings: list[str] = [
        DocItemLabel.SECTION_HEADER,
        DocItemLabel.PAGE_HEADER,
        DocItemLabel.TITLE,
    ],
    display_table: Callable[["DataFrame"], str] | str = TABLE_PLACEHOLDER,
    docling_options: dict["InputFormat", "FormatOption"] | None = None,
) -> None:
    """Initialize the layout parser and Docling converter."""
    self.nlp = nlp
    self.sep = separator
    self.attrs = Attrs(
        doc_layout=attrs.get("doc_layout", "layout"),
        doc_pages=attrs.get("doc_pages", "pages"),
        doc_tables=attrs.get("doc_tables", "tables"),
        doc_markdown=attrs.get("doc_markdown", "markdown"),
        span_layout=attrs.get("span_layout", "layout"),
        span_heading=attrs.get("span_heading", "heading"),
        span_data=attrs.get("span_data", "data"),
        span_group=attrs.get("span_group", "layout"),
    )
    self.headings = headings
    self.display_table = display_table
    self.converter = DocumentConverter(format_options=docling_options)
    # Set spaCy extension attributes for custom data
    Doc.set_extension(self.attrs.doc_layout, default=None, force=True)
    Doc.set_extension(self.attrs.doc_pages, getter=self.get_pages, force=True)
    Doc.set_extension(self.attrs.doc_tables, getter=self.get_tables, force=True)
    Doc.set_extension(self.attrs.doc_markdown, default=None, force=True)
    Span.set_extension(self.attrs.span_layout, default=None, force=True)
    Span.set_extension(self.attrs.span_data, default=None, force=True)
    Span.set_extension(self.attrs.span_heading, getter=self.get_heading, force=True)
Initialize the layout parser and Docling converter.
__init__
python
explosion/spacy-layout
spacy_layout/layout.py
https://github.com/explosion/spacy-layout/blob/master/spacy_layout/layout.py
MIT
def __call__(self, source: str | Path | bytes | DoclingDocument) -> Doc:
    """Call parser on a path to create a spaCy Doc object."""
    if isinstance(source, DoclingDocument):
        result = source
    else:
        result = self.converter.convert(self._get_source(source)).document
    return self._result_to_doc(result)
Call parser on a path to create a spaCy Doc object.
__call__
python
explosion/spacy-layout
spacy_layout/layout.py
https://github.com/explosion/spacy-layout/blob/master/spacy_layout/layout.py
MIT
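A minimal usage sketch (the file path is hypothetical); the parser attaches layout information to the Doc and groups layout spans under the configured span group:

import spacy
from spacy_layout import spaCyLayout

nlp = spacy.blank("en")
layout = spaCyLayout(nlp)
doc = layout("./report.pdf")  # hypothetical path
for span in doc.spans["layout"]:
    print(span.label_, span._.heading, span._.layout)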
def pipe(
    self,
    sources: (
        Iterable[str | Path | bytes]
        | Iterable[tuple[str | Path | bytes, _AnyContext]]
    ),
    as_tuples: bool = False,
) -> Iterator[Doc] | Iterator[tuple[Doc, _AnyContext]]:
    """Process multiple documents and create spaCy Doc objects."""
    if as_tuples:
        sources = cast(Iterable[tuple[str | Path | bytes, _AnyContext]], sources)
        # Materialize the sources so the two generator expressions below
        # don't compete for items if a one-shot iterator was passed in
        sources = list(sources)
        data = (self._get_source(source) for source, _ in sources)
        contexts = (context for _, context in sources)
        results = self.converter.convert_all(data)
        for result, context in zip(results, contexts):
            yield (self._result_to_doc(result.document), context)
    else:
        sources = cast(Iterable[str | Path | bytes], sources)
        data = (self._get_source(source) for source in sources)
        results = self.converter.convert_all(data)
        for result in results:
            yield self._result_to_doc(result.document)
Process multiple documents and create spaCy Doc objects.
pipe
python
explosion/spacy-layout
spacy_layout/layout.py
https://github.com/explosion/spacy-layout/blob/master/spacy_layout/layout.py
MIT
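A usage sketch, reusing the `layout` parser from above (paths are hypothetical):

paths = ["a.pdf", "b.pdf"]
docs = list(layout.pipe(paths))

# carry per-document context through with as_tuples=True:
for doc, meta in layout.pipe([(p, {"source": p}) for p in paths],
                             as_tuples=True):
    print(meta["source"], len(doc))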
def _texts_to_doc(
    self, inputs: list[tuple[str, DoclingItem]], pages: dict[int, PageLayout]
) -> Doc:
    """Convert Docling structure to spaCy Doc."""
    words = []
    spaces = []
    span_data = []
    token_idx = 0
    # Tokenize the span because we can't rely on the document parsing to
    # give us items that are not split across token boundaries
    with self.nlp.select_pipes(disable=self.nlp.pipe_names):
        for span_doc, item in self.nlp.pipe(inputs, as_tuples=True):
            words += [token.text for token in span_doc]
            spaces += [bool(token.whitespace_) for token in span_doc]
            # Add separator token and don't include it in the layout span
            if self.sep:
                words.append(self.sep)
                spaces[-1] = False
                spaces.append(False)
            end = token_idx + len(span_doc)
            span_data.append((item, token_idx, end))
            token_idx += len(span_doc) + (1 if self.sep else 0)
    doc = Doc(self.nlp.vocab, words=words, spaces=spaces)
    spans = []
    for i, (item, start, end) in enumerate(span_data):
        span = Span(doc, start=start, end=end, label=item.label, span_id=i)
        layout = self._get_span_layout(item, pages)
        span._.set(self.attrs.span_layout, layout)
        if item.label in TABLE_ITEM_LABELS:
            span._.set(self.attrs.span_data, item.export_to_dataframe())
        spans.append(span)
    doc.spans[self.attrs.span_group] = SpanGroup(
        doc, name=self.attrs.span_group, spans=spans
    )
    return doc
Convert Docling structure to spaCy Doc.
_texts_to_doc
python
explosion/spacy-layout
spacy_layout/layout.py
https://github.com/explosion/spacy-layout/blob/master/spacy_layout/layout.py
MIT
def get_pages(self, doc: Doc) -> list[tuple[PageLayout, list[Span]]]:
    """Get all pages and their layout spans."""
    layout = doc._.get(self.attrs.doc_layout)
    pages = {page.page_no: page for page in layout.pages}
    page_spans = {page.page_no: [] for page in layout.pages}
    for span in doc.spans[self.attrs.span_group]:
        span_layout = span._.get(self.attrs.span_layout)
        page_spans[span_layout.page_no].append(span)
    return [(pages[i], page_spans[i]) for i in page_spans]
Get all pages and their layout spans.
get_pages
python
explosion/spacy-layout
spacy_layout/layout.py
https://github.com/explosion/spacy-layout/blob/master/spacy_layout/layout.py
MIT
def get_heading(self, span: Span) -> Span | None:
    """Get the closest heading for a span."""
    spans = list(span.doc.spans[self.attrs.span_group])
    if span.label_ not in self.headings:
        # Go through previous layout spans in reverse and find first match
        for candidate in spans[:span.id][::-1]:
            if candidate.label_ in self.headings:
                return candidate
    return None
Get the closest heading for a span.
get_heading
python
explosion/spacy-layout
spacy_layout/layout.py
https://github.com/explosion/spacy-layout/blob/master/spacy_layout/layout.py
MIT
def get_tables(self, doc: Doc) -> list[Span]:
    """Get all tables in the document."""
    return [
        span
        for span in doc.spans[self.attrs.span_group]
        if span.label_ in TABLE_ITEM_LABELS
    ]
Get all tables in the document.
get_tables
python
explosion/spacy-layout
spacy_layout/layout.py
https://github.com/explosion/spacy-layout/blob/master/spacy_layout/layout.py
MIT
def encode_obj(obj: Any, chain: Callable | None = None) -> Any:
    """Convert custom dataclass to dict for serialization."""
    if isinstance(obj, tuple(OBJ_TYPES.values())):
        result = dataclasses.asdict(obj)
        result[TYPE_ATTR] = type(obj).__name__
        return result
    return obj if chain is None else chain(obj)
Convert custom dataclass to dict for serialization.
encode_obj
python
explosion/spacy-layout
spacy_layout/util.py
https://github.com/explosion/spacy-layout/blob/master/spacy_layout/util.py
MIT
def decode_obj(obj: Any, chain: Callable | None = None) -> Any:
    """Load custom dataclass from serialized dict."""
    if isinstance(obj, dict) and obj.get(TYPE_ATTR) in OBJ_TYPES:
        obj_type = obj.pop(TYPE_ATTR)
        return OBJ_TYPES[obj_type].from_dict(obj)
    return obj if chain is None else chain(obj)
Load custom dataclass from serialized dict.
decode_obj
python
explosion/spacy-layout
spacy_layout/util.py
https://github.com/explosion/spacy-layout/blob/master/spacy_layout/util.py
MIT
def encode_df(obj: Any, chain: Callable | None = None) -> Any:
    """Convert pandas.DataFrame for serialization."""
    if isinstance(obj, DataFrame):
        return {"data": obj.to_dict(), TYPE_ATTR: "DataFrame"}
    return obj if chain is None else chain(obj)
Convert pandas.DataFrame for serialization.
encode_df
python
explosion/spacy-layout
spacy_layout/util.py
https://github.com/explosion/spacy-layout/blob/master/spacy_layout/util.py
MIT
def decode_df(obj: Any, chain: Callable | None = None) -> Any:
    """Load pandas.DataFrame from serialized data."""
    if isinstance(obj, dict) and obj.get(TYPE_ATTR) == "DataFrame":
        return DataFrame(obj["data"])
    return obj if chain is None else chain(obj)
Load pandas.DataFrame from serialized data.
decode_df
python
explosion/spacy-layout
spacy_layout/util.py
https://github.com/explosion/spacy-layout/blob/master/spacy_layout/util.py
MIT
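These four helpers follow the hook signature used by srsly's msgpack registries (the `chain` argument lets hooks be composed). A registration sketch, assuming srsly's catalogue-based registries; the registration name is hypothetical:

import srsly

srsly.msgpack_encoders.register("spacy_layout_obj", func=encode_obj)
srsly.msgpack_decoders.register("spacy_layout_obj", func=decode_obj)

data = srsly.msgpack_dumps({"layout": span_layout})  # a dataclass in OBJ_TYPES
restored = srsly.msgpack_loads(data)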
def raise_disconnected_warning(
    edges_removed,
    vertices_disconnected,
    disconnection_distance,
    total_rows,
    threshold=0.1,
    verbose=False,
):
    """A simple wrapper function to avoid large amounts of code repetition."""
    if verbose & (vertices_disconnected == 0) & (edges_removed > 0):
        print(
            f"Disconnection_distance = {disconnection_distance} has removed {edges_removed} edges. "
            f"This is not a problem as no vertices were disconnected."
        )
    elif (vertices_disconnected > 0) & (
        vertices_disconnected <= threshold * total_rows
    ):
        warn(
            f"A few of your vertices were disconnected from the manifold. This shouldn't cause problems.\n"
            f"Disconnection_distance = {disconnection_distance} has removed {edges_removed} edges.\n"
            f"It has only fully disconnected {vertices_disconnected} vertices.\n"
            f"Use umap.utils.disconnected_vertices() to identify them.",
        )
    elif vertices_disconnected > threshold * total_rows:
        warn(
            f"A large number of your vertices were disconnected from the manifold.\n"
            f"Disconnection_distance = {disconnection_distance} has removed {edges_removed} edges.\n"
            f"It has fully disconnected {vertices_disconnected} vertices.\n"
            f"You might consider using find_disconnected_points() to find and remove these points from your data.\n"
            f"Use umap.utils.disconnected_vertices() to identify them.",
        )
A simple wrapper function to avoid large amounts of code repetition.
raise_disconnected_warning
python
lmcinnes/umap
umap/umap_.py
https://github.com/lmcinnes/umap/blob/master/umap/umap_.py
BSD-3-Clause
def smooth_knn_dist(distances, k, n_iter=64, local_connectivity=1.0, bandwidth=1.0):
    """Compute a continuous version of the distance to the kth nearest
    neighbor. That is, this is similar to knn-distance but allows continuous
    k values rather than requiring an integral k. In essence we are simply
    computing the distance such that the cardinality of fuzzy set we generate
    is k.

    Parameters
    ----------
    distances: array of shape (n_samples, n_neighbors)
        Distances to nearest neighbors for each sample. Each row should be a
        sorted list of distances to a given samples nearest neighbors.

    k: float
        The number of nearest neighbors to approximate for.

    n_iter: int (optional, default 64)
        We need to binary search for the correct distance value. This is the
        max number of iterations to use in such a search.

    local_connectivity: int (optional, default 1)
        The local connectivity required -- i.e. the number of nearest
        neighbors that should be assumed to be connected at a local level.
        The higher this value the more connected the manifold becomes
        locally. In practice this should be not more than the local intrinsic
        dimension of the manifold.

    bandwidth: float (optional, default 1)
        The target bandwidth of the kernel, larger values will produce
        larger return values.

    Returns
    -------
    knn_dist: array of shape (n_samples,)
        The distance to kth nearest neighbor, as suitably approximated.

    nn_dist: array of shape (n_samples,)
        The distance to the 1st nearest neighbor for each point.
    """
    target = np.log2(k) * bandwidth
    rho = np.zeros(distances.shape[0], dtype=np.float32)
    result = np.zeros(distances.shape[0], dtype=np.float32)

    mean_distances = np.mean(distances)

    for i in range(distances.shape[0]):
        lo = 0.0
        hi = NPY_INFINITY
        mid = 1.0

        # TODO: This is very inefficient, but will do for now. FIXME
        ith_distances = distances[i]
        non_zero_dists = ith_distances[ith_distances > 0.0]
        if non_zero_dists.shape[0] >= local_connectivity:
            index = int(np.floor(local_connectivity))
            interpolation = local_connectivity - index
            if index > 0:
                rho[i] = non_zero_dists[index - 1]
                if interpolation > SMOOTH_K_TOLERANCE:
                    rho[i] += interpolation * (
                        non_zero_dists[index] - non_zero_dists[index - 1]
                    )
            else:
                rho[i] = interpolation * non_zero_dists[0]
        elif non_zero_dists.shape[0] > 0:
            rho[i] = np.max(non_zero_dists)

        for n in range(n_iter):
            psum = 0.0
            for j in range(1, distances.shape[1]):
                d = distances[i, j] - rho[i]
                if d > 0:
                    psum += np.exp(-(d / mid))
                else:
                    psum += 1.0

            if np.fabs(psum - target) < SMOOTH_K_TOLERANCE:
                break

            if psum > target:
                hi = mid
                mid = (lo + hi) / 2.0
            else:
                lo = mid
                if hi == NPY_INFINITY:
                    mid *= 2
                else:
                    mid = (lo + hi) / 2.0

        result[i] = mid

        # TODO: This is very inefficient, but will do for now. FIXME
        if rho[i] > 0.0:
            mean_ith_distances = np.mean(ith_distances)
            if result[i] < MIN_K_DIST_SCALE * mean_ith_distances:
                result[i] = MIN_K_DIST_SCALE * mean_ith_distances
        else:
            if result[i] < MIN_K_DIST_SCALE * mean_distances:
                result[i] = MIN_K_DIST_SCALE * mean_distances

    return result, rho
Compute a continuous version of the distance to the kth nearest neighbor. That is, this is similar to knn-distance but allows continuous k values rather than requiring an integral k. In essence we are simply computing the distance such that the cardinality of fuzzy set we generate is k. Parameters ---------- distances: array of shape (n_samples, n_neighbors) Distances to nearest neighbors for each sample. Each row should be a sorted list of distances to a given samples nearest neighbors. k: float The number of nearest neighbors to approximate for. n_iter: int (optional, default 64) We need to binary search for the correct distance value. This is the max number of iterations to use in such a search. local_connectivity: int (optional, default 1) The local connectivity required -- i.e. the number of nearest neighbors that should be assumed to be connected at a local level. The higher this value the more connected the manifold becomes locally. In practice this should be not more than the local intrinsic dimension of the manifold. bandwidth: float (optional, default 1) The target bandwidth of the kernel, larger values will produce larger return values. Returns ------- knn_dist: array of shape (n_samples,) The distance to kth nearest neighbor, as suitably approximated. nn_dist: array of shape (n_samples,) The distance to the 1st nearest neighbor for each point.
smooth_knn_dist
python
lmcinnes/umap
umap/umap_.py
https://github.com/lmcinnes/umap/blob/master/umap/umap_.py
BSD-3-Clause
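A small numeric sketch of what the binary search returns, assuming the function is importable from umap's internals (an internal API, subject to change):

import numpy as np
from umap.umap_ import smooth_knn_dist  # internal API

rng = np.random.default_rng(0)
# five points, each with sorted distances to its 4 nearest neighbours
distances = np.sort(rng.random((5, 4)), axis=1).astype(np.float32)
sigmas, rhos = smooth_knn_dist(distances, k=4.0)
# rhos[i]: distance to i's nearest neighbour (local_connectivity=1);
# sigmas[i]: bandwidth making the fuzzy-set cardinality
#     sum_j exp(-max(0, d_ij - rho_i) / sigma_i) approx equal log2(k)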
def nearest_neighbors(
    X,
    n_neighbors,
    metric,
    metric_kwds,
    angular,
    random_state,
    low_memory=True,
    use_pynndescent=True,
    n_jobs=-1,
    verbose=False,
):
    """Compute the ``n_neighbors`` nearest points for each data point in
    ``X`` under ``metric``. This may be exact, but more likely is
    approximated via nearest neighbor descent.

    Parameters
    ----------
    X: array of shape (n_samples, n_features)
        The input data to compute the k-neighbor graph of.

    n_neighbors: int
        The number of nearest neighbors to compute for each sample in ``X``.

    metric: string or callable
        The metric to use for the computation.

    metric_kwds: dict
        Any arguments to pass to the metric computation function.

    angular: bool
        Whether to use angular rp trees in NN approximation.

    random_state: np.random state
        The random state to use for approximate NN computations.

    low_memory: bool (optional, default True)
        Whether to pursue lower memory NNdescent.

    verbose: bool (optional, default False)
        Whether to print status data during the computation.

    Returns
    -------
    knn_indices: array of shape (n_samples, n_neighbors)
        The indices on the ``n_neighbors`` closest points in the dataset.

    knn_dists: array of shape (n_samples, n_neighbors)
        The distances to the ``n_neighbors`` closest points in the dataset.

    rp_forest: list of trees
        The random projection forest used for searching (if used, None
        otherwise).
    """
    if verbose:
        print(ts(), "Finding Nearest Neighbors")

    if metric == "precomputed":
        # Note that this does not support sparse distance matrices yet ...
        # Compute indices of n nearest neighbors
        knn_indices = fast_knn_indices(X, n_neighbors)
        # knn_indices = np.argsort(X)[:, :n_neighbors]
        # Compute the nearest neighbor distances
        #   (equivalent to np.sort(X)[:, :n_neighbors])
        knn_dists = X[np.arange(X.shape[0])[:, None], knn_indices].copy()
        # Prune any nearest neighbours that are infinite distance apart.
        disconnected_index = knn_dists == np.inf
        knn_indices[disconnected_index] = -1

        knn_search_index = None
    else:
        # TODO: Hacked values for now
        n_trees = min(64, 5 + int(round((X.shape[0]) ** 0.5 / 20.0)))
        n_iters = max(5, int(round(np.log2(X.shape[0]))))

        knn_search_index = NNDescent(
            X,
            n_neighbors=n_neighbors,
            metric=metric,
            metric_kwds=metric_kwds,
            random_state=random_state,
            n_trees=n_trees,
            n_iters=n_iters,
            max_candidates=60,
            low_memory=low_memory,
            n_jobs=n_jobs,
            verbose=verbose,
            compressed=False,
        )
        knn_indices, knn_dists = knn_search_index.neighbor_graph

    if verbose:
        print(ts(), "Finished Nearest Neighbor Search")
    return knn_indices, knn_dists, knn_search_index
Compute the ``n_neighbors`` nearest points for each data point in ``X`` under ``metric``. This may be exact, but more likely is approximated via nearest neighbor descent. Parameters ---------- X: array of shape (n_samples, n_features) The input data to compute the k-neighbor graph of. n_neighbors: int The number of nearest neighbors to compute for each sample in ``X``. metric: string or callable The metric to use for the computation. metric_kwds: dict Any arguments to pass to the metric computation function. angular: bool Whether to use angular rp trees in NN approximation. random_state: np.random state The random state to use for approximate NN computations. low_memory: bool (optional, default True) Whether to pursue lower memory NNdescent. verbose: bool (optional, default False) Whether to print status data during the computation. Returns ------- knn_indices: array of shape (n_samples, n_neighbors) The indices on the ``n_neighbors`` closest points in the dataset. knn_dists: array of shape (n_samples, n_neighbors) The distances to the ``n_neighbors`` closest points in the dataset. rp_forest: list of trees The random projection forest used for searching (if used, None otherwise).
nearest_neighbors
python
lmcinnes/umap
umap/umap_.py
https://github.com/lmcinnes/umap/blob/master/umap/umap_.py
BSD-3-Clause
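A usage sketch on random placeholder data, again assuming the internal import path (subject to change):

import numpy as np
from sklearn.utils import check_random_state
from umap.umap_ import nearest_neighbors  # internal API

X = np.random.rand(1000, 20).astype(np.float32)
knn_indices, knn_dists, knn_index = nearest_neighbors(
    X, n_neighbors=15, metric="euclidean", metric_kwds={},
    angular=False, random_state=check_random_state(42),
)
# knn_indices.shape == (1000, 15); row i lists i's approximate neighbours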