repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) | possible_versions (list) |
---|---|---|---|---|---|
behavioral-data/multiverse | [
"82b7265de0aa3e9d229ce9f3f86b8b48435ca365",
"82b7265de0aa3e9d229ce9f3f86b8b48435ca365"
] | [
"src/models/CORAL-LM/coral/model/coral.py",
"src/models/CORAL_BART/models.py"
] | [
"# from .bert_graph import BERTGraph\n\nimport torch.nn as nn\nfrom .bert import BERT\nimport pdb\nimport torch\n\n\nclass CORAL(nn.Module):\n \"\"\"\n CORAL: Code representation learning\n \"\"\"\n\n def __init__(self, bert: BERT, n_topics=5):\n super(CORAL, self).__init__()\n\n self.n_topics = n_topics\n\n # self.bert_graph = BERTGraph(bert)\n self.bert_graph = bert\n\n self.dim_reduction = nn.Linear(bert.hidden, self.n_topics)\n\n self.reconstruction = nn.Linear(n_topics, bert.hidden, bias=False)\n\n self.spv_stage_label = nn.Linear(n_topics, 6)\n\n def forward(self, x, neg_x, segment_label, neg_segment_label, adj_mat, neg_adj_mat):\n vecs = self.bert_graph(x, segment_label, adj_mat)\n graph_vec = vecs[:, 0]\n\n topic_dist = self.dim_reduction(graph_vec)\n\n stage_vec = self.spv_stage_label(topic_dist)\n\n topic_dist = nn.Softmax(dim=1)(topic_dist)\n reconstructed_vec = self.reconstruction(topic_dist)\n\n neg_graph_vec = self.bert_graph(\n neg_x, neg_segment_label, neg_adj_mat)\n return reconstructed_vec, graph_vec, neg_graph_vec, topic_dist, stage_vec\n",
"from transformers import (BartForConditionalGeneration, BartConfig)\nfrom transformers.generation_utils import BeamHypotheses\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom src.models.CORAL_BART.utils import load_pickled_tree, separate_regions\nimport glob\nimport numpy as np\n\nimport os\nimport sys\nfrom collections import defaultdict\n\nimport logging\nimport pathlib\nlogger = logging.getLogger(__name__)\n\nroot_dir = os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))))\nsys.path.insert(0, os.path.join(root_dir, \"hyperbolics\", \"pytorch\"))\n# import hyperbolic_models\n\n\ndef mixin_with_p(a,b,p):\n assert a.shape == b.shape\n mask = np.where(np.random.random(a.shape) < p)\n new_b = np.copy(b)\n new_b[mask] = a[mask]\n return new_b\n\nclass MultiTaskBart(BartForConditionalGeneration):\n def __init__(self, config: BartConfig, span_aware_decoding=False):\n super().__init__(config)\n self.token_classifier = nn.Linear(\n config.hidden_size, config.num_labels)\n self.token_classifier_dropout = nn.Dropout(config.hidden_dropout_prob)\n\n self.span_aware_decoding = config.span_aware_decoding\n self.classification_threshold = config.classification_threshold\n\n def forward(\n self,\n input_ids,\n attention_mask=None,\n encoder_outputs=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n decoder_cached_states=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_input_logits=False,\n return_final_encoder_hidden_states=False,\n **unused,\n ):\n seq2seq_outputs = super().forward(\n input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n encoder_outputs=encoder_outputs,\n decoder_attention_mask=decoder_attention_mask,\n decoder_cached_states=decoder_cached_states,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=return_input_logits\n )\n \n to_return = [seq2seq_outputs]\n \n if not return_input_logits:\n return seq2seq_outputs\n \n encoder_final_hidden_states = seq2seq_outputs[-2]\n if return_input_logits: \n sequence_output = self.token_classifier_dropout(\n encoder_final_hidden_states)\n token_logits = self.token_classifier(sequence_output)\n to_return.append(token_logits)\n\n if return_final_encoder_hidden_states:\n to_return.append(encoder_final_hidden_states)\n \n return tuple(to_return)\n\n def classify_ids_api(self, input_ids):\n # Classifies input ids using the encoder. 
Also ultiumately runs\n # the decoder, so this could probably be optimized.\n _, token_logits = self(input_ids, return_input_logits=True)\n scores = torch.softmax(token_logits, axis=2)\n classes = (scores[:, :, -1] >= self.classification_threshold).int()\n return classes\n\n def _generate_beam_search(\n self,\n input_ids,\n cur_len,\n max_length,\n min_length,\n do_sample,\n early_stopping,\n temperature,\n top_k,\n top_p,\n repetition_penalty,\n no_repeat_ngram_size,\n bad_words_ids,\n pad_token_id,\n eos_token_id,\n batch_size,\n num_return_sequences,\n length_penalty,\n num_beams,\n vocab_size,\n encoder_outputs,\n attention_mask,\n use_cache,\n model_specific_kwargs,\n ):\n \"\"\" Generate sequences for each example with beam search.\n \"\"\"\n\n # generated hypotheses\n generated_hyps = [\n BeamHypotheses(num_beams, max_length, length_penalty,\n early_stopping=early_stopping)\n for _ in range(batch_size)\n ]\n\n # scores for each sentence in the beam\n beam_scores = torch.zeros(\n (batch_size, num_beams), dtype=torch.float, device=input_ids.device)\n\n # for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens three times\n if do_sample is False:\n beam_scores[:, 1:] = -1e9\n beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,)\n\n # cache compute states\n past = (encoder_outputs, None) if encoder_outputs is not None else None\n\n # done sentences\n done = [False for _ in range(batch_size)]\n\n if self.span_aware_decoding:\n\n inserted_id = model_specific_kwargs[\"inserted_id\"]\n end_of_inserted_id = model_specific_kwargs[\"end_of_inserted_id\"]\n\n original_inputs = model_specific_kwargs[\"original_inputs\"]\n oracle_token_labels = model_specific_kwargs.get(\"oracle_token_labels\")\n oracle_mixin_p = model_specific_kwargs.get(\"oracle_mixin_p\")\n\n if oracle_token_labels is not None:\n force_tokens = np.logical_not(\n np.array(oracle_token_labels.cpu()))\n \n if oracle_mixin_p != 1.0:\n token_classification = np.logical_not(\n np.array(self.classify_ids_api(original_inputs).cpu()))\n force_tokens = mixin_with_p(force_tokens,token_classification,oracle_mixin_p)\n else:\n force_tokens = np.logical_not(\n np.array(self.classify_ids_api(original_inputs).cpu()))\n\n spans_to_force = []\n for i, tokens in enumerate(force_tokens):\n spans = separate_regions(original_inputs[i], tokens)\n spans_to_force.append(\n [np.concatenate((x.cpu().numpy(), [inserted_id])) for x in spans if not all([y==1 for y in x])])\n\n inserted_id = model_specific_kwargs[\"inserted_id\"]\n end_of_inserted_id = model_specific_kwargs[\"end_of_inserted_id\"]\n\n # Tracks which span a beam is in\n beam_span = np.zeros((batch_size, num_beams)).astype(int)\n\n # Track which token a beam needs to force in each span\n beam_span_index = np.zeros((batch_size, num_beams)).astype(int)\n\n # Track whether a beam should be doing generating wihtout constraints\n beam_is_restricted = np.tile(force_tokens[:,1],(num_beams,1)).T.astype(int)\n\n while cur_len < max_length:\n model_inputs = self.prepare_inputs_for_generation(\n input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_specific_kwargs\n )\n # (batch_size * num_beams, cur_len, vocab_size)\n outputs = self(**model_inputs)\n # (batch_size * num_beams, vocab_size)\n next_token_logits = outputs[0][:, -1, :]\n\n # if model has past, then set the past variable to speed up decoding\n if self._use_cache(outputs, use_cache):\n past = outputs[1]\n if 
self.config.is_encoder_decoder and do_sample is False:\n # TODO (PVP) still a bit hacky here - there might be a better solution\n next_token_logits = self.adjust_logits_during_generation(\n next_token_logits, cur_len=cur_len, max_length=max_length\n )\n\n # (batch_size * num_beams, vocab_size)\n scores = F.log_softmax(next_token_logits, dim=-1)\n\n scores = self.postprocess_next_token_scores(\n scores=scores,\n input_ids=input_ids,\n no_repeat_ngram_size=no_repeat_ngram_size,\n bad_words_ids=bad_words_ids,\n cur_len=cur_len,\n min_length=min_length,\n max_length=max_length,\n eos_token_id=eos_token_id,\n repetition_penalty=repetition_penalty,\n batch_size=batch_size,\n num_beams=num_beams,\n )\n\n assert scores.shape == (batch_size * num_beams, vocab_size), \"Shapes of scores: {} != {}\".format(\n scores.shape, (batch_size * num_beams, vocab_size)\n )\n\n if do_sample:\n _scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)\n # Temperature\n if temperature != 1.0:\n _scores = _scores / temperature\n # Top-p/top-k filtering\n _scores = top_k_top_p_filtering(\n _scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2\n ) # (batch_size * num_beams, vocab_size)\n # re-organize to group the beam together to sample from all beam_idxs\n _scores = _scores.contiguous().view(\n batch_size, num_beams * vocab_size\n ) # (batch_size, num_beams * vocab_size)\n\n # Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search)\n probs = F.softmax(_scores, dim=-1)\n next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) # (batch_size, num_beams * 2)\n # Compute next scores\n next_scores = torch.gather(_scores, -1, next_tokens) # (batch_size, num_beams * 2)\n # sort the sampled vector to make sure that the first num_beams samples are the best\n next_scores, next_scores_indices = torch.sort(next_scores, descending=True, dim=1)\n next_tokens = torch.gather(next_tokens, -1, next_scores_indices) # (batch_size, num_beams * 2)\n\n else:\n next_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)\n\n # re-organize to group the beam together (we are keeping top hypothesis accross beams)\n next_scores = next_scores.view(\n batch_size, num_beams * vocab_size\n ) # (batch_size, num_beams * vocab_size)\n\n next_scores, next_tokens = torch.topk(next_scores, 2 * num_beams, dim=1, largest=True, sorted=True)\n\n assert next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams)\n\n # next batch beam content\n next_batch_beam = []\n\n # for each sentence\n for batch_idx in range(batch_size):\n\n # if we are done with this sentence, add a pad token\n if done[batch_idx]:\n assert (\n len(generated_hyps[batch_idx]) >= num_beams\n ), \"Batch can only be done if at least {} beams have been generated\".format(num_beams)\n assert (\n eos_token_id is not None and pad_token_id is not None\n ), \"generated beams >= num_beams -> eos_token_id and pad_token have to be defined\"\n next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch\n continue\n\n # next sentence beam content, this will get added to next_batch_beam\n next_sent_beam = []\n\n assert next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams)\n # We can modify each beam's next scores here\n\n # if self.span_aware_decoding and not cur_len==1:\n # next_scores[np.where(beam_is_restricted),0] = 1\n # # next_tokens[np.where(beam_is_restricted), 0] = spans_to_force[np.where(beam_is_restricted)]\n\n 
# next batch beam content\n next_batch_beam = []\n\n # for each sentence\n for batch_idx in range(batch_size):\n\n # if we are done with this sentence, add a pad token\n if done[batch_idx]:\n assert (\n len(generated_hyps[batch_idx]) >= num_beams\n ), \"Batch can only be done if at least {} beams have been generated\".format(num_beams)\n assert (\n eos_token_id is not None and pad_token_id is not None\n ), \"generated beams >= num_beams -> eos_token_id and pad_token have to be defined\"\n next_batch_beam.extend(\n [(0, pad_token_id, 0)] * num_beams) # pad the batch\n continue\n\n # next sentence beam content, this will get added to next_batch_beam\n next_sent_beam = []\n \n # next tokens for this sentence\n for beam_token_rank, (beam_token_id, beam_token_score) in enumerate(\n zip(next_tokens[batch_idx], next_scores[batch_idx])\n ):\n # get beam and token IDs\n beam_id = beam_token_id // vocab_size\n token_id = beam_token_id % vocab_size\n\n # Do we want to ignore the first token?\n # if self.span_aware_decoding and cur_len>1 and not beam_took_token = [False]:\n # # if self.span_aware_decoding and not cur_len==1:\n # beam_restriced = beam_is_restricted[batch_idx, beam_id]\n # beam_span_id = beam_span[batch_idx, beam_id]\n # if beam_restriced:\n # span_to_force = spans_to_force[batch_idx][beam_span_id]\n # span_token_id = beam_span_index[batch_idx, beam_id]\n # token_to_force = span_to_force[span_token_id]\n\n # beam_took_token[beam_id] +=1\n # token_id = token_to_force\n # beam_token_score = torch.tensor(1)\n\n # # If we completed a span...\n # if span_token_id == len(span_to_force) - 1:\n # # Isn't reaching here..\n # beam_span[batch_idx, beam_id] += 1\n # beam_span_index[batch_idx, beam_id] = 0\n # beam_is_restricted[batch_idx, beam_id] = 0\n # beam_span_index[batch_idx, beam_id] += 1\n\n # # We've reached the end of a free span, and we have more spans to generate, so restrict on next run:\n # elif token_id == end_of_inserted_id and not len(spans_to_force[batch_idx][beam_span_id:]) == 0:\n # beam_is_restricted[batch_idx, beam_id] = 0\n # Token is getting set correctly but it's not getting added to the beam>\n effective_beam_id = batch_idx * num_beams + beam_id\n # add to generated hypotheses if end of sentence\n if (eos_token_id is not None) and (token_id.item() == eos_token_id):\n # if beam_token does not belong to top num_beams tokens, it should not be added\n is_beam_token_worse_than_top_num_beams = beam_token_rank >= num_beams\n if is_beam_token_worse_than_top_num_beams:\n continue\n generated_hyps[batch_idx].add(\n input_ids[effective_beam_id].clone(\n ), beam_token_score.item(),\n )\n else:\n # add next predicted token since it is not eos_token\n next_sent_beam.append(\n (beam_token_score, token_id, effective_beam_id))\n\n # once the beam for next step is full, don't add more tokens to it.\n if len(next_sent_beam) == num_beams:\n break\n\n # Check if we are done so that we can save a pad step if all(done)\n done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done(\n next_scores[batch_idx].max().item(), cur_len\n )\n\n # update next beam content\n assert len(\n next_sent_beam) == num_beams, \"Beam should always be full\"\n next_batch_beam.extend(next_sent_beam)\n assert len(next_batch_beam) == num_beams * (batch_idx +\n 1), \"We should have added num_beams each step\"\n\n # stop when we are done with each sentence\n if all(done):\n break\n\n # sanity check / prepare next batch\n # For whatever reason it feels like the forced token isn't ending up in next_batch_beam\n 
\n\n #Force tokens if necessary\n if self.span_aware_decoding:\n span_aware_next_beam_batch = []\n for beam_score, beam_token, effective_beam_id in next_batch_beam:\n beam_id = effective_beam_id % num_beams\n batch_idx = (effective_beam_id - beam_id) // num_beams\n \n\n beam_restriced = beam_is_restricted[batch_idx, beam_id]\n beam_span_id = beam_span[batch_idx, beam_id]\n \n #Beam is restriced and there's more work to do\n if beam_restriced and beam_span_id < len(spans_to_force[batch_idx]):\n span_to_force = spans_to_force[batch_idx][beam_span_id]\n span_token_id = beam_span_index[batch_idx, beam_id]\n token_to_force = span_to_force[span_token_id]\n \n token_id = token_to_force\n\n\n # If we completed a span...\n if span_token_id == len(span_to_force) - 1:\n # Isn't reaching here..\n beam_span[batch_idx, beam_id] += 1\n beam_span_index[batch_idx, beam_id] = 0\n beam_is_restricted[batch_idx, beam_id] = 0\n else:\n beam_span_index[batch_idx, beam_id] += 1\n span_aware_next_beam_batch.append((torch.tensor(0), token_to_force, effective_beam_id))\n else:\n span_aware_next_beam_batch.append((beam_score, beam_token, effective_beam_id))\n if beam_token == end_of_inserted_id:\n #Start forcing on the next loop:\n beam_is_restricted[batch_idx, beam_id] = 1\n\n next_batch_beam = span_aware_next_beam_batch\n\n assert len(next_batch_beam) == batch_size * num_beams\n beam_scores = beam_scores.new([x[0] for x in next_batch_beam])\n beam_tokens = input_ids.new([x[1] for x in next_batch_beam])\n beam_idx = input_ids.new(\n [x[2] for x in next_batch_beam]) # Effective beam ids\n \n # re-order batch and update current length\n input_ids = input_ids[beam_idx, :]\n input_ids = torch.cat(\n [input_ids, beam_tokens.unsqueeze(1)], dim=-1)\n\n # if cur_len > 1 and self.span_aware_decoding:\n # # Not doing this in numpy because it's 1am\n # for i in range(len(input_ids)):\n # beam_id = i % num_beams\n # batch_idx = i // num_beams\n\n # beam_restriced = beam_is_restricted[batch_idx, beam_id]\n # if beam_restriced:\n # span_to_force = spans_to_force[batch_idx][beam_span[batch_idx, beam_id]]\n # span_token_id = beam_span_index[batch_idx, beam_id]\n # token_to_force = span_to_force[span_token_id]\n # if i == 5:\n # a = 1\n # input_ids[i, -1] = int(token_to_force)\n # # beam_token_score = torch.tensor(1)\n\n # # If we completed a span...\n # if span_token_id == len(span_to_force) - 1:\n # # Isn't reaching here..\n # beam_span[batch_idx, beam_id] += 1\n # beam_span_index[batch_idx, beam_id] = 0\n # beam_is_restricted[batch_idx, beam_id] = 0\n # else:\n # beam_span_index[batch_idx, beam_id] += 1\n\n # # We've reached the end of a free span, restrict on next run:\n # elif token_id == end_of_inserted_id:\n # beam_is_restricted[batch_idx, beam_id] = 0\n # Values aren't getting set correctly\n cur_len = cur_len + 1\n\n # re-order internal states\n if past is not None:\n past = self._reorder_cache(past, beam_idx)\n\n # extend attention_mask for new generated input if only decoder\n if self.config.is_encoder_decoder is False:\n attention_mask = torch.cat(\n [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1\n )\n\n # finalize all open beam hypotheses and add to generated hypotheses\n for batch_idx in range(batch_size):\n if done[batch_idx]:\n continue\n\n # test that beam scores match previously calculated scores if not eos and batch_idx not done\n if eos_token_id is not None and all(\n (token_id % vocab_size).item() != eos_token_id for token_id in next_tokens[batch_idx]\n ):\n assert torch.all(\n 
next_scores[batch_idx, :num_beams] == beam_scores.view(batch_size, num_beams)[\n batch_idx]\n ), \"If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: {}\".format(\n next_scores[:, :num_beams][batch_idx], beam_scores.view(batch_size, num_beams)[\n batch_idx],\n )\n\n # need to add best num_beams hypotheses to generated hyps\n for beam_id in range(num_beams):\n effective_beam_id = batch_idx * num_beams + beam_id\n final_score = beam_scores[effective_beam_id].item()\n final_tokens = input_ids[effective_beam_id]\n generated_hyps[batch_idx].add(final_tokens, final_score)\n\n # depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch\n output_batch_size = batch_size if do_sample else batch_size * num_return_sequences\n output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences\n\n # select the best hypotheses\n sent_lengths = input_ids.new(output_batch_size)\n best = []\n\n # retrieve best hypotheses\n for i, hypotheses in enumerate(generated_hyps):\n sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0])\n for j in range(output_num_return_sequences_per_batch):\n effective_batch_idx = output_num_return_sequences_per_batch * i + j\n best_hyp = sorted_hyps.pop()[1]\n sent_lengths[effective_batch_idx] = len(best_hyp)\n best.append(best_hyp)\n\n # shorter batches are padded\n if sent_lengths.min().item() != sent_lengths.max().item():\n assert pad_token_id is not None, \"`Pad_token_id` has to be defined\"\n sent_max_len = min(sent_lengths.max().item() + 1, max_length)\n decoded = input_ids.new(\n output_batch_size, sent_max_len).fill_(pad_token_id)\n\n # fill with hypothesis and eos_token_id if necessary\n for i, hypo in enumerate(best):\n decoded[i, : sent_lengths[i]] = hypo\n if sent_lengths[i] < max_length:\n decoded[i, sent_lengths[i]] = eos_token_id\n else:\n # none of the hypotheses have an eos_token\n assert (len(hypo) == max_length for hypo in best)\n decoded = torch.stack(best).type(torch.long).to(\n next(self.parameters()).device)\n\n return decoded\n\n\nclass HyperbolicLibraryEmbedding():\n\n def __init__(self, tree_embedding_path, tokenizer):\n self.tree_embedding_path = tree_embedding_path\n self.tree = load_pickled_tree(self.tree_embedding_path)\n self.num_nodes = self.tree.number_of_nodes()\n self.device = \"cpu\"\n self.model = self.load_model()\n self.tokenizer = tokenizer\n self.node_embeddings = self.extract_embedding()\n\n # Since we don't always id,\n self.token_embeddings = defaultdict(list)\n for node_id, data in list(self.tree.nodes(data=True)):\n euclidean_component = [\n x for x in self.node_embeddings[node_id] if x[\"space\"] == \"euclidean\"][0]\n\n node_name_input_ids = self.tokenizer(\n data[\"name\"], add_special_tokens=False)[\"input_ids\"]\n for input_id in node_name_input_ids:\n self.token_embeddings[input_id].append(\n euclidean_component[\"vector\"])\n\n self.average_token_embeddings = {k: np.mean(\n v, axis=0) for k, v in self.token_embeddings.items()}\n\n def get_last_checkpoint_path(self):\n candidate_paths = [x for x in pathlib.Path(\n self.tree_embedding_path).glob('**/*embeddings*') if x.is_file()]\n\n def get_checkpoint_epoch(x): return x.name.split(\".\")[-1]\n epochs = [get_checkpoint_epoch(x) for x in candidate_paths]\n\n if \"final\" in epochs:\n return candidate_paths[epochs.index(\"final\")]\n else:\n return candidate_paths[np.argmax([int(x) for x in epochs])]\n\n def load_model(self):\n last_checkpoint_path = 
self.get_last_checkpoint_path()\n logger.info(f\"Loading tree embedding from {last_checkpoint_path}\")\n return torch.load(last_checkpoint_path).to(self.device)\n\n def set_embeddings(self, model):\n embeddings = model.get_input_embeddings()\n for k, v in self.average_token_embeddings.items():\n assert v.shape[0] == embeddings.embedding_dim\n embeddings.weight.data[k] = torch.tensor(\n v).to(embeddings.weight.data.device)\n model.set_input_embeddings(embeddings)\n\n def extract_embedding(self):\n embedding = dict()\n for node in range(self.num_nodes):\n embedding[node] = []\n for hyp_factor in self.model.H:\n embedding[node].append(\n {'space': 'hyperbolic',\n 'scale': hyp_factor.scale().item(),\n 'vector': hyp_factor.w[node, :].cpu().detach().numpy()\n }\n )\n for sph_factor in self.model.S:\n embedding[node].append(\n {'space': 'spherical',\n 'scale': sph_factor.scale().item(),\n 'vector': sph_factor.w[node, :].cpu().detach().numpy()\n }\n )\n if len(self.model.E) > 0:\n embedding[node].append(\n {'space': 'euclidean',\n 'scale': 0,\n 'vector': self.model.E[0].w[node, :].cpu().detach().numpy()\n }\n )\n return embedding\n"
] | [
[
"torch.nn.Linear",
"torch.nn.Softmax"
],
[
"torch.nn.Dropout",
"torch.softmax",
"numpy.random.random",
"torch.nn.functional.softmax",
"torch.nn.functional.log_softmax",
"torch.zeros",
"torch.load",
"torch.topk",
"numpy.tile",
"torch.multinomial",
"torch.tensor",
"torch.nn.Linear",
"numpy.copy",
"numpy.mean",
"torch.sort",
"torch.gather",
"numpy.zeros",
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
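An editorial aside on the `mixin_with_p` helper in `src/models/CORAL_BART/models.py` above: it returns a copy of `b` in which each element is overwritten by the corresponding element of `a` with probability `p`, which the span-aware decoder uses to blend oracle token labels into classifier predictions. A minimal standalone sketch with a made-up toy label pair:

```python
import numpy as np

def mixin_with_p(a, b, p):
    """Return a copy of b in which each element is replaced by the
    corresponding element of a with probability p (as defined above)."""
    assert a.shape == b.shape
    mask = np.where(np.random.random(a.shape) < p)
    new_b = np.copy(b)
    new_b[mask] = a[mask]
    return new_b

# Toy example (illustrative arrays): blend "oracle" labels into predictions.
oracle_labels = np.array([1, 1, 0, 0, 1])
predicted_labels = np.array([0, 1, 1, 0, 0])
mixed = mixin_with_p(oracle_labels, predicted_labels, p=0.5)
print(mixed)  # roughly half of the entries now come from oracle_labels
```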
nikhilkharade/Python | [
"86508ceb349226e17d36a0f8be155be7efd734bc"
] | [
"machine_learning/linear_regression.py"
] | [
"\"\"\"\nLinear regression is the most basic type of regression commonly used for\npredictive analysis. The idea is pretty simple: we have a dataset and we have\nfeatures associated with it. Features should be chosen very cautiously\nas they determine how much our model will be able to make future predictions.\nWe try to set the weight of these features, over many iterations, so that they best\nfit our dataset. In this particular code, I had used a CSGO dataset (ADR vs\nRating). We try to best fit a line through dataset and estimate the parameters.\n\"\"\"\nimport requests\nimport numpy as np\n\n\ndef collect_dataset():\n \"\"\"Collect dataset of CSGO\n The dataset contains ADR vs Rating of a Player\n :return : dataset obtained from the link, as matrix\n \"\"\"\n response = requests.get(\n \"https://raw.githubusercontent.com/yashLadha/\"\n + \"The_Math_of_Intelligence/master/Week1/ADRvs\"\n + \"Rating.csv\"\n )\n lines = response.text.splitlines()\n data = []\n for item in lines:\n item = item.split(\",\")\n data.append(item)\n data.pop(0) # This is for removing the labels from the list\n dataset = np.matrix(data)\n return dataset\n\n\ndef run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta):\n \"\"\"Run steep gradient descent and updates the Feature vector accordingly_\n :param data_x : contains the dataset\n :param data_y : contains the output associated with each data-entry\n :param len_data : length of the data_\n :param alpha : Learning rate of the model\n :param theta : Feature vector (weight's for our model)\n ;param return : Updated Feature's, using\n curr_features - alpha_ * gradient(w.r.t. feature)\n \"\"\"\n n = len_data\n\n prod = np.dot(theta, data_x.transpose())\n prod -= data_y.transpose()\n sum_grad = np.dot(prod, data_x)\n theta = theta - (alpha / n) * sum_grad\n return theta\n\n\ndef sum_of_square_error(data_x, data_y, len_data, theta):\n \"\"\"Return sum of square error for error calculation\n :param data_x : contains our dataset\n :param data_y : contains the output (result vector)\n :param len_data : len of the dataset\n :param theta : contains the feature vector\n :return : sum of square error computed from given feature's\n \"\"\"\n prod = np.dot(theta, data_x.transpose())\n prod -= data_y.transpose()\n sum_elem = np.sum(np.square(prod))\n error = sum_elem / (2 * len_data)\n return error\n\n\ndef run_linear_regression(data_x, data_y):\n \"\"\"Implement Linear regression over the dataset\n :param data_x : contains our dataset\n :param data_y : contains the output (result vector)\n :return : feature for line of best fit (Feature vector)\n \"\"\"\n iterations = 100000\n alpha = 0.0001550\n\n no_features = data_x.shape[1]\n len_data = data_x.shape[0] - 1\n\n theta = np.zeros((1, no_features))\n\n for i in range(0, iterations):\n theta = run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta)\n error = sum_of_square_error(data_x, data_y, len_data, theta)\n print(\"At Iteration %d - Error is %.5f \" % (i + 1, error))\n\n return theta\n\n\ndef main():\n \"\"\" Driver function \"\"\"\n data = collect_dataset()\n\n len_data = data.shape[0]\n data_x = np.c_[np.ones(len_data), data[:, :-1]].astype(float)\n data_y = data[:, -1].astype(float)\n\n theta = run_linear_regression(data_x, data_y)\n len_result = theta.shape[1]\n print(\"Resultant Feature vector : \")\n for i in range(0, len_result):\n print(\"%.5f\" % (theta[0, i]))\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.matrix",
"numpy.dot",
"numpy.square",
"numpy.ones",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
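For reference, `run_steep_gradient_descent` above performs a plain batch gradient-descent step on the mean squared error, theta <- theta - (alpha / n) * (theta X^T - y^T) X. A self-contained NumPy sketch of the same update on synthetic data (the toy dataset, learning rate, and iteration count are illustrative, not from the original script):

```python
import numpy as np

def gradient_step(theta, X, y, alpha):
    """One batch gradient-descent step for least squares:
    theta <- theta - (alpha / n) * (theta @ X.T - y.T) @ X"""
    n = X.shape[0]
    residual = theta @ X.T - y.T      # shape (1, n)
    grad = residual @ X               # shape (1, n_features)
    return theta - (alpha / n) * grad

# Toy fit of y = 2 + 3x with a bias column prepended, mirroring main() above.
rng = np.random.default_rng(0)
x = rng.random((50, 1))
X = np.c_[np.ones(len(x)), x]         # shape (n, 2)
y = 2 + 3 * x                         # shape (n, 1), so y.T is (1, n)
theta = np.zeros((1, X.shape[1]))
for _ in range(5000):
    theta = gradient_step(theta, X, y, alpha=0.1)
print(theta)                          # approximately [[2., 3.]]
```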
aprilnovak/openmc | [
"507c155560aab38cd6957bf9df9feca801ccd8bc"
] | [
"openmc/data/multipole.py"
] | [
"from numbers import Integral, Real\nfrom math import exp, erf, pi, sqrt\n\nimport h5py\nimport numpy as np\n\nfrom . import WMP_VERSION\nfrom .data import K_BOLTZMANN\nimport openmc.checkvalue as cv\nfrom openmc.mixin import EqualityMixin\n\n\n# Formalisms\n_FORM_MLBW = 2\n_FORM_RM = 3\n\n# Constants that determine which value to access\n_MP_EA = 0 # Pole\n\n# Reich-Moore indices\n_RM_RT = 1 # Residue total\n_RM_RA = 2 # Residue absorption\n_RM_RF = 3 # Residue fission\n\n# Multi-level Breit Wigner indices\n_MLBW_RT = 1 # Residue total\n_MLBW_RX = 2 # Residue competitive\n_MLBW_RA = 3 # Residue absorption\n_MLBW_RF = 4 # Residue fission\n\n# Polynomial fit indices\n_FIT_T = 0 # Total\n_FIT_A = 1 # Absorption\n_FIT_F = 2 # Fission\n\n\ndef _faddeeva(z):\n r\"\"\"Evaluate the complex Faddeeva function.\n\n Technically, the value we want is given by the equation:\n\n .. math::\n w(z) = \\frac{i}{\\pi} \\int_{-\\infty}^{\\infty} \\frac{1}{z - t}\n \\exp(-t^2) \\text{d}t\n\n as shown in Equation 63 from Hwang, R. N. \"A rigorous pole\n representation of multilevel cross sections and its practical\n applications.\" Nuclear Science and Engineering 96.3 (1987): 192-209.\n\n The :func:`scipy.special.wofz` function evaluates\n :math:`w(z) = \\exp(-z^2) \\text{erfc}(-iz)`. These two forms of the Faddeeva\n function are related by a transformation.\n\n If we call the integral form :math:`w_\\text{int}`, and the function form\n :math:`w_\\text{fun}`:\n\n .. math::\n w_\\text{int}(z) =\n \\begin{cases}\n w_\\text{fun}(z) & \\text{for } \\text{Im}(z) > 0\\\\\n -w_\\text{fun}(z^*)^* & \\text{for } \\text{Im}(z) < 0\n \\end{cases}\n\n Parameters\n ----------\n z : complex\n Argument to the Faddeeva function.\n\n Returns\n -------\n complex\n :math:`\\frac{i}{\\pi} \\int_{-\\infty}^{\\infty} \\frac{1}{z - t} \\exp(-t^2)\n \\text{d}t`\n\n \"\"\"\n from scipy.special import wofz\n if np.angle(z) > 0:\n return wofz(z)\n else:\n return -np.conj(wofz(z.conjugate()))\n\n\ndef _broaden_wmp_polynomials(E, dopp, n):\n r\"\"\"Evaluate Doppler-broadened windowed multipole curvefit.\n\n The curvefit is a polynomial of the form :math:`\\frac{a}{E}\n + \\frac{b}{\\sqrt{E}} + c + d \\sqrt{E} + \\ldots`\n\n Parameters\n ----------\n E : Real\n Energy to evaluate at.\n dopp : Real\n sqrt(atomic weight ratio / kT) in units of eV.\n n : Integral\n Number of components to the polynomial.\n\n Returns\n -------\n numpy.ndarray\n The value of each Doppler-broadened curvefit polynomial term.\n\n \"\"\"\n sqrtE = sqrt(E)\n beta = sqrtE * dopp\n half_inv_dopp2 = 0.5 / dopp**2\n quarter_inv_dopp4 = half_inv_dopp2**2\n\n if beta > 6.0:\n # Save time, ERF(6) is 1 to machine precision.\n # beta/sqrtpi*exp(-beta**2) is also approximately 1 machine epsilon.\n erf_beta = 1.0\n exp_m_beta2 = 0.0\n else:\n erf_beta = erf(beta)\n exp_m_beta2 = exp(-beta**2)\n\n # Assume that, for sure, we'll use a second order (1/E, 1/V, const)\n # fit, and no less.\n\n factors = np.zeros(n)\n\n factors[0] = erf_beta / E\n factors[1] = 1.0 / sqrtE\n factors[2] = (factors[0] * (half_inv_dopp2 + E)\n + exp_m_beta2 / (beta * sqrt(pi)))\n\n # Perform recursive broadening of high order components. range(1, n-2)\n # replaces a do i = 1, n-3. All indices are reduced by one due to the\n # 1-based vs. 
0-based indexing.\n for i in range(1, n-2):\n if i != 1:\n factors[i+2] = (-factors[i-2] * (i - 1.0) * i * quarter_inv_dopp4\n + factors[i] * (E + (1.0 + 2.0 * i) * half_inv_dopp2))\n else:\n factors[i+2] = factors[i]*(E + (1.0 + 2.0 * i) * half_inv_dopp2)\n\n return factors\n\n\nclass WindowedMultipole(EqualityMixin):\n \"\"\"Resonant cross sections represented in the windowed multipole format.\n\n Parameters\n ----------\n formalism : {'MLBW', 'RM'}\n The R-matrix formalism used to reconstruct resonances. Either 'MLBW'\n for multi-level Breit Wigner or 'RM' for Reich-Moore.\n\n Attributes\n ----------\n num_l : Integral\n Number of possible l quantum states for this nuclide.\n fit_order : Integral\n Order of the windowed curvefit.\n fissionable : bool\n Whether or not the target nuclide has fission data.\n formalism : {'MLBW', 'RM'}\n The R-matrix formalism used to reconstruct resonances. Either 'MLBW'\n for multi-level Breit Wigner or 'RM' for Reich-Moore.\n spacing : Real\n The width of each window in sqrt(E)-space. For example, the frst window\n will end at (sqrt(start_E) + spacing)**2 and the second window at\n (sqrt(start_E) + 2*spacing)**2.\n sqrtAWR : Real\n Square root of the atomic weight ratio of the target nuclide.\n start_E : Real\n Lowest energy in eV the library is valid for.\n end_E : Real\n Highest energy in eV the library is valid for.\n data : np.ndarray\n A 2D array of complex poles and residues. data[i, 0] gives the energy\n at which pole i is located. data[i, 1:] gives the residues associated\n with the i-th pole. There are 3 residues for Reich-Moore data, one each\n for the total, absorption, and fission channels. Multi-level\n Breit Wigner data has an additional residue for the competitive channel.\n pseudo_k0RS : np.ndarray\n A 1D array of Real values. There is one value for each valid l\n quantum number. The values are equal to\n sqrt(2 m / hbar) * AWR / (AWR + 1) * r\n where m is the neutron mass, AWR is the atomic weight ratio, and r\n is the l-dependent scattering radius.\n l_value : np.ndarray\n A 1D array of Integral values equal to the l quantum number for each\n pole + 1.\n w_start : np.ndarray\n A 1D array of Integral values. w_start[i] - 1 is the index of the first\n pole in window i.\n w_end : np.ndarray\n A 1D array of Integral values. w_end[i] - 1 is the index of the last\n pole in window i.\n broaden_poly : np.ndarray\n A 1D array of boolean values indicating whether or not the polynomial\n curvefit in that window should be Doppler broadened.\n curvefit : np.ndarray\n A 3D array of Real curvefit polynomial coefficients. curvefit[i, 0, :]\n gives coefficients for the total cross section in window i.\n curvefit[i, 1, :] gives absorption coefficients and curvefit[i, 2, :]\n gives fission coefficients. 
The polynomial terms are increasing powers\n of sqrt(E) starting with 1/E e.g:\n a/E + b/sqrt(E) + c + d sqrt(E) + ...\n\n \"\"\"\n def __init__(self, formalism):\n self._num_l = None\n self.formalism = formalism\n self.spacing = None\n self.sqrtAWR = None\n self.start_E = None\n self.end_E = None\n self.data = None\n self.pseudo_k0RS = None\n self.l_value = None\n self.w_start = None\n self.w_end = None\n self.broaden_poly = None\n self.curvefit = None\n\n @property\n def num_l(self):\n return self._num_l\n\n @property\n def fit_order(self):\n return self.curvefit.shape[1] - 1\n\n @property\n def fissionable(self):\n if self.formalism == 'RM':\n return self.data.shape[1] == 4\n else:\n # Assume self.formalism == 'MLBW'\n return self.data.shape[1] == 5\n\n @property\n def formalism(self):\n return self._formalism\n\n @property\n def spacing(self):\n return self._spacing\n\n @property\n def sqrtAWR(self):\n return self._sqrtAWR\n\n @property\n def start_E(self):\n return self._start_E\n\n @property\n def end_E(self):\n return self._end_E\n\n @property\n def data(self):\n return self._data\n\n @property\n def pseudo_k0RS(self):\n return self._pseudo_k0RS\n\n @property\n def l_value(self):\n return self._l_value\n\n @property\n def w_start(self):\n return self._w_start\n\n @property\n def w_end(self):\n return self._w_end\n\n @property\n def broaden_poly(self):\n return self._broaden_poly\n\n @property\n def curvefit(self):\n return self._curvefit\n\n @formalism.setter\n def formalism(self, formalism):\n cv.check_type('formalism', formalism, str)\n cv.check_value('formalism', formalism, ('MLBW', 'RM'))\n self._formalism = formalism\n\n @spacing.setter\n def spacing(self, spacing):\n if spacing is not None:\n cv.check_type('spacing', spacing, Real)\n cv.check_greater_than('spacing', spacing, 0.0, equality=False)\n self._spacing = spacing\n\n @sqrtAWR.setter\n def sqrtAWR(self, sqrtAWR):\n if sqrtAWR is not None:\n cv.check_type('sqrtAWR', sqrtAWR, Real)\n cv.check_greater_than('sqrtAWR', sqrtAWR, 0.0, equality=False)\n self._sqrtAWR = sqrtAWR\n\n @start_E.setter\n def start_E(self, start_E):\n if start_E is not None:\n cv.check_type('start_E', start_E, Real)\n cv.check_greater_than('start_E', start_E, 0.0, equality=True)\n self._start_E = start_E\n\n @end_E.setter\n def end_E(self, end_E):\n if end_E is not None:\n cv.check_type('end_E', end_E, Real)\n cv.check_greater_than('end_E', end_E, 0.0, equality=False)\n self._end_E = end_E\n\n @data.setter\n def data(self, data):\n if data is not None:\n cv.check_type('data', data, np.ndarray)\n if len(data.shape) != 2:\n raise ValueError('Multipole data arrays must be 2D')\n if self.formalism == 'RM':\n if data.shape[1] not in (3, 4):\n raise ValueError('For the Reich-Moore formalism, '\n 'data.shape[1] must be 3 or 4. One value for the pole.'\n ' One each for the total and absorption residues. '\n 'Possibly one more for a fission residue.')\n else:\n # Assume self.formalism == 'MLBW'\n if data.shape[1] not in (4, 5):\n raise ValueError('For the Multi-level Breit-Wigner '\n 'formalism, data.shape[1] must be 4 or 5. One value '\n 'for the pole. One each for the total, competitive, '\n 'and absorption residues. 
Possibly one more for a '\n 'fission residue.')\n if not np.issubdtype(data.dtype, complex):\n raise TypeError('Multipole data arrays must be complex dtype')\n self._data = data\n\n @pseudo_k0RS.setter\n def pseudo_k0RS(self, pseudo_k0RS):\n if pseudo_k0RS is not None:\n cv.check_type('pseudo_k0RS', pseudo_k0RS, np.ndarray)\n if len(pseudo_k0RS.shape) != 1:\n raise ValueError('Multipole pseudo_k0RS arrays must be 1D')\n if not np.issubdtype(pseudo_k0RS.dtype, float):\n raise TypeError('Multipole data arrays must be float dtype')\n self._pseudo_k0RS = pseudo_k0RS\n\n @l_value.setter\n def l_value(self, l_value):\n if l_value is not None:\n cv.check_type('l_value', l_value, np.ndarray)\n if len(l_value.shape) != 1:\n raise ValueError('Multipole l_value arrays must be 1D')\n if not np.issubdtype(l_value.dtype, int):\n raise TypeError('Multipole l_value arrays must be integer'\n ' dtype')\n\n self._num_l = len(np.unique(l_value))\n\n else:\n self._num_l = None\n\n self._l_value = l_value\n\n @w_start.setter\n def w_start(self, w_start):\n if w_start is not None:\n cv.check_type('w_start', w_start, np.ndarray)\n if len(w_start.shape) != 1:\n raise ValueError('Multipole w_start arrays must be 1D')\n if not np.issubdtype(w_start.dtype, int):\n raise TypeError('Multipole w_start arrays must be integer'\n ' dtype')\n self._w_start = w_start\n\n @w_end.setter\n def w_end(self, w_end):\n if w_end is not None:\n cv.check_type('w_end', w_end, np.ndarray)\n if len(w_end.shape) != 1:\n raise ValueError('Multipole w_end arrays must be 1D')\n if not np.issubdtype(w_end.dtype, int):\n raise TypeError('Multipole w_end arrays must be integer dtype')\n self._w_end = w_end\n\n @broaden_poly.setter\n def broaden_poly(self, broaden_poly):\n if broaden_poly is not None:\n cv.check_type('broaden_poly', broaden_poly, np.ndarray)\n if len(broaden_poly.shape) != 1:\n raise ValueError('Multipole broaden_poly arrays must be 1D')\n if not np.issubdtype(broaden_poly.dtype, bool):\n raise TypeError('Multipole broaden_poly arrays must be boolean'\n ' dtype')\n self._broaden_poly = broaden_poly\n\n @curvefit.setter\n def curvefit(self, curvefit):\n if curvefit is not None:\n cv.check_type('curvefit', curvefit, np.ndarray)\n if len(curvefit.shape) != 3:\n raise ValueError('Multipole curvefit arrays must be 3D')\n if curvefit.shape[2] not in (2, 3): # sig_t, sig_a (maybe sig_f)\n raise ValueError('The third dimension of multipole curvefit'\n ' arrays must have a length of 2 or 3')\n if not np.issubdtype(curvefit.dtype, float):\n raise TypeError('Multipole curvefit arrays must be float dtype')\n self._curvefit = curvefit\n\n @classmethod\n def from_hdf5(cls, group_or_filename):\n \"\"\"Construct a WindowedMultipole object from an HDF5 group or file.\n\n Parameters\n ----------\n group_or_filename : h5py.Group or str\n HDF5 group containing multipole data. 
If given as a string, it is\n assumed to be the filename for the HDF5 file, and the first group is\n used to read from.\n\n Returns\n -------\n openmc.data.WindowedMultipole\n Resonant cross sections represented in the windowed multipole\n format.\n\n \"\"\"\n\n if isinstance(group_or_filename, h5py.Group):\n group = group_or_filename\n else:\n h5file = h5py.File(group_or_filename, 'r')\n try:\n version = h5file['version'].value.decode()\n except AttributeError:\n version = h5file['version'].value[0].decode()\n if version != WMP_VERSION:\n raise ValueError('The given WMP data uses version '\n + version + ' whereas your installation of the OpenMC '\n 'Python API expects version ' + WMP_VERSION)\n group = h5file['nuclide']\n\n # Read scalars.\n\n if group['formalism'].value == _FORM_MLBW:\n out = cls('MLBW')\n elif group['formalism'].value == _FORM_RM:\n out = cls('RM')\n else:\n raise ValueError('Unrecognized/Unsupported R-matrix formalism')\n\n out.spacing = group['spacing'].value\n out.sqrtAWR = group['sqrtAWR'].value\n out.start_E = group['start_E'].value\n out.end_E = group['end_E'].value\n\n # Read arrays.\n\n err = \"WMP '{}' array shape is not consistent with the '{}' array shape\"\n\n out.data = group['data'].value\n\n out.l_value = group['l_value'].value\n if out.l_value.shape[0] != out.data.shape[0]:\n raise ValueError(err.format('l_value', 'data'))\n\n out.pseudo_k0RS = group['pseudo_K0RS'].value\n if out.pseudo_k0RS.shape[0] != out.num_l:\n raise ValueError(err.format('pseudo_k0RS', 'l_value'))\n\n out.w_start = group['w_start'].value\n\n out.w_end = group['w_end'].value\n if out.w_end.shape[0] != out.w_start.shape[0]:\n raise ValueError(err.format('w_end', 'w_start'))\n\n out.broaden_poly = group['broaden_poly'].value.astype(np.bool)\n if out.broaden_poly.shape[0] != out.w_start.shape[0]:\n raise ValueError(err.format('broaden_poly', 'w_start'))\n\n out.curvefit = group['curvefit'].value\n if out.curvefit.shape[0] != out.w_start.shape[0]:\n raise ValueError(err.format('curvefit', 'w_start'))\n\n # _broaden_wmp_polynomials assumes the curve fit has at least 3 terms.\n if out.fit_order < 2:\n raise ValueError(\"Windowed multipole is only supported for \"\n \"curvefits with 3 or more terms.\")\n\n return out\n\n def _evaluate(self, E, T):\n \"\"\"Compute total, absorption, and fission cross sections.\n\n Parameters\n ----------\n E : Real\n Energy of the incident neutron in eV.\n T : Real\n Temperature of the target in K.\n\n Returns\n -------\n 3-tuple of Real\n Total, absorption, and fission microscopic cross sections at the\n given energy and temperature.\n\n \"\"\"\n\n if E < self.start_E: return (0, 0, 0)\n if E > self.end_E: return (0, 0, 0)\n\n # ======================================================================\n # Bookkeeping\n\n # Define some frequently used variables.\n sqrtkT = sqrt(K_BOLTZMANN * T)\n sqrtE = sqrt(E)\n invE = 1.0 / E\n\n # Locate us. The i_window calc omits a + 1 present in F90 because of\n # the 1-based vs. 0-based indexing. Similarly startw needs to be\n # decreased by 1. endw does not need to be decreased because\n # range(startw, endw) does not include endw.\n i_window = int(np.floor((sqrtE - sqrt(self.start_E)) / self.spacing))\n startw = self.w_start[i_window] - 1\n endw = self.w_end[i_window]\n\n # Fill in factors. 
Because of the unique interference dips in scatering\n # resonances, the total cross section has a special \"factor\" that does\n # not appear in the absorption and fission equations.\n if startw <= endw:\n twophi = np.zeros(self.num_l, dtype=np.float)\n sig_t_factor = np.zeros(self.num_l, dtype=np.cfloat)\n\n for iL in range(self.num_l):\n twophi[iL] = self.pseudo_k0RS[iL] * sqrtE\n if iL == 1:\n twophi[iL] = twophi[iL] - np.arctan(twophi[iL])\n elif iL == 2:\n arg = 3.0 * twophi[iL] / (3.0 - twophi[iL]**2)\n twophi[iL] = twophi[iL] - np.arctan(arg)\n elif iL == 3:\n arg = (twophi[iL] * (15.0 - twophi[iL]**2)\n / (15.0 - 6.0 * twophi[iL]**2))\n twophi[iL] = twophi[iL] - np.arctan(arg)\n\n twophi = 2.0 * twophi\n sig_t_factor = np.cos(twophi) - 1j*np.sin(twophi)\n\n # Initialize the ouptut cross sections.\n sig_t = 0.0\n sig_a = 0.0\n sig_f = 0.0\n\n # ======================================================================\n # Add the contribution from the curvefit polynomial.\n\n if sqrtkT != 0 and self.broaden_poly[i_window]:\n # Broaden the curvefit.\n dopp = self.sqrtAWR / sqrtkT\n broadened_polynomials = _broaden_wmp_polynomials(E, dopp,\n self.fit_order + 1)\n for i_poly in range(self.fit_order+1):\n sig_t += (self.curvefit[i_window, i_poly, _FIT_T]\n * broadened_polynomials[i_poly])\n sig_a += (self.curvefit[i_window, i_poly, _FIT_A]\n * broadened_polynomials[i_poly])\n if self.fissionable:\n sig_f += (self.curvefit[i_window, i_poly, _FIT_F]\n * broadened_polynomials[i_poly])\n else:\n temp = invE\n for i_poly in range(self.fit_order+1):\n sig_t += self.curvefit[i_window, i_poly, _FIT_T] * temp\n sig_a += self.curvefit[i_window, i_poly, _FIT_A] * temp\n if self.fissionable:\n sig_f += self.curvefit[i_window, i_poly, _FIT_F] * temp\n temp *= sqrtE\n\n # ======================================================================\n # Add the contribution from the poles in this window.\n\n if sqrtkT == 0.0:\n # If at 0K, use asymptotic form.\n for i_pole in range(startw, endw):\n psi_chi = -1j / (self.data[i_pole, _MP_EA] - sqrtE)\n c_temp = psi_chi / E\n if self.formalism == 'MLBW':\n sig_t += ((self.data[i_pole, _MLBW_RT] * c_temp *\n sig_t_factor[self.l_value[i_pole]-1]).real\n + (self.data[i_pole, _MLBW_RX] * c_temp).real)\n sig_a += (self.data[i_pole, _MLBW_RA] * c_temp).real\n if self.fissionable:\n sig_f += (self.data[i_pole, _MLBW_RF] * c_temp).real\n elif self.formalism == 'RM':\n sig_t += (self.data[i_pole, _RM_RT] * c_temp *\n sig_t_factor[self.l_value[i_pole]-1]).real\n sig_a += (self.data[i_pole, _RM_RA] * c_temp).real\n if self.fissionable:\n sig_f += (self.data[i_pole, _RM_RF] * c_temp).real\n else:\n raise ValueError('Unrecognized/Unsupported R-matrix'\n ' formalism')\n\n else:\n # At temperature, use Faddeeva function-based form.\n dopp = self.sqrtAWR / sqrtkT\n for i_pole in range(startw, endw):\n Z = (sqrtE - self.data[i_pole, _MP_EA]) * dopp\n w_val = _faddeeva(Z) * dopp * invE * sqrt(pi)\n if self.formalism == 'MLBW':\n sig_t += ((self.data[i_pole, _MLBW_RT] *\n sig_t_factor[self.l_value[i_pole]-1] +\n self.data[i_pole, _MLBW_RX]) * w_val).real\n sig_a += (self.data[i_pole, _MLBW_RA] * w_val).real\n if self.fissionable:\n sig_f += (self.data[i_pole, _MLBW_RF] * w_val).real\n elif self.formalism == 'RM':\n sig_t += (self.data[i_pole, _RM_RT] * w_val *\n sig_t_factor[self.l_value[i_pole]-1]).real\n sig_a += (self.data[i_pole, _RM_RA] * w_val).real\n if self.fissionable:\n sig_f += (self.data[i_pole, _RM_RF] * w_val).real\n else:\n raise ValueError('Unrecognized/Unsupported 
R-matrix'\n ' formalism')\n\n return sig_t, sig_a, sig_f\n\n def __call__(self, E, T):\n \"\"\"Compute total, absorption, and fission cross sections.\n\n Parameters\n ----------\n E : Real or Iterable of Real\n Energy of the incident neutron in eV.\n T : Real\n Temperature of the target in K.\n\n Returns\n -------\n 3-tuple of Real or 3-tuple of numpy.ndarray\n Total, absorption, and fission microscopic cross sections at the\n given energy and temperature.\n\n \"\"\"\n\n fun = np.vectorize(lambda x: self._evaluate(x, T))\n return fun(E)\n\n def export_to_hdf5(self, path, libver='earliest'):\n \"\"\"Export windowed multipole data to an HDF5 file.\n\n Parameters\n ----------\n path : str\n Path to write HDF5 file to\n libver : {'earliest', 'latest'}\n Compatibility mode for the HDF5 file. 'latest' will produce files\n that are less backwards compatible but have performance benefits.\n\n \"\"\"\n\n # Open file and write version.\n with h5py.File(path, 'w', libver=libver) as f:\n f.create_dataset('version', (1, ), dtype='S10')\n f['version'][:] = WMP_VERSION.encode('ASCII')\n\n # Make a nuclide group.\n g = f.create_group('nuclide')\n\n # Write scalars.\n if self.formalism == 'MLBW':\n g.create_dataset('formalism',\n data=np.array(_FORM_MLBW, dtype=np.int32))\n else:\n # Assume RM.\n g.create_dataset('formalism',\n data=np.array(_FORM_RM, dtype=np.int32))\n g.create_dataset('spacing', data=np.array(self.spacing))\n g.create_dataset('sqrtAWR', data=np.array(self.sqrtAWR))\n g.create_dataset('start_E', data=np.array(self.start_E))\n g.create_dataset('end_E', data=np.array(self.end_E))\n\n # Write arrays.\n g.create_dataset('data', data=self.data)\n g.create_dataset('l_value', data=self.l_value)\n g.create_dataset('pseudo_K0RS', data=self.pseudo_k0RS)\n g.create_dataset('w_start', data=self.w_start)\n g.create_dataset('w_end', data=self.w_end)\n g.create_dataset('broaden_poly',\n data=self.broaden_poly.astype(np.int8))\n g.create_dataset('curvefit', data=self.curvefit)\n"
] | [
[
"numpy.arctan",
"numpy.unique",
"numpy.issubdtype",
"numpy.cos",
"numpy.sin",
"scipy.special.wofz",
"numpy.angle",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
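A note on the `_faddeeva` helper in `openmc/data/multipole.py` above: its docstring relates the integral form of the Faddeeva function to `scipy.special.wofz` through conjugation in the lower half-plane. A minimal sketch that checks that relation numerically against direct quadrature (the quadrature helper and test points are illustrative, not part of OpenMC):

```python
import numpy as np
from scipy.integrate import quad
from scipy.special import wofz

def faddeeva(z):
    """Integral form of the Faddeeva function via wofz, as in _faddeeva above."""
    return wofz(z) if np.angle(z) > 0 else -np.conj(wofz(np.conj(z)))

def faddeeva_quadrature(z):
    """Direct evaluation of (i/pi) * integral of exp(-t^2) / (z - t) dt
    (illustrative cross-check only)."""
    integrand = lambda t: np.exp(-t**2) / (z - t)
    re, _ = quad(lambda t: integrand(t).real, -np.inf, np.inf)
    im, _ = quad(lambda t: integrand(t).imag, -np.inf, np.inf)
    return 1j / np.pi * (re + 1j * im)

for z in (0.3 + 0.7j, 1.2 - 0.4j):
    print(z, faddeeva(z), faddeeva_quadrature(z))  # the two values agree closely
```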
petersvenningsson/MMWCAS-RF-EVM-SP-chain | [
"d9b42f167da830a312834d757741a0d0ce3ee1fa"
] | [
"utils/load.py"
] | [
"import os\nimport json\n\nimport numpy as np\n\nfrom utils import calibrate\n\ndef read_binary(i_frame, path, config):\n \"\"\" Reads IQ sampled data from one cascade capture device.\n Returns ADC_samples: shape (chirp_samples, n_chirps, n_capture_transmitters, n_receivers)\n \"\"\"\n\n factors = [config['numSamplePerChirp'], config['numChirpsPerFrame'], config['NumRXPerDevice']]\n samples_per_frame = np.prod(factors) * 2\n\n with open(path, 'rb') as f:\n f.seek(i_frame * samples_per_frame * np.uint16(1).nbytes)\n ADC_data = np.fromfile(f, dtype=np.int16, count = samples_per_frame)\n real = ADC_data[0::2]\n imaginary = ADC_data[1::2]\n ADC_complex = real + 1j*imaginary \n\n n_chirps_per_loop = int(config['numChirpsPerFrame']/config['nchirp_loops'])\n shape = (config['NumRXPerDevice'], config['numSamplePerChirp'], n_chirps_per_loop, config['nchirp_loops'])\n data_cube = np.reshape(ADC_complex, shape, order='F').transpose((1, 3, 0, 2))\n\n return data_cube\n\n\ndef process_ADC(i_frame, files, path, config):\n \"\"\" Returns time domain data cube of i_frame. Antenna and cascade capture board parameters are \n defined by config. \n Returns ADC_samples: shape (chirp_samples, n_chirps, n_virtual_antennas)\n \"\"\"\n\n ADC_samples = []\n for chip in config['Primary/Replica']:\n filename = next(filter(lambda x: 'data' in x, files[chip]))\n ADC_samples.append(read_binary(i_frame, os.path.join(path, filename), config))\n ADC_samples = np.concatenate(ADC_samples, axis = 2)\n\n # Calibration\n if config[\"adcCalibrationOn\"]:\n ADC_samples = calibrate(ADC_samples, config)\n \n # Rx ordering\n channel_order = list(map(lambda x:x-1, config['RxForMIMOProcess']))\n ADC_samples = ADC_samples[:,:, channel_order,:]\n\n # Virtual array channel ordering\n virtual_array_shape = (config['numSamplePerChirp'], config['nchirp_loops'], config['numRxToEnable'] * config['numTxToEnable'])\n ADC_samples = ADC_samples.reshape((virtual_array_shape), order='F')\n channel_order = list(map(lambda x:x-1, config['ChannelOrder']))\n ADC_samples = ADC_samples[:,:, channel_order]\n\n return ADC_samples\n\n\ndef get_frame(i_frame, path, path_config):\n \"\"\" Returns one frame from the data capture stored in directory path.\n \"\"\"\n\n with open (path_config) as f:\n config = json.load(f)\n \n content = os.listdir(path)\n files = {}\n for chip in config['Primary/Replica']:\n files[chip] = list(filter(lambda x: chip in x, content))\n \n data_cube = process_ADC(i_frame, files, path, config)\n return data_cube\n\n\nif __name__ == '__main__':\n path = './cascade_capture_raw'\n path_config = './config/config.json'\n i_frame = 1\n get_frame(i_frame, path, path_config)\n "
] | [
[
"numpy.fromfile",
"numpy.reshape",
"numpy.concatenate",
"numpy.uint16",
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
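As a quick illustration of `read_binary` in `utils/load.py` above: the interleaved int16 I/Q stream is split into real and imaginary parts, combined into a complex array, then reshaped in Fortran order and transposed into the data cube. A minimal sketch with made-up toy dimensions:

```python
import numpy as np

def deinterleave_iq(adc_int16):
    """Interleaved [I0, Q0, I1, Q1, ...] int16 samples -> complex samples,
    as in read_binary() above."""
    real = adc_int16[0::2]
    imaginary = adc_int16[1::2]
    return real + 1j * imaginary

# Toy frame (all dimensions are made up for illustration).
n_rx, n_samples, n_chirps_per_loop, n_loops = 4, 8, 2, 3
raw = np.arange(n_rx * n_samples * n_chirps_per_loop * n_loops * 2, dtype=np.int16)
iq = deinterleave_iq(raw)
# Column-major reshape followed by the same (1, 3, 0, 2) transpose used above.
cube = np.reshape(iq, (n_rx, n_samples, n_chirps_per_loop, n_loops), order='F')
cube = cube.transpose((1, 3, 0, 2))
print(cube.shape)  # (8, 3, 4, 2): samples per chirp, chirp loops, receivers, chirps per loop
```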
yangbincheng/EMSRDPN | [
"71421db8e255916fb0885c7237885437719d0ece"
] | [
"src/main.py"
] | [
"import torch\n\nimport utility\nimport data\nimport model\nimport loss\nfrom option import args\nfrom trainer import Trainer\n\n#torch.set_printoptions(profile=\"full\")\ntorch.manual_seed(args.seed)\ncheckpoint = utility.checkpoint(args)\n\nif args.data_test == 'video':\n from videotester import VideoTester\n model = model.Model(args, checkpoint)\n t = VideoTester(args, model, checkpoint)\n t.test()\nelse:\n if checkpoint.ok:\n loader = data.Data(args)\n model = model.Model(args, checkpoint)\n pytorch_total_params = sum(p.numel() for p in model.parameters())\n pytorch_total_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n checkpoint.write_log(\"total params: {}, total trainable params: {}\".format(pytorch_total_params, pytorch_total_trainable_params))\n \n loss = loss.Loss(args, checkpoint) if not args.test_only else None\n t = Trainer(args, loader, model, loss, checkpoint)\n\n while not t.terminate():\n t.train()\n t.test()\n\n checkpoint.done()\n\n"
] | [
[
"torch.manual_seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
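A small aside on the parameter logging in `src/main.py` above: it reports total versus trainable parameter counts via `p.numel()`. A minimal sketch on a toy network (the network and the frozen layer are illustrative):

```python
from torch import nn

def count_parameters(model):
    """Total vs. trainable parameter counts, matching the log line in src/main.py above."""
    total = sum(p.numel() for p in model.parameters())
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    return total, trainable

# Toy network with one frozen layer so the two counts differ (illustrative only).
net = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))
for p in net[0].parameters():
    p.requires_grad = False
total, trainable = count_parameters(net)
print("total params: {}, total trainable params: {}".format(total, trainable))
```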
isu-veritas/gammapy | [
"715b041d7d3925bd51109dc9534634263a2f2d12",
"715b041d7d3925bd51109dc9534634263a2f2d12",
"715b041d7d3925bd51109dc9534634263a2f2d12",
"715b041d7d3925bd51109dc9534634263a2f2d12",
"715b041d7d3925bd51109dc9534634263a2f2d12"
] | [
"gammapy/datasets/tests/test_flux_points.py",
"gammapy/data/observations.py",
"gammapy/data/tests/test_pointing.py",
"gammapy/estimators/tests/test_utils.py",
"gammapy/estimators/utils.py"
] | [
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom astropy.table import Table\nfrom gammapy.datasets import Datasets, FluxPointsDataset\nfrom gammapy.estimators import FluxPoints\nfrom gammapy.modeling import Fit\nfrom gammapy.modeling.models import PowerLawSpectralModel, SkyModel\nfrom gammapy.utils.scripts import make_path\nfrom gammapy.utils.testing import mpl_plot_check, requires_data, requires_dependency\n\n\[email protected]()\ndef test_meta_table(dataset):\n meta_table = dataset.meta_table\n assert meta_table[\"TELESCOP\"] == \"CTA\"\n assert meta_table[\"OBS_ID\"] == \"0001\"\n assert meta_table[\"INSTRUME\"] == \"South_Z20_50h\"\n\n\[email protected]()\ndef dataset():\n path = \"$GAMMAPY_DATA/tests/spectrum/flux_points/diff_flux_points.fits\"\n table = Table.read(make_path(path))\n table[\"e_ref\"] = table[\"e_ref\"].quantity.to(\"TeV\")\n data = FluxPoints.from_table(table, format=\"gadf-sed\")\n model = SkyModel(\n spectral_model=PowerLawSpectralModel(\n index=2.3, amplitude=\"2e-13 cm-2 s-1 TeV-1\", reference=\"1 TeV\"\n )\n )\n\n obs_table = Table()\n obs_table[\"TELESCOP\"] = [\"CTA\"]\n obs_table[\"OBS_ID\"] = [\"0001\"]\n obs_table[\"INSTRUME\"] = [\"South_Z20_50h\"]\n\n dataset = FluxPointsDataset(model, data, meta_table=obs_table)\n return dataset\n\n\n@requires_data()\ndef test_flux_point_dataset_serialization(tmp_path):\n path = \"$GAMMAPY_DATA/tests/spectrum/flux_points/diff_flux_points.fits\"\n table = Table.read(make_path(path))\n table[\"e_ref\"] = table[\"e_ref\"].quantity.to(\"TeV\")\n data = FluxPoints.from_table(table, format=\"gadf-sed\")\n\n spectral_model = PowerLawSpectralModel(\n index=2.3, amplitude=\"2e-13 cm-2 s-1 TeV-1\", reference=\"1 TeV\"\n )\n model = SkyModel(spectral_model=spectral_model, name=\"test_model\")\n dataset = FluxPointsDataset(model, data, name=\"test_dataset\")\n\n dataset2 = FluxPointsDataset.read(path, name=\"test_dataset2\")\n assert_allclose(dataset.data.dnde.data, dataset2.data.dnde.data)\n assert dataset.mask_safe.data == dataset2.mask_safe.data\n assert dataset2.name == \"test_dataset2\"\n\n Datasets([dataset]).write(\n filename=tmp_path / \"tmp_datasets.yaml\",\n filename_models=tmp_path / \"tmp_models.yaml\",\n )\n\n datasets = Datasets.read(\n filename=tmp_path / \"tmp_datasets.yaml\",\n filename_models=tmp_path / \"tmp_models.yaml\",\n )\n\n new_dataset = datasets[0]\n assert_allclose(new_dataset.data.dnde, dataset.data.dnde, 1e-4)\n if dataset.mask_fit is None:\n assert np.all(new_dataset.mask_fit == dataset.mask_safe)\n assert np.all(new_dataset.mask_safe == dataset.mask_safe)\n assert new_dataset.name == \"test_dataset\"\n\n\n@requires_data()\ndef test_flux_point_dataset_str(dataset):\n assert \"FluxPointsDataset\" in str(dataset)\n # check print if no models present\n dataset.models = None\n assert \"FluxPointsDataset\" in str(dataset)\n\n\n@requires_data()\nclass TestFluxPointFit:\n @requires_dependency(\"iminuit\")\n def test_fit_pwl_minuit(self, dataset):\n fit = Fit()\n result = fit.run(dataset)\n self.assert_result(result, dataset.models)\n\n @requires_dependency(\"sherpa\")\n def test_fit_pwl_sherpa(self, dataset):\n fit = Fit(backend=\"sherpa\", optimize_opts={\"method\": \"simplex\"})\n result = fit.optimize(datasets=[dataset])\n self.assert_result(result, dataset.models)\n\n @staticmethod\n def assert_result(result, models):\n assert result.success\n assert_allclose(result.total_stat, 25.2059, rtol=1e-3)\n\n index 
= models.parameters[\"index\"]\n assert_allclose(index.value, 2.216, rtol=1e-3)\n\n amplitude = models.parameters[\"amplitude\"]\n assert_allclose(amplitude.value, 2.1616e-13, rtol=1e-3)\n\n reference = models.parameters[\"reference\"]\n assert_allclose(reference.value, 1, rtol=1e-8)\n\n @staticmethod\n @requires_dependency(\"iminuit\")\n def test_stat_profile(dataset):\n fit = Fit()\n result = fit.run(datasets=dataset)\n\n model = dataset.models[0].spectral_model\n\n assert_allclose(model.amplitude.error, 1.9e-14, rtol=1e-2)\n\n model.amplitude.scan_n_values = 3\n model.amplitude.scan_n_sigma = 1\n model.amplitude.interp = \"lin\"\n\n profile = fit.stat_profile(\n datasets=dataset,\n parameter=\"amplitude\",\n )\n\n ts_diff = profile[\"stat_scan\"] - result.total_stat\n assert_allclose(\n model.amplitude.scan_values, [1.97e-13, 2.16e-13, 2.35e-13], rtol=1e-2\n )\n assert_allclose(ts_diff, [110.244116, 0.0, 110.292074], rtol=1e-2, atol=1e-7)\n\n value = model.parameters[\"amplitude\"].value\n err = model.parameters[\"amplitude\"].error\n\n model.amplitude.scan_values = np.array([value - err, value, value + err])\n profile = fit.stat_profile(\n datasets=dataset,\n parameter=\"amplitude\",\n )\n\n ts_diff = profile[\"stat_scan\"] - result.total_stat\n assert_allclose(\n model.amplitude.scan_values, [1.97e-13, 2.16e-13, 2.35e-13], rtol=1e-2\n )\n assert_allclose(ts_diff, [110.244116, 0.0, 110.292074], rtol=1e-2, atol=1e-7)\n\n @staticmethod\n @requires_dependency(\"matplotlib\")\n def test_fp_dataset_plot_fit(dataset):\n\n with mpl_plot_check():\n dataset.plot_fit(kwargs_residuals=dict(method=\"diff/model\"))\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport collections.abc\nimport copy\nimport logging\nimport numpy as np\nfrom astropy.coordinates import SkyCoord\nfrom astropy.time import Time\nfrom astropy.units import Quantity\nimport astropy.units as u\nfrom gammapy.utils.fits import LazyFitsData, earth_location_to_dict\nfrom gammapy.utils.testing import Checker\nfrom gammapy.utils.time import time_ref_to_dict, time_relative_to_ref\nfrom astropy.utils import lazyproperty\nfrom .event_list import EventList, EventListChecker\nfrom .filters import ObservationFilter\nfrom .gti import GTI\nfrom .pointing import FixedPointingInfo\n\n__all__ = [\"Observation\", \"Observations\"]\n\nlog = logging.getLogger(__name__)\n\n\nclass Observation:\n \"\"\"In-memory observation.\n\n Parameters\n ----------\n obs_id : int\n Observation id\n obs_info : dict\n Observation info dict\n aeff : `~gammapy.irf.EffectiveAreaTable2D`\n Effective area\n edisp : `~gammapy.irf.EnergyDispersion2D`\n Energy dispersion\n psf : `~gammapy.irf.PSF3D`\n Point spread function\n bkg : `~gammapy.irf.Background3D`\n Background rate model\n rad_max: `~gammapy.irf.RadMax2D`\n Only for point-like IRFs: RAD_MAX table (energy dependent RAD_MAX)\n For a fixed RAD_MAX, create a RadMax2D with a single bin.\n gti : `~gammapy.data.GTI`\n Table with GTI start and stop time\n events : `~gammapy.data.EventList`\n Event list\n obs_filter : `ObservationFilter`\n Observation filter.\n \"\"\"\n\n aeff = LazyFitsData(cache=False)\n edisp = LazyFitsData(cache=False)\n psf = LazyFitsData(cache=False)\n bkg = LazyFitsData(cache=False)\n _rad_max = LazyFitsData(cache=False)\n _events = LazyFitsData(cache=False)\n _gti = LazyFitsData(cache=False)\n\n def __init__(\n self,\n obs_id=None,\n obs_info=None,\n gti=None,\n aeff=None,\n edisp=None,\n psf=None,\n bkg=None,\n rad_max=None,\n events=None,\n obs_filter=None,\n ):\n self.obs_id = obs_id\n self.obs_info = obs_info\n self.aeff = aeff\n self.edisp = edisp\n self.psf = psf\n self.bkg = bkg\n self._rad_max = rad_max\n self._gti = gti\n self._events = events\n self.obs_filter = obs_filter or ObservationFilter()\n\n\n @property\n def rad_max(self):\n # prevent circular import\n from gammapy.irf import RadMax2D\n\n if self._rad_max is not None:\n return self._rad_max\n\n # load once to avoid trigger lazy loading it three times\n aeff = self.aeff\n if aeff is not None and aeff.is_pointlike:\n self._rad_max = RadMax2D.from_irf(aeff)\n return self._rad_max\n\n edisp = self.edisp\n if edisp is not None and edisp.is_pointlike:\n self._rad_max = RadMax2D.from_irf(self.edisp)\n\n return self._rad_max\n\n @property\n def available_irfs(self):\n \"\"\"Which irfs are available\"\"\"\n available_irf = []\n\n for irf in [\"aeff\", \"edisp\", \"psf\", \"bkg\"]:\n available = self.__dict__.get(irf, False)\n available_hdu = self.__dict__.get(f\"_{irf}_hdu\", False)\n\n if available or available_hdu:\n available_irf.append(irf)\n\n return available_irf\n\n @property\n def events(self):\n events = self.obs_filter.filter_events(self._events)\n return events\n\n @property\n def gti(self):\n gti = self.obs_filter.filter_gti(self._gti)\n return gti\n\n @staticmethod\n def _get_obs_info(pointing, deadtime_fraction, time_start, time_stop, reference_time, location):\n \"\"\"Create obs info dict from in memory data\"\"\"\n obs_info = {\n \"RA_PNT\": pointing.icrs.ra.deg,\n \"DEC_PNT\": pointing.icrs.dec.deg,\n \"DEADC\": 1 - deadtime_fraction,\n }\n obs_info.update(time_ref_to_dict(reference_time))\n 
obs_info['TSTART'] = time_relative_to_ref(time_start, obs_info).to_value(u.s)\n obs_info['TSTOP'] = time_relative_to_ref(time_stop, obs_info).to_value(u.s)\n\n if location is not None:\n obs_info.update(earth_location_to_dict(location))\n\n return obs_info\n\n @classmethod\n def create(\n cls,\n pointing,\n location=None,\n obs_id=0,\n livetime=None,\n tstart=None,\n tstop=None,\n irfs=None,\n deadtime_fraction=0.0,\n reference_time=Time(\"2000-01-01 00:00:00\"),\n ):\n \"\"\"Create an observation.\n\n User must either provide the livetime, or the start and stop times.\n\n Parameters\n ----------\n pointing : `~astropy.coordinates.SkyCoord`\n Pointing position\n obs_id : int\n Observation ID as identifier\n livetime : ~astropy.units.Quantity`\n Livetime exposure of the simulated observation\n tstart: `~astropy.time.Time` or `~astropy.units.Quantity`\n Start time of observation as `~astropy.time.Time` or duration\n relative to `reference_time`\n tstop: `astropy.time.Time` or `~astropy.units.Quantity`\n Stop time of observation as `~astropy.time.Time` or duration\n relative to `reference_time`\n irfs: dict\n IRFs used for simulating the observation: `bkg`, `aeff`, `psf`, `edisp`\n deadtime_fraction : float, optional\n Deadtime fraction, defaults to 0\n reference_time : `~astropy.time.Time`\n the reference time to use in GTI definition\n\n Returns\n -------\n obs : `gammapy.data.MemoryObservation`\n \"\"\"\n if tstart is None:\n tstart = reference_time.copy()\n\n if tstop is None:\n tstop = tstart + Quantity(livetime)\n\n gti = GTI.create(tstart, tstop, reference_time=reference_time)\n\n obs_info = cls._get_obs_info(\n pointing=pointing,\n deadtime_fraction=deadtime_fraction,\n time_start=gti.time_start[0],\n time_stop=gti.time_stop[0],\n reference_time=reference_time,\n location=location,\n )\n\n return cls(\n obs_id=obs_id,\n obs_info=obs_info,\n gti=gti,\n aeff=irfs.get(\"aeff\"),\n bkg=irfs.get(\"bkg\"),\n edisp=irfs.get(\"edisp\"),\n psf=irfs.get(\"psf\"),\n )\n\n @property\n def tstart(self):\n \"\"\"Observation start time (`~astropy.time.Time`).\"\"\"\n return self.gti.time_start[0]\n\n @property\n def tstop(self):\n \"\"\"Observation stop time (`~astropy.time.Time`).\"\"\"\n return self.gti.time_stop[0]\n\n @property\n def observation_time_duration(self):\n \"\"\"Observation time duration in seconds (`~astropy.units.Quantity`).\n\n The wall time, including dead-time.\n \"\"\"\n return self.gti.time_sum\n\n @property\n def observation_live_time_duration(self):\n \"\"\"Live-time duration in seconds (`~astropy.units.Quantity`).\n\n The dead-time-corrected observation time.\n\n Computed as ``t_live = t_observation * (1 - f_dead)``\n where ``f_dead`` is the dead-time fraction.\n \"\"\"\n return self.observation_time_duration * (\n 1 - self.observation_dead_time_fraction\n )\n\n @property\n def observation_dead_time_fraction(self):\n \"\"\"Dead-time fraction (float).\n\n Defined as dead-time over observation time.\n\n Dead-time is defined as the time during the observation\n where the detector didn't record events:\n https://en.wikipedia.org/wiki/Dead_time\n https://ui.adsabs.harvard.edu/abs/2004APh....22..285F\n\n The dead-time fraction is used in the live-time computation,\n which in turn is used in the exposure and flux computation.\n \"\"\"\n return 1 - self.obs_info[\"DEADC\"]\n\n @lazyproperty\n def fixed_pointing_info(self):\n \"\"\"Fixed pointing info for this observation (`FixedPointingInfo`).\"\"\"\n meta = self.obs_info.copy() if self.obs_info is not None else {}\n if self.events 
is not None:\n meta.update(self.events.table.meta)\n return FixedPointingInfo(meta)\n\n @property\n def pointing_radec(self):\n \"\"\"Pointing RA / DEC sky coordinates (`~astropy.coordinates.SkyCoord`).\"\"\"\n return self.fixed_pointing_info.radec\n\n @property\n def pointing_altaz(self):\n return self.fixed_pointing_info.altaz\n\n @property\n def pointing_zen(self):\n \"\"\"Pointing zenith angle sky (`~astropy.units.Quantity`).\"\"\"\n return self.fixed_pointing_info.altaz.zen\n\n @property\n def observatory_earth_location(self):\n \"\"\"Observatory location (`~astropy.coordinates.EarthLocation`).\"\"\"\n return self.fixed_pointing_info.location\n\n @lazyproperty\n def target_radec(self):\n \"\"\"Target RA / DEC sky coordinates (`~astropy.coordinates.SkyCoord`).\"\"\"\n lon, lat = (\n self.obs_info.get(\"RA_OBJ\", np.nan),\n self.obs_info.get(\"DEC_OBJ\", np.nan),\n )\n return SkyCoord(lon, lat, unit=\"deg\", frame=\"icrs\")\n\n @property\n def muoneff(self):\n \"\"\"Observation muon efficiency.\"\"\"\n return self.obs_info.get(\"MUONEFF\", 1)\n\n def __str__(self):\n ra = self.pointing_radec.ra.deg\n dec = self.pointing_radec.dec.deg\n\n pointing = f\"{ra:.1f} deg, {dec:.1f} deg\\n\"\n # TODO: Which target was observed?\n # TODO: print info about available HDUs for this observation ...\n return (\n f\"{self.__class__.__name__}\\n\\n\"\n f\"\\tobs id : {self.obs_id} \\n \"\n f\"\\ttstart : {self.tstart.mjd:.2f}\\n\"\n f\"\\ttstop : {self.tstop.mjd:.2f}\\n\"\n f\"\\tduration : {self.observation_time_duration:.2f}\\n\"\n f\"\\tpointing (icrs) : {pointing}\\n\"\n f\"\\tdeadtime fraction : {self.observation_dead_time_fraction:.1%}\\n\"\n )\n\n def check(self, checks=\"all\"):\n \"\"\"Run checks.\n\n This is a generator that yields a list of dicts.\n \"\"\"\n checker = ObservationChecker(self)\n return checker.run(checks=checks)\n\n def peek(self, figsize=(12, 10)):\n \"\"\"Quick-look plots in a few panels.\n\n Parameters\n ----------\n figsize : tuple\n Figure size\n \"\"\"\n import matplotlib.pyplot as plt\n\n n_irfs = len(self.available_irfs)\n\n fig, axes = plt.subplots(\n nrows=n_irfs // 2,\n ncols=2 + n_irfs % 2,\n figsize=figsize,\n gridspec_kw={\"wspace\": 0.25, \"hspace\": 0.25},\n )\n\n axes_dict = dict(zip(self.available_irfs, axes.flatten()))\n\n if \"aeff\" in self.available_irfs:\n self.aeff.plot(ax=axes_dict[\"aeff\"])\n axes_dict[\"aeff\"].set_title(\"Effective area\")\n\n if \"bkg\" in self.available_irfs:\n bkg = self.bkg\n\n if not bkg.has_offset_axis:\n bkg = bkg.to_2d()\n\n bkg.plot(ax=axes_dict[\"bkg\"])\n axes_dict[\"bkg\"].set_title(\"Background rate\")\n else:\n logging.warning(f\"No background model found for obs {self.obs_id}.\")\n\n if \"psf\" in self.available_irfs:\n self.psf.plot_containment_radius_vs_energy(ax=axes_dict[\"psf\"])\n axes_dict[\"psf\"].set_title(\"Point spread function\")\n else:\n logging.warning(f\"No PSF found for obs {self.obs_id}.\")\n\n if \"edisp\" in self.available_irfs:\n self.edisp.plot_bias(ax=axes_dict[\"edisp\"], add_cbar=True)\n axes_dict[\"edisp\"].set_title(\"Energy dispersion\")\n else:\n logging.warning(f\"No energy dispersion found for obs {self.obs_id}.\")\n\n def select_time(self, time_interval):\n \"\"\"Select a time interval of the observation.\n\n Parameters\n ----------\n time_interval : `astropy.time.Time`\n Start and stop time of the selected time interval.\n For now we only support a single time interval.\n\n Returns\n -------\n new_obs : `~gammapy.data.Observation`\n A new observation instance of the specified time 
interval\n \"\"\"\n new_obs_filter = self.obs_filter.copy()\n new_obs_filter.time_filter = time_interval\n obs = copy.deepcopy(self)\n obs.obs_filter = new_obs_filter\n return obs\n\n @classmethod\n def read(cls, event_file, irf_file=None):\n \"\"\"Create an Observation from a Event List and an (optional) IRF file.\n\n Parameters\n ----------\n event_file : str, Path\n path to the .fits file containing the event list and the GTI\n irf_file : str, Path\n (optional) path to the .fits file containing the IRF components,\n if not provided the IRF will be read from the event file\n\n Returns\n -------\n observation : `~gammapy.data.Observation`\n observation with the events and the irf read from the file\n \"\"\"\n from gammapy.irf.io import load_irf_dict_from_file\n\n events = EventList.read(event_file)\n\n gti = GTI.read(event_file)\n\n irf_file = irf_file if irf_file is not None else event_file\n irf_dict = load_irf_dict_from_file(irf_file)\n\n obs_info = events.table.meta\n return cls(\n events=events,\n gti=gti,\n obs_info=obs_info,\n obs_id=obs_info.get(\"OBS_ID\"),\n **irf_dict,\n )\n\n\nclass Observations(collections.abc.MutableSequence):\n \"\"\"Container class that holds a list of observations.\n\n Parameters\n ----------\n observations : list\n A list of `~gammapy.data.Observation`\n \"\"\"\n\n def __init__(self, observations=None):\n self._observations = observations or []\n\n def __getitem__(self, key):\n return self._observations[self.index(key)]\n\n def __delitem__(self, key):\n del self._observations[self.index(key)]\n\n def __setitem__(self, key, obs):\n if isinstance(obs, Observation):\n self._observations[self.index(key)] = obs\n else:\n raise TypeError(f\"Invalid type: {type(obs)!r}\")\n\n def insert(self, idx, obs):\n if isinstance(obs, Observation):\n self._observations.insert(idx, obs)\n else:\n raise TypeError(f\"Invalid type: {type(obs)!r}\")\n\n def __len__(self):\n return len(self._observations)\n\n def __str__(self):\n s = self.__class__.__name__ + \"\\n\"\n s += \"Number of observations: {}\\n\".format(len(self))\n for obs in self:\n s += str(obs)\n return s\n\n def index(self, key):\n if isinstance(key, (int, slice)):\n return key\n elif isinstance(key, str):\n return self.ids.index(key)\n elif isinstance(key, Observation):\n return self._observations.index(key)\n else:\n raise TypeError(f\"Invalid type: {type(key)!r}\")\n\n @property\n def ids(self):\n \"\"\"List of obs IDs (`list`)\"\"\"\n return [str(obs.obs_id) for obs in self]\n\n def select_time(self, time_intervals):\n \"\"\"Select a time interval of the observations.\n\n Parameters\n ----------\n time_intervals : `astropy.time.Time` or list of `astropy.time.Time`\n list of Start and stop time of the time intervals or one Time interval\n\n Returns\n -------\n new_observations : `~gammapy.data.Observations`\n A new Observations instance of the specified time intervals\n \"\"\"\n new_obs_list = []\n if isinstance(time_intervals, Time):\n time_intervals = [time_intervals]\n\n for time_interval in time_intervals:\n for obs in self:\n if (obs.tstart < time_interval[1]) & (obs.tstop > time_interval[0]):\n new_obs = obs.select_time(time_interval)\n new_obs_list.append(new_obs)\n\n return self.__class__(new_obs_list)\n\n def _ipython_key_completions_(self):\n return self.ids\n\n\nclass ObservationChecker(Checker):\n \"\"\"Check an observation.\n\n Checks data format and a bit about the content.\n \"\"\"\n\n CHECKS = {\n \"events\": \"check_events\",\n \"gti\": \"check_gti\",\n \"aeff\": \"check_aeff\",\n \"edisp\": 
\"check_edisp\",\n \"psf\": \"check_psf\",\n }\n\n def __init__(self, observation):\n self.observation = observation\n\n def _record(self, level=\"info\", msg=None):\n return {\"level\": level, \"obs_id\": self.observation.obs_id, \"msg\": msg}\n\n def check_events(self):\n yield self._record(level=\"debug\", msg=\"Starting events check\")\n\n try:\n events = self.observation.events\n except Exception:\n yield self._record(level=\"warning\", msg=\"Loading events failed\")\n return\n\n yield from EventListChecker(events).run()\n\n # TODO: split this out into a GTIChecker\n def check_gti(self):\n yield self._record(level=\"debug\", msg=\"Starting gti check\")\n\n try:\n gti = self.observation.gti\n except Exception:\n yield self._record(level=\"warning\", msg=\"Loading GTI failed\")\n return\n\n if len(gti.table) == 0:\n yield self._record(level=\"error\", msg=\"GTI table has zero rows\")\n\n columns_required = [\"START\", \"STOP\"]\n for name in columns_required:\n if name not in gti.table.colnames:\n yield self._record(level=\"error\", msg=f\"Missing table column: {name!r}\")\n\n # TODO: Check that header keywords agree with table entries\n # TSTART, TSTOP, MJDREFI, MJDREFF\n\n # Check that START and STOP times are consecutive\n # times = np.ravel(self.table['START'], self.table['STOP'])\n # # TODO: not sure this is correct ... add test with a multi-gti table from Fermi.\n # if not np.all(np.diff(times) >= 0):\n # yield 'GTIs are not consecutive or sorted.'\n\n # TODO: add reference times for all instruments and check for this\n # Use TELESCOP header key to check which instrument it is.\n def _check_times(self):\n \"\"\"Check if various times are consistent.\n\n The headers and tables of the FITS EVENTS and GTI extension\n contain various observation and event time information.\n \"\"\"\n # http://fermi.gsfc.nasa.gov/ssc/data/analysis/documentation/Cicerone/Cicerone_Data/Time_in_ScienceTools.html\n # https://hess-confluence.desy.de/confluence/display/HESS/HESS+FITS+data+-+References+and+checks#HESSFITSdata-Referencesandchecks-Time\n telescope_met_refs = {\n \"FERMI\": Time(\"2001-01-01T00:00:00\"),\n \"HESS\": Time(\"2001-01-01T00:00:00\"),\n }\n\n meta = self.dset.event_list.table.meta\n telescope = meta[\"TELESCOP\"]\n\n if telescope in telescope_met_refs.keys():\n dt = self.time_ref - telescope_met_refs[telescope]\n if dt > self.accuracy[\"time\"]:\n yield self._record(\n level=\"error\", msg=\"Reference time incorrect for telescope\"\n )\n\n def check_aeff(self):\n yield self._record(level=\"debug\", msg=\"Starting aeff check\")\n\n try:\n aeff = self.observation.aeff\n except Exception:\n yield self._record(level=\"warning\", msg=\"Loading aeff failed\")\n return\n\n # Check that thresholds are meaningful for aeff\n if (\n \"LO_THRES\" in aeff.meta\n and \"HI_THRES\" in aeff.meta\n and aeff.meta[\"LO_THRES\"] >= aeff.meta[\"HI_THRES\"]\n ):\n yield self._record(\n level=\"error\", msg=\"LO_THRES >= HI_THRES in effective area meta data\"\n )\n\n # Check that data isn't all null\n if np.max(aeff.data.data) <= 0:\n yield self._record(\n level=\"error\", msg=\"maximum entry of effective area is <= 0\"\n )\n\n def check_edisp(self):\n yield self._record(level=\"debug\", msg=\"Starting edisp check\")\n\n try:\n edisp = self.observation.edisp\n except Exception:\n yield self._record(level=\"warning\", msg=\"Loading edisp failed\")\n return\n\n # Check that data isn't all null\n if np.max(edisp.data.data) <= 0:\n yield self._record(level=\"error\", msg=\"maximum entry of edisp is <= 0\")\n\n 
def check_psf(self):\n yield self._record(level=\"debug\", msg=\"Starting psf check\")\n\n try:\n self.observation.psf\n except Exception:\n yield self._record(level=\"warning\", msg=\"Loading psf failed\")\n return\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport logging\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom astropy.time import Time\nfrom gammapy.data import FixedPointingInfo, PointingInfo\nfrom gammapy.utils.testing import assert_time_allclose, requires_data\n\n\n@requires_data()\nclass TestFixedPointingInfo:\n @classmethod\n def setup_class(cls):\n filename = \"$GAMMAPY_DATA/tests/pointing_table.fits.gz\"\n cls.fpi = FixedPointingInfo.read(filename)\n\n def test_location(self):\n lon, lat, height = self.fpi.location.geodetic\n assert_allclose(lon.deg, 16.5002222222222)\n assert_allclose(lat.deg, -23.2717777777778)\n assert_allclose(height.value, 1834.999999999783)\n\n def test_time_ref(self):\n expected = Time(51910.00074287037, format=\"mjd\", scale=\"tt\")\n assert_time_allclose(self.fpi.time_ref, expected)\n\n def test_time_start(self):\n time = self.fpi.time_start\n expected = Time(53025.826414166666, format=\"mjd\", scale=\"tt\")\n assert_time_allclose(time, expected)\n\n def test_time_stop(self):\n time = self.fpi.time_stop\n expected = Time(53025.844770648146, format=\"mjd\", scale=\"tt\")\n assert_time_allclose(time, expected)\n\n def test_duration(self):\n duration = self.fpi.duration\n assert_allclose(duration.sec, 1586.0000000044238)\n\n def test_radec(self):\n pos = self.fpi.radec\n assert_allclose(pos.ra.deg, 83.633333333333)\n assert_allclose(pos.dec.deg, 24.51444444)\n assert pos.name == \"icrs\"\n\n def test_altaz(self):\n pos = self.fpi.altaz\n assert_allclose(pos.az.deg, 7.48272)\n assert_allclose(pos.alt.deg, 41.84191)\n assert pos.name == \"altaz\"\n\n\n@requires_data()\nclass TestPointingInfo:\n @classmethod\n def setup_class(cls):\n filename = \"$GAMMAPY_DATA/tests/pointing_table.fits.gz\"\n cls.pointing_info = PointingInfo.read(filename)\n\n def test_str(self):\n ss = str(self.pointing_info)\n assert \"Pointing info\" in ss\n\n def test_location(self):\n lon, lat, height = self.pointing_info.location.geodetic\n assert_allclose(lon.deg, 16.5002222222222)\n assert_allclose(lat.deg, -23.2717777777778)\n assert_allclose(height.value, 1834.999999999783)\n\n def test_time_ref(self):\n expected = Time(51910.00074287037, format=\"mjd\", scale=\"tt\")\n assert_time_allclose(self.pointing_info.time_ref, expected)\n\n def test_table(self):\n assert len(self.pointing_info.table) == 100\n\n def test_time(self):\n time = self.pointing_info.time\n assert len(time) == 100\n expected = Time(53025.826414166666, format=\"mjd\", scale=\"tt\")\n assert_time_allclose(time[0], expected)\n\n def test_duration(self):\n duration = self.pointing_info.duration\n assert_allclose(duration.sec, 1586.0000000044238)\n\n def test_radec(self):\n pos = self.pointing_info.radec[0]\n assert_allclose(pos.ra.deg, 83.633333333333)\n assert_allclose(pos.dec.deg, 24.51444444)\n assert pos.name == \"icrs\"\n\n def test_altaz(self):\n pos = self.pointing_info.altaz[0]\n assert_allclose(pos.az.deg, 11.45751357)\n assert_allclose(pos.alt.deg, 41.34088901)\n assert pos.name == \"altaz\"\n\n def test_altaz_from_table(self):\n pos = self.pointing_info.altaz_from_table[0]\n assert_allclose(pos.az.deg, 11.20432353385406)\n assert_allclose(pos.alt.deg, 41.37921408774436)\n assert pos.name == \"altaz\"\n\n def test_altaz_interpolate(self):\n time = self.pointing_info.time[0]\n pos = self.pointing_info.altaz_interpolate(time)\n assert_allclose(pos.az.deg, 11.45751357)\n assert_allclose(pos.alt.deg, 41.34088901)\n assert pos.name == \"altaz\"\n\n\n\ndef 
test_altaz_without_location(caplog):\n meta = {'ALT_PNT': 20.0, 'AZ_PNT': 170.0}\n pointing = FixedPointingInfo(meta)\n\n with caplog.at_level(logging.WARNING):\n altaz = pointing.altaz\n assert altaz.alt.deg == 20.0\n assert altaz.az.deg == 170.0\n\n\n pointing = FixedPointingInfo({})\n\n with caplog.at_level(logging.WARNING):\n altaz = pointing.altaz\n assert np.isnan(altaz.alt.value)\n assert np.isnan(altaz.az.value)\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom gammapy.estimators.utils import find_peaks, resample_energy_edges\nfrom gammapy.maps import Map, MapAxis\n\n\nclass TestFindPeaks:\n def test_simple(self):\n \"\"\"Test a simple example\"\"\"\n image = Map.create(npix=(10, 5), unit=\"s\")\n image.data[3, 3] = 11\n image.data[3, 4] = 10\n image.data[3, 5] = 12\n image.data[3, 6] = np.nan\n image.data[0, 9] = 1e20\n\n table = find_peaks(image, threshold=3)\n\n assert len(table) == 3\n assert table[\"value\"].unit == \"s\"\n assert table[\"ra\"].unit == \"deg\"\n assert table[\"dec\"].unit == \"deg\"\n\n row = table[0]\n assert tuple((row[\"x\"], row[\"y\"])) == (9, 0)\n assert_allclose(row[\"value\"], 1e20)\n assert_allclose(row[\"ra\"], 359.55)\n assert_allclose(row[\"dec\"], -0.2)\n\n row = table[1]\n assert tuple((row[\"x\"], row[\"y\"])) == (5, 3)\n assert_allclose(row[\"value\"], 12)\n\n def test_no_peak(self):\n image = Map.create(npix=(10, 5))\n image.data[3, 5] = 12\n\n table = find_peaks(image, threshold=12.1)\n assert len(table) == 0\n\n def test_constant(self):\n image = Map.create(npix=(10, 5))\n\n table = find_peaks(image, threshold=3)\n assert len(table) == 0\n\n def test_flat_map(self):\n \"\"\"Test a simple example\"\"\"\n axis1 = MapAxis.from_edges([1, 2], name=\"axis1\")\n axis2 = MapAxis.from_edges([9, 10], name=\"axis2\")\n image = Map.create(npix=(10, 5), unit=\"s\", axes=[axis1, axis2])\n image.data[..., 3, 3] = 11\n image.data[..., 3, 4] = 10\n image.data[..., 3, 5] = 12\n image.data[..., 3, 6] = np.nan\n image.data[..., 0, 9] = 1e20\n\n table = find_peaks(image, threshold=3)\n row = table[0]\n\n assert len(table) == 3\n assert_allclose(row[\"value\"], 1e20)\n assert_allclose(row[\"ra\"], 359.55)\n assert_allclose(row[\"dec\"], -0.2)\n\n\ndef test_resample_energy_edges(spectrum_dataset):\n resampled_energy_edges = resample_energy_edges(spectrum_dataset, conditions={})\n assert (resampled_energy_edges == spectrum_dataset._geom.axes[\"energy\"].edges).all()\n\n with pytest.raises(ValueError):\n resample_energy_edges(\n spectrum_dataset,\n conditions={\"counts_min\": spectrum_dataset.counts.data.sum() + 1},\n )\n\n resampled_energy_edges = resample_energy_edges(\n spectrum_dataset,\n conditions={\"excess_min\": spectrum_dataset.excess.data[-1] + 1},\n )\n grouped = spectrum_dataset.resample_energy_axis(\n MapAxis.from_edges(edges=resampled_energy_edges, name=\"energy\")\n )\n\n assert grouped.counts.data.shape == (29, 1, 1)\n assert_allclose(np.squeeze(grouped.counts)[-1], 2518.0)\n assert_allclose(np.squeeze(grouped.background)[-1], 200)\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport numpy as np\nimport scipy.ndimage\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy.table import Table\nfrom gammapy.datasets import SpectrumDataset, SpectrumDatasetOnOff\nfrom gammapy.datasets.map import MapEvaluator\nfrom gammapy.maps import WcsNDMap\nfrom gammapy.modeling.models import (\n ConstantFluxSpatialModel,\n PowerLawSpectralModel,\n SkyModel,\n)\n\n__all__ = [\"estimate_exposure_reco_energy\", \"find_peaks\", \"resample_energy_edges\"]\n\n\ndef find_peaks(image, threshold, min_distance=1):\n \"\"\"Find local peaks in an image.\n\n This is a very simple peak finder, that finds local peaks\n (i.e. maxima) in images above a given ``threshold`` within\n a given ``min_distance`` around each given pixel.\n\n If you get multiple spurious detections near a peak, usually\n it's best to smooth the image a bit, or to compute it using\n a different method in the first place to result in a smooth image.\n You can also increase the ``min_distance`` parameter.\n\n The output table contains one row per peak and the following columns:\n\n - ``x`` and ``y`` are the pixel coordinates (first pixel at zero)\n - ``ra`` and ``dec`` are the RA / DEC sky coordinates (ICRS frame)\n - ``value`` is the pixel value\n\n It is sorted by peak value, starting with the highest value.\n\n If there are no pixel values above the threshold, an empty table is returned.\n\n There are more featureful peak finding and source detection methods\n e.g. in the ``photutils`` or ``scikit-image`` Python packages.\n\n Parameters\n ----------\n image : `~gammapy.maps.WcsNDMap`\n Image like Map\n threshold : float or array-like\n The data value or pixel-wise data values to be used for the\n detection threshold. A 2D ``threshold`` must have the same\n shape as the map ``data``.\n min_distance : int or `~astropy.units.Quantity`\n Minimum distance between peaks. 
An integer value is interpreted\n as pixels.\n\n Returns\n -------\n output : `~astropy.table.Table`\n Table with parameters of detected peaks\n \"\"\"\n # Input validation\n\n if not isinstance(image, WcsNDMap):\n raise TypeError(\"find_peaks only supports WcsNDMap\")\n\n if not image.geom.is_flat:\n raise ValueError(\n \"find_peaks only supports flat Maps, with no spatial axes of length 1.\"\n )\n\n if isinstance(min_distance, (str, u.Quantity)):\n min_distance = np.mean(u.Quantity(min_distance) / image.geom.pixel_scales)\n min_distance = np.round(min_distance).to_value(\"\")\n\n size = 2 * min_distance + 1\n\n # Remove non-finite values to avoid warnings or spurious detection\n data = image.sum_over_axes(keepdims=False).data\n data[~np.isfinite(data)] = np.nanmin(data)\n\n # Handle edge case of constant data; treat as no peak\n if np.all(data == data.flat[0]):\n return Table()\n\n # Run peak finder\n data_max = scipy.ndimage.maximum_filter(data, size=size, mode=\"constant\")\n mask = (data == data_max) & (data > threshold)\n y, x = mask.nonzero()\n value = data[y, x]\n\n # Make and return results table\n\n if len(value) == 0:\n return Table()\n\n coord = SkyCoord.from_pixel(x, y, wcs=image.geom.wcs).icrs\n\n table = Table()\n table[\"value\"] = value * image.unit\n table[\"x\"] = x\n table[\"y\"] = y\n table[\"ra\"] = coord.ra\n table[\"dec\"] = coord.dec\n\n table[\"ra\"].format = \".5f\"\n table[\"dec\"].format = \".5f\"\n table[\"value\"].format = \".5g\"\n\n table.sort(\"value\")\n table.reverse()\n\n return table\n\n\ndef estimate_exposure_reco_energy(dataset, spectral_model=None, normalize=True):\n \"\"\"Estimate an exposure map in reconstructed energy.\n\n Parameters\n ----------\n dataset : `~gammapy.datasets.MapDataset` or `~gammapy.datasets.MapDatasetOnOff`\n the input dataset\n spectral_model : `~gammapy.modeling.models.SpectralModel`\n assumed spectral shape. 
If none, a Power Law of index 2 is assumed\n normalize : bool\n Normalize the exposure to the total integrated flux of the spectral model.\n When not normalized it directly gives the predicted counts from the spectral\n model.\n\n Returns\n -------\n exposure : `Map`\n Exposure map in reconstructed energy\n \"\"\"\n if spectral_model is None:\n spectral_model = PowerLawSpectralModel()\n\n model = SkyModel(\n spatial_model=ConstantFluxSpatialModel(), spectral_model=spectral_model\n )\n\n energy_axis = dataset._geom.axes[\"energy\"]\n\n if dataset.edisp is not None:\n edisp = dataset.edisp.get_edisp_kernel(position=None, energy_axis=energy_axis)\n else:\n edisp = None\n\n eval = MapEvaluator(model=model, exposure=dataset.exposure, edisp=edisp)\n reco_exposure = eval.compute_npred()\n\n if normalize:\n ref_flux = spectral_model.integral(\n energy_axis.edges[:-1], energy_axis.edges[1:]\n )\n reco_exposure = reco_exposure / ref_flux[:, np.newaxis, np.newaxis]\n\n return reco_exposure\n\n\ndef _satisfies_conditions(info_dict, conditions):\n satisfies = True\n for key in conditions.keys():\n satisfies &= info_dict[key.strip(\"_min\")] > conditions[key]\n return satisfies\n\n\ndef resample_energy_edges(dataset, conditions={}):\n \"\"\"Return energy edges that satisfy given condition on the per bin statistics.\n\n Parameters\n ----------\n dataset:`~gammapy.datasets.SpectrumDataset` or `~gammapy.datasets.SpectrumDatasetOnOff`\n the input dataset\n conditions : dict\n Keyword arguments containing the per-bin conditions used to resample the axis.\n Available options are: 'counts_min', 'background_min', 'excess_min', 'sqrt_ts_min', 'npred_min',\n 'npred_background_min', 'npred_signal_min'.\n Returns\n -------\n energy_edges : list of `~astropy.units.Quantity`\n Energy edges for the resampled energy axis.\n \"\"\"\n if not isinstance(dataset, (SpectrumDataset, SpectrumDatasetOnOff)):\n raise NotImplementedError(\n \"This method is currently supported for spectral datasets only.\"\n )\n\n available_conditions = [\n \"counts_min\",\n \"background_min\",\n \"excess_min\",\n \"sqrt_ts_min\",\n \"npred_min\",\n \"npred_background_min\",\n \"npred_signal_min\",\n ]\n for key in conditions.keys():\n if key not in available_conditions:\n raise ValueError(\n f\"Unrecognized option {key}. The available methods are: {available_conditions}.\"\n )\n\n axis = dataset.counts.geom.axes[\"energy\"]\n energy_min_all, energy_max_all = dataset.energy_range_total\n energy_edges = [energy_max_all]\n\n while energy_edges[-1] > energy_min_all:\n for energy_min in reversed(axis.edges_min):\n if energy_min >= energy_edges[-1]:\n continue\n elif len(energy_edges) == 1 and energy_min == energy_min_all:\n raise ValueError(\"The given conditions cannot be met.\")\n\n sliced = dataset.slice_by_energy(\n energy_min=energy_min, energy_max=energy_edges[-1]\n )\n\n with np.errstate(invalid=\"ignore\"):\n info = sliced.info_dict()\n\n if _satisfies_conditions(info, conditions):\n energy_edges.append(energy_min)\n break\n return u.Quantity(energy_edges[::-1])\n"
] | [
[
"numpy.all",
"numpy.array",
"numpy.testing.assert_allclose"
],
[
"numpy.max",
"matplotlib.pyplot.subplots"
],
[
"numpy.isnan",
"numpy.testing.assert_allclose"
],
[
"numpy.squeeze",
"numpy.testing.assert_allclose"
],
[
"numpy.isfinite",
"numpy.nanmin",
"numpy.all",
"numpy.round",
"numpy.errstate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Anna1015/WorkAtTokyo | [
"ed37b746c38c2f668feac20044fe4dec38336086"
] | [
"rnn.py"
] | [
"import torch\nfrom torch import nn\nimport torchvision.datasets as dsets\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nimport os\nimport numpy as np\nimport torch.utils.data as Data\n\n# Hyper Parameters\nEPOCH = 10 # train the training data n times, to save time, we just train 1 epoch\nBATCH_SIZE = 64\nTIME_STEP = 10000 # rnn time step / image height\nINPUT_SIZE = 39 # rnn input size / image width\nLR = 0.01 # learning rate\ntrainDIR = \"C:/testData/very small test data set/training data set\"\ntestDIR = \"C:/testData/very small test data set/test data set\"\nmodelDIR = \"C:/testData/very small test data set\"\nmodelFILE = \"1D-RNNv5.pth\"\nmodelPATH = modelDIR + '/' + modelFILE\n\n\n# Build the training set\ntrainFILES = os.listdir(trainDIR)\n\ntrainMatrix = np.zeros(shape=(1,TIME_STEP,40))\nfor f in trainFILES:\n if f[0]=='.':\n continue\n tempFILE = trainDIR + '/' + f\n tempMatrix = np.loadtxt(tempFILE)\n tempMatrix = tempMatrix[np.newaxis,0:TIME_STEP,:]\n trainMatrix = np.vstack((trainMatrix,tempMatrix))\n\nx_train = trainMatrix[:,:,1:]\ny_train = np.array([0,0,1])\ny_train = torch.from_numpy(y_train).float()\nx_train = torch.from_numpy(x_train).float()\n\ntrainDataSet = Data.TensorDataset(x_train, y_train)\n\ntrain_loader = Data.DataLoader(\n dataset=trainDataSet, # torch TensorDataset format\n batch_size=BATCH_SIZE, # mini batch size\n shuffle=True, # whether to shuffle the data\n # num_workers=1, # use multiple worker threads to read the data\n)\n\n\n\n\n\n# Build the test set\ntestFILES = os.listdir(testDIR)\n\ntestMatrix = np.zeros(shape=(1,TIME_STEP,40))\nfor f in testFILES:\n if f[0]=='.':\n continue\n tempFILE = testDIR + '/' + f\n tempMatrix = np.loadtxt(tempFILE)\n tempMatrix = tempMatrix[np.newaxis,0:TIME_STEP,:]\n testMatrix = np.vstack((testMatrix,tempMatrix))\n\nx_test = testMatrix[:,:,1:]\ny_test = np.array([0,0,1])\nx_test = torch.from_numpy(x_test).float()\n\n\n\n\nclass RNN(nn.Module):\n def __init__(self):\n super(RNN, self).__init__()\n\n self.rnn = nn.LSTM( # if use nn.RNN(), it hardly learns\n input_size=INPUT_SIZE,\n hidden_size=64, # rnn hidden unit\n num_layers=1, # number of rnn layer\n batch_first=True, # input & output will have batch size as the 1st dimension. e.g. (batch, time_step, input_size)\n )\n\n self.out = nn.Linear(64, 2)\n\n def forward(self, x):\n r_out, (h_n, h_c) = self.rnn(x, None) # None represents zero initial hidden state\n\n # choose r_out at the last time step\n out = self.out(r_out[:, -1, :])\n return out\n\n\nrnn = RNN()\nprint(rnn)\n\noptimizer = torch.optim.Adam(rnn.parameters(), lr=LR) # optimize all cnn parameters\nloss_func = nn.CrossEntropyLoss() # the target label is not one-hotted\n#\n# training and testing\nfor epoch in range(EPOCH):\n print(epoch)\n for step, (b_x, b_y) in enumerate(train_loader): # gives batch data\n b_x = b_x.view(-1, TIME_STEP, 39) # reshape x to (batch, time_step, input_size)\n print(step)\n output = rnn(b_x) # rnn output\n b_y = b_y.type(torch.LongTensor)\n loss = loss_func(output, b_y) # cross entropy loss\n optimizer.zero_grad() # clear gradients for this training step\n loss.backward() # backpropagation, compute gradients\n optimizer.step() # apply gradients\n\n if step % 5 == 0:\n test_output = rnn(x_test) # (samples, time_step, input_size)\n pred_y = torch.max(test_output, 1)[1].data.numpy()\n accuracy = float((pred_y == y_test).astype(int).sum()) / float(y_test.size)\n print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.numpy(), '| test accuracy: %.2f' % accuracy)\n\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.max",
"torch.nn.LSTM",
"torch.utils.data.TensorDataset",
"numpy.vstack",
"torch.utils.data.DataLoader",
"torch.from_numpy",
"torch.nn.Linear",
"numpy.array",
"numpy.zeros",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
youngsjjn/MemSeg | [
"a3daf8039dc2c763d366f4bfd07c87416cf8ec81"
] | [
"tool/test.py"
] | [
"import os\nimport time\nimport logging\nimport argparse\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn.functional as F\nimport torch.nn.parallel\nimport torch.utils.data\nimport torch.nn as nn\n\nfrom util import dataset, transform, config\nfrom util.util import AverageMeter, intersectionAndUnion, check_makedirs, colorize\n\ncv2.ocl.setUseOpenCL(False)\n\nlabel_mapping = {\n 7: 0, 8: 1, 11: 2, 12: 3,\n 13: 4, 17: 5,\n 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11,\n 25: 12, 26: 13, 27: 14, 28: 15,\n 31: 16, 32: 17, 33: 18}\n\ndef get_parser():\n parser = argparse.ArgumentParser(description='PyTorch Semantic Segmentation')\n parser.add_argument('--config', type=str, default='config/cityscapes/cityscapes_deeplab50mem.yaml', help='config file')\n parser.add_argument('opts', help='see config/ade20k/ade20k_pspnet50.yaml for all options', default=None, nargs=argparse.REMAINDER)\n args = parser.parse_args()\n assert args.config is not None\n cfg = config.load_cfg_from_cfg_file(args.config)\n if args.opts is not None:\n cfg = config.merge_cfg_from_list(cfg, args.opts)\n return cfg\n\n\ndef get_logger():\n logger_name = \"main-logger\"\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.INFO)\n handler = logging.StreamHandler()\n fmt = \"[%(asctime)s %(levelname)s %(filename)s line %(lineno)d %(process)d] %(message)s\"\n handler.setFormatter(logging.Formatter(fmt))\n logger.addHandler(handler)\n return logger\n\n\ndef check(args):\n assert args.classes > 1\n assert args.zoom_factor in [1, 2, 4, 8]\n assert args.split in ['train', 'val', 'test']\n if args.arch == 'psp':\n assert (args.train_h - 1) % 8 == 0 and (args.train_w - 1) % 8 == 0\n elif args.arch == \"deeplabv3\":\n assert args.train_h % 1 == 0 and args.train_w % 1 == 0\n elif args.arch == 'psa':\n if args.compact:\n args.mask_h = (args.train_h - 1) // (8 * args.shrink_factor) + 1\n args.mask_w = (args.train_w - 1) // (8 * args.shrink_factor) + 1\n else:\n assert (args.mask_h is None and args.mask_w is None) or (args.mask_h is not None and args.mask_w is not None)\n if args.mask_h is None and args.mask_w is None:\n args.mask_h = 2 * ((args.train_h - 1) // (8 * args.shrink_factor) + 1) - 1\n args.mask_w = 2 * ((args.train_w - 1) // (8 * args.shrink_factor) + 1) - 1\n else:\n assert (args.mask_h % 2 == 1) and (args.mask_h >= 3) and (\n args.mask_h <= 2 * ((args.train_h - 1) // (8 * args.shrink_factor) + 1) - 1)\n assert (args.mask_w % 2 == 1) and (args.mask_w >= 3) and (\n args.mask_w <= 2 * ((args.train_h - 1) // (8 * args.shrink_factor) + 1) - 1)\n else:\n raise Exception('architecture not supported yet'.format(args.arch))\n\n\ndef main():\n global args, logger\n args = get_parser()\n # check(args)\n logger = get_logger()\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = ','.join(str(x) for x in args.test_gpu)\n logger.info(args)\n logger.info(\"=> creating model ...\")\n logger.info(\"Classes: {}\".format(args.classes))\n\n value_scale = 255\n mean = [0.485, 0.456, 0.406]\n # mean = [0.39068785, 0.40521392, 0.41434407]\n mean = [item * value_scale for item in mean]\n std = [0.229, 0.224, 0.225]\n # std = [0.29652068, 0.30514979, 0.30080369]\n std = [item * value_scale for item in std]\n\n gray_folder = os.path.join(args.save_folder, 'gray')\n color_folder = os.path.join(args.save_folder, 'color')\n\n test_transform = transform.Compose([transform.ToTensor()])\n test_data = dataset.SemData(split=args.split, data_root=args.data_root, data_list=args.test_list, 
transform=test_transform)\n index_start = args.index_start\n if args.index_step == 0:\n index_end = len(test_data.data_list)\n else:\n index_end = min(index_start + args.index_step, len(test_data.data_list))\n test_data.data_list = test_data.data_list[index_start:index_end]\n test_loader = torch.utils.data.DataLoader(test_data, batch_size=1, shuffle=False, num_workers=args.workers, pin_memory=True)\n colors = np.loadtxt(args.colors_path).astype('uint8')\n names = [line.rstrip('\\n') for line in open(args.names_path)]\n\n criterion = nn.CrossEntropyLoss(ignore_index=args.ignore_label)\n if not args.has_prediction:\n if args.arch == 'psp':\n from model.pspnet import PSPNet\n model = PSPNet(backbone=args.backbone, output_stride=args.output_stride, layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor, pretrained=False)\n elif args.arch == 'psa':\n from model.psanet import PSANet\n model = PSANet(layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor, compact=args.compact,\n shrink_factor=args.shrink_factor, mask_h=args.mask_h, mask_w=args.mask_w,\n normalization_factor=args.normalization_factor, psa_softmax=args.psa_softmax, pretrained=False)\n elif args.arch == 'deeplabv3':\n from model.deeplabv3 import Deeplabv3\n model = Deeplabv3(layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor,\n backbone=args.backbone, output_stride=args.output_stride, pretrained=False,\n criterion=criterion)\n elif args.arch == 'danet':\n from model.danet import DANet\n model = DANet(layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor,\n backbone=args.backbone, output_stride=args.output_stride, pretrained=False,\n criterion=criterion)\n elif args.arch == 'trseg':\n from model.transformnet import TransformNet\n model = TransformNet(layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor,\n backbone=args.backbone, output_stride=args.output_stride, pretrained=False,\n criterion=criterion)\n elif args.arch == 'hrnet':\n from model.hrnet import HighResolutionNet\n model = HighResolutionNet(args)\n\n logger.info(model)\n model = torch.nn.DataParallel(model).cuda()\n # model = model.cuda()\n cudnn.benchmark = True\n # if os.path.isfile(args.model_path):\n # logger.info(\"=> loading checkpoint '{}'\".format(args.model_path))\n # checkpoint = torch.load(args.model_path)\n # model.load_state_dict(checkpoint['state_dict'], strict=True)\n # logger.info(\"=> loaded checkpoint '{}'\".format(args.model_path))\n # else:\n # raise RuntimeError(\"=> no checkpoint found at '{}'\".format(args.model_path))\n eval(test_loader, test_data.data_list, model, args.classes, mean, std, args.base_size, args.test_h, args.test_w, args.scales, gray_folder, color_folder, colors)\n if args.split != 'test':\n cal_acc(test_data.data_list, gray_folder, args.classes, names)\n\n\ndef net_process(model, image, mean, std=None, flip=True):\n input = torch.from_numpy(image.transpose((2, 0, 1))).float()\n if std is None:\n for t, m in zip(input, mean):\n t.sub_(m)\n else:\n for t, m, s in zip(input, mean, std):\n t.sub_(m).div_(s)\n input = input.unsqueeze(0).cuda()\n if flip:\n input = torch.cat([input, input.flip(3)], 0)\n with torch.no_grad():\n output = model(input)\n _, _, h_i, w_i = input.shape\n _, _, h_o, w_o = output.shape\n if (h_o != h_i) or (w_o != w_i):\n output = F.interpolate(output, (h_i, w_i), mode='bilinear', align_corners=True)\n output = F.softmax(output, dim=1)\n if flip:\n output = (output[0] + output[1].flip(2)) / 2\n else:\n output = output[0]\n output 
= output.data.cpu().numpy()\n output = output.transpose(1, 2, 0)\n return output\n\n\ndef scale_process(model, image, classes, crop_h, crop_w, h, w, mean, std=None, stride_rate=2/3):\n ori_h, ori_w, _ = image.shape\n pad_h = max(crop_h - ori_h, 0)\n pad_w = max(crop_w - ori_w, 0)\n pad_h_half = int(pad_h / 2)\n pad_w_half = int(pad_w / 2)\n if pad_h > 0 or pad_w > 0:\n image = cv2.copyMakeBorder(image, pad_h_half, pad_h - pad_h_half, pad_w_half, pad_w - pad_w_half, cv2.BORDER_CONSTANT, value=mean)\n new_h, new_w, _ = image.shape\n stride_h = int(np.ceil(crop_h*stride_rate))\n stride_w = int(np.ceil(crop_w*stride_rate))\n grid_h = int(np.ceil(float(new_h-crop_h)/stride_h) + 1)\n grid_w = int(np.ceil(float(new_w-crop_w)/stride_w) + 1)\n prediction_crop = np.zeros((new_h, new_w, classes), dtype=float)\n count_crop = np.zeros((new_h, new_w), dtype=float)\n for index_h in range(0, grid_h):\n for index_w in range(0, grid_w):\n s_h = index_h * stride_h\n e_h = min(s_h + crop_h, new_h)\n s_h = e_h - crop_h\n s_w = index_w * stride_w\n e_w = min(s_w + crop_w, new_w)\n s_w = e_w - crop_w\n image_crop = image[s_h:e_h, s_w:e_w].copy()\n count_crop[s_h:e_h, s_w:e_w] += 1\n prediction_crop[s_h:e_h, s_w:e_w, :] += net_process(model, image_crop, mean, std)\n prediction_crop /= np.expand_dims(count_crop, 2)\n prediction_crop = prediction_crop[pad_h_half:pad_h_half+ori_h, pad_w_half:pad_w_half+ori_w]\n prediction = cv2.resize(prediction_crop, (w, h), interpolation=cv2.INTER_LINEAR)\n return prediction\n\ndef trainID2labelID(img):\n temp = np.copy(img)\n for k, v in label_mapping.items():\n temp[img == v] = k\n return temp\n\ndef labelID2trainID(img):\n temp = np.copy(img)\n for k, v in label_mapping.items():\n temp[img == k] = v\n return temp\n\ndef eval(test_loader, data_list, model, classes, mean, std, base_size, crop_h, crop_w, scales, gray_folder, color_folder, colors):\n logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')\n data_time = AverageMeter()\n batch_time = AverageMeter()\n model.eval()\n end = time.time()\n for i, (input, _) in enumerate(test_loader):\n data_time.update(time.time() - end)\n input = np.squeeze(input.numpy(), axis=0)\n image = np.transpose(input, (1, 2, 0))\n h, w, _ = image.shape\n prediction = np.zeros((h, w, classes), dtype=float)\n for scale in scales:\n long_size = round(scale * base_size)\n new_h = long_size\n new_w = long_size\n if h > w:\n new_w = round(long_size/float(h)*w)\n else:\n new_h = round(long_size/float(w)*h)\n image_scale = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_LINEAR)\n prediction += scale_process(model, image_scale, classes, crop_h, crop_w, h, w, mean, std)\n prediction /= len(scales)\n prediction = np.argmax(prediction, axis=2)\n batch_time.update(time.time() - end)\n end = time.time()\n if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):\n logger.info('Test: [{}/{}] '\n 'Data {data_time.val:.3f} ({data_time.avg:.3f}) '\n 'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(i + 1, len(test_loader),\n data_time=data_time,\n batch_time=batch_time))\n check_makedirs(gray_folder)\n check_makedirs(color_folder)\n gray = np.uint8(prediction)\n color = colorize(gray, colors)\n image_path, _ = data_list[i]\n image_name = image_path.split('/')[-1].split('.')[0]\n gray_path = os.path.join(gray_folder, image_name + '.png')\n color_path = os.path.join(color_folder, image_name + '.png')\n if args.split == 'test':\n gray_labelid = trainID2labelID(gray)\n # save_gray_path = gray_path.replace('_leftImg8bit','*')\n 
cv2.imwrite(gray_path, gray_labelid)\n # if make_video:\n else:\n cv2.imwrite(gray_path, gray)\n color.save(color_path)\n logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')\n\n\ndef cal_acc(data_list, pred_folder, classes, names):\n intersection_meter = AverageMeter()\n union_meter = AverageMeter()\n target_meter = AverageMeter()\n\n for i, (image_path, target_path) in enumerate(data_list):\n image_name = image_path.split('/')[-1].split('.')[0]\n pred = cv2.imread(os.path.join(pred_folder, image_name+'.png'), cv2.IMREAD_GRAYSCALE)\n target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)\n if \"RELLIS\" in target_path:\n target = dataset.rellis_labelID2trainID(target)\n intersection, union, target = intersectionAndUnion(pred, target, classes)\n intersection_meter.update(intersection)\n union_meter.update(union)\n target_meter.update(target)\n accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10)\n logger.info('Evaluating {0}/{1} on image {2}, accuracy {3:.4f}.'.format(i + 1, len(data_list), image_name+'.png', accuracy))\n\n iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)\n accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)\n mIoU = np.mean(iou_class)\n mAcc = np.mean(accuracy_class)\n allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)\n\n logger.info('Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(mIoU, mAcc, allAcc))\n for i in range(classes):\n logger.info('Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(i, iou_class[i], accuracy_class[i], names[i]))\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"numpy.expand_dims",
"torch.nn.functional.softmax",
"numpy.uint8",
"torch.utils.data.DataLoader",
"numpy.ceil",
"numpy.copy",
"numpy.argmax",
"numpy.mean",
"torch.no_grad",
"torch.nn.functional.interpolate",
"numpy.transpose",
"torch.nn.DataParallel",
"numpy.zeros",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Tim-Tianyu/ditto | [
"a07659645a6670ae7c0ff3a6be27776669cd9a19"
] | [
"ditto/summarize.py"
] | [
"import numpy as np\nimport csv\nimport sys\nimport os\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom collections import Counter\nfrom nltk.corpus import stopwords\n\nfrom snippext.dataset import get_tokenizer\n\nstopwords = set(stopwords.words('english'))\n\nclass Summarizer:\n \"\"\"To summarize a data entry pair into length up to the max sequence length.\n\n Args:\n task_config (Dictionary): the task configuration\n lm (string): the language model (bert, albert, or distilbert)\n\n Attributes:\n config (Dictionary): the task configuration\n tokenizer (Tokenizer): a tokenizer from the huggingface library\n \"\"\"\n def __init__(self, task_config, lm):\n self.config = task_config\n self.tokenizer = get_tokenizer(lm=lm)\n self.len_cache = {}\n\n # build the tfidf index\n self.build_index()\n\n def build_index(self):\n \"\"\"Build the idf index.\n\n Store the index and vocabulary in self.idf and self.vocab.\n \"\"\"\n fns = [self.config['trainset'],\n self.config['validset'],\n self.config['testset']]\n content = []\n for fn in fns:\n with open(fn, encoding='utf-8') as fin:\n for line in fin:\n LL = line.split('\\t')\n if len(LL) > 2:\n for entry in LL:\n content.append(entry)\n\n vectorizer = TfidfVectorizer().fit(content)\n self.vocab = vectorizer.vocabulary_\n self.idf = vectorizer.idf_\n\n def get_len(self, word):\n \"\"\"Return the sentence_piece length of a token.\n \"\"\"\n if word in self.len_cache:\n return self.len_cache[word]\n length = len(self.tokenizer.tokenize(word))\n self.len_cache[word] = length\n return length\n\n def transform(self, row, max_len=128):\n \"\"\"Summarize one single example.\n\n Only retain tokens of the highest tf-idf\n\n Args:\n row (str): a matching example of two data entries and a binary label, separated by tab\n max_len (int, optional): the maximum sequence length to be summarized to\n\n Returns:\n str: the summarized example\n \"\"\"\n sentA, sentB, label = row.strip().split('\\t')\n res = ''\n cnt = Counter()\n for sent in [sentA, sentB]:\n tokens = sent.split(' ')\n for token in tokens:\n if token not in ['COL', 'VAL'] and \\\n token not in stopwords:\n if token in self.vocab:\n cnt[token] += self.idf[self.vocab[token]]\n\n for sent in [sentA, sentB]:\n token_cnt = Counter(sent.split(' '))\n total_len = token_cnt['COL'] + token_cnt['VAL']\n\n subset = Counter()\n for token in set(token_cnt.keys()):\n subset[token] = cnt[token]\n subset = subset.most_common(max_len)\n\n topk_tokens_copy = set([])\n for word, _ in subset:\n bert_len = self.get_len(word)\n if total_len + bert_len > max_len:\n break\n total_len += bert_len\n topk_tokens_copy.add(word)\n\n num_tokens = 0\n for token in sent.split(' '):\n if token in ['COL', 'VAL']:\n res += token + ' '\n elif token in topk_tokens_copy:\n res += token + ' '\n topk_tokens_copy.remove(token)\n\n res += '\\t'\n\n res += label + '\\n'\n return res\n\n def transform_file(self, input_fn, max_len=256, overwrite=False):\n \"\"\"Summarize all lines of a tsv file.\n\n Run the summarizer. 
If the output already exists, just return the file name.\n\n Args:\n input_fn (str): the input file name\n max_len (int, optional): the max sequence len\n overwrite (bool, optional): if true, then overwrite any cached output\n\n Returns:\n str: the output file name\n \"\"\"\n out_fn = input_fn + '.su'\n if not os.path.exists(out_fn) or \\\n os.stat(out_fn).st_size == 0 or overwrite:\n with open(out_fn, 'w', encoding='utf-8') as fout:\n for line in open(input_fn, encoding='utf-8'):\n fout.write(self.transform(line, max_len=max_len))\n return out_fn\n"
] | [
[
"sklearn.feature_extraction.text.TfidfVectorizer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
m0hit-kumar/probdists | [
"786d4f6c53534c318499d97200355f94c8c48919",
"786d4f6c53534c318499d97200355f94c8c48919"
] | [
"probdists/Triangulardistribution.py",
"probdists/Binomialdistribution.py"
] | [
"import math\nimport matplotlib.pyplot as plt\nfrom .Generaldistribution import Distribution\nfrom collections import Counter\nimport seaborn as sns\n\n\nclass Triangular(Distribution):\n \"\"\"\n Triangular distribution class for calculating and visualizing the\n triangular distribution: a continuous probability distribution shaped\n like a triangle\n Note: a <= mode <= b\n\n Attributes:\n\n a (float): the minimum lower limit value\n b (float): the maximum upper limit value\n mode (float): the mode, where min <= mode <= max\n\n mean (float): the mean value of the distribution\n stdev (float): the standard deviation of the distribution\n\n \"\"\"\n\n def __init__(self, a=0, b=1, mode=0.5):\n if b < mode < a or a == b:\n raise ValueError\n\n if a == b or a == mode or b == mode:\n raise TriangularValueException()\n\n self.a = a\n self.b = b\n self.mode = mode\n\n Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())\n\n def calculate_mean(self, round_to=2):\n \"\"\"\n Method to calculate the mean from the min, max and mode\n\n Args:\n round_to (int): Round the mean value. Defaults to 2.\n\n Returns:\n float: mean of the data set\n \"\"\"\n\n self.mean = 1 / 3 * (self.a + self.b + self.mode)\n\n return round(self.mean, round_to)\n\n def calculate_stdev(self, round_to=2):\n \"\"\"\n Method to calculate the standard deviation from the min, max and mode\n\n Args:\n round_to (int): Round the mean value. Defaults to 2.\n\n Returns:\n float: standard deviation of the data set\n \"\"\"\n\n summation = (\n (self.a ** 2)\n + (self.b ** 2)\n + (self.mode ** 2)\n - (self.a * self.b)\n - (self.a * self.mode)\n - (self.b * self.mode)\n )\n variance = summation / 18\n self.stdev = math.sqrt(variance)\n\n return round(self.stdev, round_to)\n\n def replace_stats_with_data(self):\n \"\"\"Method to calculate a, b, mode from the data set\n\n Args:\n None\n\n Returns:\n float: a, the minimum value\n float: b, the maximum value\n float: mode, the mode of the dataset\n \"\"\"\n if not self.data:\n # Use default values\n min_a, max_b, mode = 0, 1, 0.5\n else:\n min_a = min(self.data)\n max_b = max(self.data)\n mode = self.calculate_mode()\n\n if min == max or min == mode or max == mode:\n raise TriangularValueException()\n\n self.a = min_a\n self.b = max_b\n self.mode = mode\n\n return self.a, self.b, self.mode\n\n def calculate_mode(self, round_to=2):\n \"\"\"\n Calculates the mode of a dataset\n If no single mode, it will approximate the mode using the mean\n\n Args:\n round_to (int): Round the mode value. [Default value: 2]\n\n Returns:\n float: mode of data\n \"\"\"\n frequency_dict = dict(Counter(self.data))\n max_frequency = max(list(frequency_dict.values()))\n\n # Create list of modes from data\n mode = [k for k, v in frequency_dict.items() if v == max_frequency]\n\n if len(mode) == 1:\n return mode[0]\n else:\n # Multiple modes\n msg = f\"\"\"Multiple modes found: {str(mode)}, Triangular Distribution requires single mode\"\"\"\n raise TriangularValueException(msg)\n\n def calculate_pdf(self, x, round_to=2):\n \"\"\"\n Probability density function calculator for the Triangular distribution.\n\n Args:\n x (float): point for calculating the probability density function\n round_to (int): Round the pdf value. 
[Default value: 2]\n\n Returns:\n float: probability density function\n \"\"\"\n # Check equivalence\n if self.a == self.b or self.a == self.mode or self.b == self.mode:\n raise TriangularValueException()\n\n value = 0 # default value for when x < min or x > max\n if self.a <= x < self.mode:\n value = (2 * (x - self.a)) / ((self.b - self.a) * (self.mode - self.a))\n elif self.mode == x:\n value = 2 / (self.b - self.a)\n elif self.mode < x <= self.b:\n value = (2 * (self.b - x)) / ((self.b - self.a) * (self.b - self.mode))\n\n self.pdf = value\n return round(self.pdf, round_to)\n\n def calculate_cdf(self, x, round_to=2):\n \"\"\"\n Cumulative density function calculator for the Triangular distribution.\n\n Args:\n x (float): point for calculating the cumulative density function\n round_to (int): Round the value. [Default value: 2]\n\n Returns:\n float: cumulative density function output\n \"\"\"\n # Check equivalence\n if self.a == self.b or self.a == self.mode or self.b == self.mode:\n raise TriangularValueException()\n\n if x < self.a:\n value = 0\n elif self.a <= x <= self.mode:\n num = (x - self.a) ** 2\n den = (self.b - self.a) * (self.mode - self.a)\n value = num / den\n elif self.mode < x <= self.b:\n num = (self.b - x) ** 2\n den = (self.b - self.a) * (self.b - self.mode)\n value = 1 - (num / den)\n else:\n value = 1\n\n self.cdf = value\n return round(self.cdf, round_to)\n\n def plot_bar_pdf(self):\n \"\"\"\n Method to plot the pdf of the triangular distribution.\n\n Args:\n self\n Returns:\n None\n \"\"\"\n x = [self.a, self.mode, self.b]\n\n peak = 2 / (self.b - self.a)\n y = [0, peak, 0]\n\n sns.lineplot(x, y).set(\n title=\"Probability Density Plot for Triangular Distribution\",\n xlabel=\"Probability\",\n ylabel=\"x\",\n )\n\n plt.show()\n\n return x, y\n\n def __repr__(self):\n \"\"\"\n Outputs the characteristics of the Triangular Distribution instance.\n\n Args:\n self\n Returns:\n string: characteristics of the Triangle\n \"\"\"\n\n return (\n f\"minimum: {self.a}, maximum: {self.b}, mode: {self.mode}, \"\n f\"mean: {self.mean}, standard deviation: {self.stdev}\"\n )\n\n\nclass TriangularValueException(Exception):\n \"\"\"\n Defines Exception raised when minimum, maximum or mode values are equal\n and TriangularDistribution instance cannot be created\n\n Attributes:\n message (str): Error message to return\n \"\"\"\n\n def __init__(self, msg=None):\n if msg is not None:\n self.message = msg\n else:\n self.message = \"Minimum, Maximum, or Mode cannot be equivalent\"\n\n def __str__(self):\n if self.message:\n return f\"\"\"TriangularValueException: {self.message}\"\"\"\n return f\"\"\"TriangularValueException Raised\"\"\"\n",
"import math\nimport matplotlib.pyplot as plt\nfrom .Generaldistribution import Distribution\nimport seaborn as sns\n\n\nclass Binomial(Distribution):\n \"\"\"Binomial distribution class for calculating and\n visualizing a Binomial distribution.\n\n Attributes:\n mean (float) representing the mean value of the distribution\n stdev (float) representing the standard deviation of the distribution\n data_list (list of floats) to be extracted from the data file\n p (float) representing the probability of an event occurring\n n (int) number of trials\n \"\"\"\n\n def __init__(self, prob=0.5, size=20):\n\n self.n = size\n self.p = prob\n\n Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())\n\n def calculate_mean(self, round_to=2):\n \"\"\"Function to calculate the mean from p and n\n\n Args:\n round_to (int): Round the mean value. [Default value: 2 floating point]\n\n Returns:\n float: mean of the data set\n \"\"\"\n\n self.mean = self.p * self.n\n\n return round(self.mean, round_to)\n\n def calculate_stdev(self, round_to=2):\n \"\"\"Function to calculate the standard deviation from p and n.\n\n Args:\n round_to (int): Round the mean value. [Default value: 2 floating point]\n\n Returns:\n float: standard deviation of the data set\n \"\"\"\n\n self.stdev = math.sqrt(self.n * self.p * (1 - self.p))\n\n return round(self.stdev, round_to)\n\n def replace_stats_with_data(self):\n \"\"\"Function to calculate p and n from the data set\n\n Args:\n None\n\n Returns:\n float: the p value\n float: the n value\n \"\"\"\n\n self.n = len(self.data)\n self.p = 1.0 * sum(self.data) / len(self.data)\n self.calculate_mean()\n self.calculate_stdev()\n\n return self.p, self.n\n\n def plot_bar(self):\n \"\"\"Function to output a histogram of the instance variable data using\n seaborn library.\n\n Args:\n None\n\n Returns:\n None\n \"\"\"\n sns.barplot(x=[0, 1], y=[(1 - self.p) * self.n, self.p * self.n]).set(\n title=\"Bar Chart of Data\", xlabel=\"outcome\", ylabel=\"count\"\n )\n plt.show()\n\n def calculate_pdf(self, k, round_to=2):\n \"\"\"Probability density function calculator for the binomial distribution.\n\n Args:\n k (float): point for calculating the probability density function\n round_to (int): Round the mean value. [Default value: 2 floating point]\n\n Returns:\n float: probability density function output\n \"\"\"\n\n a = math.factorial(self.n)\n b = math.factorial(k) * math.factorial(self.n - k)\n c = (self.p ** k) * (1 - self.p) ** (self.n - k)\n self.pdf = (a / b) * c\n\n return round(self.pdf, round_to)\n\n def calculate_cdf(self, k, round_to=2):\n \"\"\"Cumulative distribution function calculator for the binomial distribution.\n\n Args:\n k (float): point for calculating the cumulative distribution function\n round_to (int): Round the mean value. 
[Default value: 2 floating point]\n\n Returns:\n float: cumulative distribution function output\n \"\"\"\n\n total_p = 0\n for i in range(k + 1):\n self.calculate_pdf(i)\n total_p += self.pdf\n self.cdf = total_p\n return round(self.cdf, round_to)\n\n def plot_bar_pdf(self):\n \"\"\"Function to plot the pdf of the binomial distribution\n\n Args:\n None\n\n Returns:\n list: x values for the pdf plot\n list: y values for the pdf plot\n \"\"\"\n\n x = []\n y = []\n\n # calculate the x values to visualize\n for i in range(self.n + 1):\n x.append(i)\n self.calculate_pdf(i)\n y.append(self.pdf)\n\n # make the plots\n\n sns.barplot(x=x, y=y).set(\n title=\"Distribution of Outcomes\", ylabel=\"Probability\", xlabel=\"Outcome\"\n )\n\n plt.show()\n\n return x, y\n\n def __add__(self, other):\n \"\"\"Function to add together two Binomial distributions with equal p\n\n Args:\n other (Binomial): Binomial instance\n\n Returns:\n Binomial: Binomial distribution\n \"\"\"\n\n try:\n assert self.p == other.p, \"p values are not equal\"\n except AssertionError as error:\n raise\n\n result = Binomial()\n result.n = self.n + other.n\n result.p = self.p\n result.calculate_mean()\n result.calculate_stdev()\n\n return result\n\n def __repr__(self):\n \"\"\"Function to output the characteristics of the Binomial instance\n\n Args:\n None\n\n Returns:\n string: characteristics of the Binomial\n \"\"\"\n\n return f\"mean {self.mean}, standard deviation {self.stdev}, \\\n p {self.p}, n {self.n}\"\n"
] | [
[
"matplotlib.pyplot.show"
],
[
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LongzeSong/nni | [
"ae36373c8b2dfb338f32af6f9d3e1eed083a085a"
] | [
"src/sdk/pynni/nni/compression/torch/compressor.py"
] | [
"import logging\nimport torch\nfrom . import default_layers\n\n_logger = logging.getLogger(__name__)\n\n\nclass LayerInfo:\n def __init__(self, name, module):\n self.module = module\n self.name = name\n self.type = type(module).__name__\n\n self._forward = None\n\nclass Compressor:\n \"\"\"\n Abstract base PyTorch compressor\n \"\"\"\n\n def __init__(self, model, config_list):\n \"\"\"\n Record necessary info in class members\n\n Parameters\n ----------\n model : pytorch model\n the model user wants to compress\n config_list : list\n the configurations that users specify for compression\n \"\"\"\n self.bound_model = model\n self.config_list = config_list\n self.modules_to_compress = None\n\n def detect_modules_to_compress(self):\n \"\"\"\n detect all modules should be compressed, and save the result in `self.modules_to_compress`.\n The model will be instrumented and user should never edit it after calling this method.\n \"\"\"\n if self.modules_to_compress is None:\n self.modules_to_compress = []\n for name, module in self.bound_model.named_modules():\n layer = LayerInfo(name, module)\n config = self.select_config(layer)\n if config is not None:\n self.modules_to_compress.append((layer, config))\n return self.modules_to_compress\n\n def compress(self):\n \"\"\"\n Compress the model with algorithm implemented by subclass.\n\n The model will be instrumented and user should never edit it after calling this method.\n `self.modules_to_compress` records all the to-be-compressed layers\n \"\"\"\n modules_to_compress = self.detect_modules_to_compress()\n for layer, config in modules_to_compress:\n self._instrument_layer(layer, config)\n return self.bound_model\n\n def get_modules_to_compress(self):\n \"\"\"\n To obtain all the to-be-compressed layers.\n\n Returns\n -------\n list\n a list of the layers, each of which is a tuple (`layer`, `config`),\n `layer` is `LayerInfo`, `config` is a `dict`\n \"\"\"\n return self.modules_to_compress\n\n def select_config(self, layer):\n \"\"\"\n Find the configuration for `layer` by parsing `self.config_list`\n\n Parameters\n ----------\n layer : LayerInfo\n one layer\n\n Returns\n -------\n config or None\n the retrieved configuration for this layer, if None, this layer should\n not be compressed\n \"\"\"\n ret = None\n for config in self.config_list:\n config = config.copy()\n config['op_types'] = self._expand_config_op_types(config)\n if layer.type not in config['op_types']:\n continue\n if config.get('op_names') and layer.name not in config['op_names']:\n continue\n ret = config\n if ret is None or ret.get('exclude'):\n return None\n return ret\n\n def update_epoch(self, epoch):\n \"\"\"\n If user want to update model every epoch, user can override this method.\n This method should be called at the beginning of each epoch\n\n Parameters\n ----------\n epoch : num\n the current epoch number\n \"\"\"\n\n def step(self):\n \"\"\"\n If user want to update model every step, user can override this method\n \"\"\"\n\n def _instrument_layer(self, layer, config):\n \"\"\"\n This method is implemented in the subclasses, i.e., `Pruner` and `Quantizer`\n\n Parameters\n ----------\n layer : LayerInfo\n the layer to instrument the compression operation\n config : dict\n the configuration for compressing this layer\n \"\"\"\n raise NotImplementedError()\n\n def _expand_config_op_types(self, config):\n if config is None:\n return []\n expanded_op_types = []\n for op_type in config.get('op_types', []):\n if op_type == 'default':\n 
expanded_op_types.extend(default_layers.weighted_modules)\n else:\n expanded_op_types.append(op_type)\n return expanded_op_types\n\n\nclass Pruner(Compressor):\n \"\"\"\n Prune to an exact pruning level specification\n\n Attributes\n ----------\n mask_dict : dict\n Dictionary for saving masks, `key` should be layer name and\n `value` should be a tensor which has the same shape with layer's weight\n\n \"\"\"\n\n def __init__(self, model, config_list):\n super().__init__(model, config_list)\n self.mask_dict = {}\n\n def calc_mask(self, layer, config):\n \"\"\"\n Pruners should overload this method to provide mask for weight tensors.\n The mask must have the same shape and type comparing to the weight.\n It will be applied with `mul()` operation on the weight.\n This method is effectively hooked to `forward()` method of the model.\n\n Parameters\n ----------\n layer : LayerInfo\n calculate mask for `layer`'s weight\n config : dict\n the configuration for generating the mask\n \"\"\"\n raise NotImplementedError(\"Pruners must overload calc_mask()\")\n\n def _instrument_layer(self, layer, config):\n \"\"\"\n Create a wrapper forward function to replace the original one.\n\n Parameters\n ----------\n layer : LayerInfo\n the layer to instrument the mask\n config : dict\n the configuration for generating the mask\n \"\"\"\n assert layer._forward is None, 'Each model can only be compressed once'\n if not _check_weight(layer.module):\n _logger.warning('Module %s does not have parameter \"weight\"', layer.name)\n return\n layer._forward = layer.module.forward\n\n def new_forward(*inputs):\n # apply mask to weight\n old_weight = layer.module.weight.data\n mask = self.calc_mask(layer, config)\n layer.module.weight.data = old_weight.mul(mask)\n # calculate forward\n ret = layer._forward(*inputs)\n return ret\n\n layer.module.forward = new_forward\n\n def export_model(self, model_path, mask_path=None, onnx_path=None, input_shape=None):\n \"\"\"\n Export pruned model weights, masks and onnx model(optional)\n\n Parameters\n ----------\n model_path : str\n path to save pruned model state_dict\n mask_path : str\n (optional) path to save mask dict\n onnx_path : str\n (optional) path to save onnx model\n input_shape : list or tuple\n input shape to onnx model\n \"\"\"\n if self.detect_modules_to_compress() and not self.mask_dict:\n _logger.warning('You may not use self.mask_dict in base Pruner class to record masks')\n assert model_path is not None, 'model_path must be specified'\n for name, m in self.bound_model.named_modules():\n if name == \"\":\n continue\n mask = self.mask_dict.get(name)\n if mask is not None:\n mask_sum = mask.sum().item()\n mask_num = mask.numel()\n _logger.info('Layer: %s Sparsity: %.2f', name, 1 - mask_sum / mask_num)\n m.weight.data = m.weight.data.mul(mask)\n else:\n _logger.info('Layer: %s NOT compressed', name)\n torch.save(self.bound_model.state_dict(), model_path)\n _logger.info('Model state_dict saved to %s', model_path)\n if mask_path is not None:\n torch.save(self.mask_dict, mask_path)\n _logger.info('Mask dict saved to %s', mask_path)\n if onnx_path is not None:\n assert input_shape is not None, 'input_shape must be specified to export onnx model'\n # input info needed\n input_data = torch.Tensor(*input_shape)\n torch.onnx.export(self.bound_model, input_data, onnx_path)\n _logger.info('Model in onnx with input shape %s saved to %s', input_data.shape, onnx_path)\n\n\nclass Quantizer(Compressor):\n \"\"\"\n Base quantizer for pytorch quantizer\n \"\"\"\n\n def 
quantize_weight(self, weight, config, op, op_type, op_name):\n \"\"\"\n quantize should overload this method to quantize weight.\n This method is effectively hooked to :meth:`forward` of the model.\n\n Parameters\n ----------\n weight : Tensor\n weight that needs to be quantized\n config : dict\n the configuration for weight quantization\n \"\"\"\n raise NotImplementedError(\"Quantizer must overload quantize_weight()\")\n\n def quantize_output(self, output, config, op, op_type, op_name):\n \"\"\"\n quantize should overload this method to quantize output.\n This method is effectively hooked to :meth:`forward` of the model.\n\n Parameters\n ----------\n output : Tensor\n output that needs to be quantized\n config : dict\n the configuration for output quantization\n \"\"\"\n raise NotImplementedError(\"Quantizer must overload quantize_output()\")\n\n def quantize_input(self, *inputs, config, op, op_type, op_name):\n \"\"\"\n quantize should overload this method to quantize input.\n This method is effectively hooked to :meth:`forward` of the model.\n\n Parameters\n ----------\n inputs : Tensor\n inputs that needs to be quantized\n config : dict\n the configuration for inputs quantization\n \"\"\"\n raise NotImplementedError(\"Quantizer must overload quantize_input()\")\n\n\n def _instrument_layer(self, layer, config):\n \"\"\"\n Create a wrapper forward function to replace the original one.\n\n Parameters\n ----------\n layer : LayerInfo\n the layer to instrument the mask\n config : dict\n the configuration for quantization\n \"\"\"\n assert layer._forward is None, 'Each model can only be compressed once'\n assert \"quant_types\" in config, 'must provide quant_types in config'\n assert isinstance(config[\"quant_types\"], list), 'quant_types must be list type'\n\n if 'weight' in config[\"quant_types\"]:\n if not _check_weight(layer.module):\n _logger.warning('Module %s does not have parameter \"weight\"', layer.name)\n layer._forward = layer.module.forward\n\n def new_forward(*inputs):\n if 'input' in config[\"quant_types\"]:\n inputs = self.quantize_input(inputs, config=config, op=layer.module, op_type=layer.type, op_name=layer.name)\n\n if 'weight' in config[\"quant_types\"] and _check_weight(layer.module):\n weight = layer.module.weight.data\n new_weight = self.quantize_weight(weight, config, op=layer.module, op_type=layer.type, op_name=layer.name)\n layer.module.weight.data = new_weight\n result = layer._forward(*inputs)\n layer.module.weight.data = weight\n else:\n result = layer._forward(*inputs)\n\n if 'output' in config[\"quant_types\"]:\n result = self.quantize_output(result, config, op=layer.module, op_type=layer.type, op_name=layer.name)\n\n return result\n\n layer.module.forward = new_forward\n\ndef _check_weight(module):\n try:\n return isinstance(module.weight, torch.nn.Parameter) and isinstance(module.weight.data, torch.Tensor)\n except AttributeError:\n return False\n"
] | [
[
"torch.onnx.export",
"torch.Tensor",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gutenzwerg/colour | [
"24c98dc6e8ac041f8a823a9fce83857faaebc367",
"24c98dc6e8ac041f8a823a9fce83857faaebc367",
"24c98dc6e8ac041f8a823a9fce83857faaebc367",
"24c98dc6e8ac041f8a823a9fce83857faaebc367"
] | [
"colour/models/rgb/transfer_functions/tests/test_itur_bt_709.py",
"colour/algebra/tests/test_common.py",
"colour/models/rgb/transfer_functions/tests/test_linear.py",
"colour/temperature/tests/test_ohno2013.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nDefines unit tests for :mod:`colour.models.rgb.transfer_functions.itur_bt_709`\nmodule.\n\"\"\"\n\nimport numpy as np\nimport unittest\n\nfrom colour.models.rgb.transfer_functions import oetf_BT709, oetf_inverse_BT709\nfrom colour.utilities import domain_range_scale, ignore_numpy_errors\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = ['TestOetf_BT709', 'TestOetf_inverse_BT709']\n\n\nclass TestOetf_BT709(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.models.rgb.transfer_functions.itur_bt_709.oetf_BT709`\n definition unit tests methods.\n \"\"\"\n\n def test_oetf_BT709(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.itur_bt_709.\\\noetf_BT709` definition.\n \"\"\"\n\n self.assertAlmostEqual(oetf_BT709(0.0), 0.0, places=7)\n\n self.assertAlmostEqual(oetf_BT709(0.015), 0.067500000000000, places=7)\n\n self.assertAlmostEqual(oetf_BT709(0.18), 0.409007728864150, places=7)\n\n self.assertAlmostEqual(oetf_BT709(1.0), 1.0, places=7)\n\n def test_n_dimensional_oetf_BT709(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.itur_bt_709.\\\noetf_BT709` definition n-dimensional arrays support.\n \"\"\"\n\n L = 0.18\n V = oetf_BT709(L)\n\n L = np.tile(L, 6)\n V = np.tile(V, 6)\n np.testing.assert_almost_equal(oetf_BT709(L), V, decimal=7)\n\n L = np.reshape(L, (2, 3))\n V = np.reshape(V, (2, 3))\n np.testing.assert_almost_equal(oetf_BT709(L), V, decimal=7)\n\n L = np.reshape(L, (2, 3, 1))\n V = np.reshape(V, (2, 3, 1))\n np.testing.assert_almost_equal(oetf_BT709(L), V, decimal=7)\n\n def test_domain_range_scale_oetf_BT709(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.itur_bt_709.\\\noetf_BT709` definition domain and range scale support.\n \"\"\"\n\n L = 0.18\n V = oetf_BT709(L)\n\n d_r = (('reference', 1), (1, 1), (100, 100))\n for scale, factor in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n oetf_BT709(L * factor), V * factor, decimal=7)\n\n @ignore_numpy_errors\n def test_nan_oetf_BT709(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.itur_bt_709.\\\noetf_BT709` definition nan support.\n \"\"\"\n\n oetf_BT709(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))\n\n\nclass TestOetf_inverse_BT709(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.models.rgb.transfer_functions.itur_bt_709.\\\noetf_inverse_BT709` definition unit tests methods.\n \"\"\"\n\n def test_oetf_inverse_BT709(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.itur_bt_709.\\\noetf_inverse_BT709` definition.\n \"\"\"\n\n self.assertAlmostEqual(oetf_inverse_BT709(0.0), 0.0, places=7)\n\n self.assertAlmostEqual(\n oetf_inverse_BT709(0.067500000000000), 0.015, places=7)\n\n self.assertAlmostEqual(\n oetf_inverse_BT709(0.409007728864150), 0.18, places=7)\n\n self.assertAlmostEqual(oetf_inverse_BT709(1.0), 1.0, places=7)\n\n def test_n_dimensional_oetf_inverse_BT709(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.itur_bt_709.\\\noetf_inverse_BT709` definition n-dimensional arrays support.\n \"\"\"\n\n V = 0.409007728864150\n L = oetf_inverse_BT709(V)\n\n V = np.tile(V, 6)\n L = np.tile(L, 6)\n np.testing.assert_almost_equal(oetf_inverse_BT709(V), L, decimal=7)\n\n V = np.reshape(V, (2, 3))\n L = np.reshape(L, (2, 3))\n 
np.testing.assert_almost_equal(oetf_inverse_BT709(V), L, decimal=7)\n\n V = np.reshape(V, (2, 3, 1))\n L = np.reshape(L, (2, 3, 1))\n np.testing.assert_almost_equal(oetf_inverse_BT709(V), L, decimal=7)\n\n def test_domain_range_scale_oetf_inverse_BT709(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.itur_bt_709.\\\noetf_inverse_BT709` definition domain and range scale support.\n \"\"\"\n\n V = 0.409007728864150\n L = oetf_inverse_BT709(V)\n\n d_r = (('reference', 1), (1, 1), (100, 100))\n for scale, factor in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n oetf_inverse_BT709(V * factor), L * factor, decimal=7)\n\n @ignore_numpy_errors\n def test_nan_oetf_inverse_BT709(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.itur_bt_709.\\\noetf_inverse_BT709` definition nan support.\n \"\"\"\n\n oetf_inverse_BT709(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nDefines unit tests for :mod:`colour.algebra.common` module.\n\"\"\"\n\nimport numpy as np\nimport unittest\n\nfrom colour.algebra import (is_spow_enabled, set_spow_enable, spow_enable,\n spow, smoothstep_function)\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = [\n 'TestIsSpowEnabled', 'TestSetSpowEnabled', 'TestSpowEnable', 'TestSpow',\n 'TestSmoothstepFunction'\n]\n\n\nclass TestIsSpowEnabled(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.algebra.common.is_spow_enabled` definition unit\n tests methods.\n \"\"\"\n\n def test_is_spow_enabled(self):\n \"\"\"\n Tests :func:`colour.algebra.common.is_spow_enabled` definition.\n \"\"\"\n\n with spow_enable(True):\n self.assertTrue(is_spow_enabled())\n\n with spow_enable(False):\n self.assertFalse(is_spow_enabled())\n\n\nclass TestSetSpowEnabled(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.algebra.common.set_spow_enable` definition unit\n tests methods.\n \"\"\"\n\n def test_set_spow_enable(self):\n \"\"\"\n Tests :func:`colour.algebra.common.set_spow_enable` definition.\n \"\"\"\n\n with spow_enable(is_spow_enabled()):\n set_spow_enable(True)\n self.assertTrue(is_spow_enabled())\n\n with spow_enable(is_spow_enabled()):\n set_spow_enable(False)\n self.assertFalse(is_spow_enabled())\n\n\nclass TestSpowEnable(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.algebra.common.spow_enable` definition unit\n tests methods.\n \"\"\"\n\n def test_spow_enable(self):\n \"\"\"\n Tests :func:`colour.algebra.common.spow_enable` definition.\n \"\"\"\n\n with spow_enable(True):\n self.assertTrue(is_spow_enabled())\n\n with spow_enable(False):\n self.assertFalse(is_spow_enabled())\n\n @spow_enable(True)\n def fn_a():\n \"\"\"\n :func:`spow_enable` unit tests :func:`fn_a` definition.\n \"\"\"\n\n self.assertTrue(is_spow_enabled())\n\n fn_a()\n\n @spow_enable(False)\n def fn_b():\n \"\"\"\n :func:`spow_enable` unit tests :func:`fn_b` definition.\n \"\"\"\n\n self.assertFalse(is_spow_enabled())\n\n fn_b()\n\n\nclass TestSpow(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.algebra.common.spow` definition unit\n tests methods.\n \"\"\"\n\n def test_spow(self):\n \"\"\"\n Tests :func:`colour.algebra.common.spow` definition.\n \"\"\"\n\n self.assertEqual(spow(2, 2), 4.0)\n\n self.assertEqual(spow(-2, 2), -4.0)\n\n np.testing.assert_almost_equal(\n spow([2, -2, -2, 0], [2, 2, 0.15, 0]),\n np.array([4.00000000, -4.00000000, -1.10956947, 0.00000000]),\n decimal=7)\n\n with spow_enable(True):\n np.testing.assert_almost_equal(\n spow(-2, 0.15), -1.10956947, decimal=7)\n\n with spow_enable(False):\n np.testing.assert_equal(spow(-2, 0.15), np.nan)\n\n\nclass TestSmoothstepFunction(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.algebra.common.smoothstep_function` definition unit\n tests methods.\n \"\"\"\n\n def test_smoothstep_function(self):\n \"\"\"\n Tests :func:`colour.algebra.common.smoothstep_function` definition.\n \"\"\"\n\n self.assertEqual(smoothstep_function(0.5), 0.5)\n self.assertEqual(smoothstep_function(0.25), 0.15625)\n self.assertEqual(smoothstep_function(0.75), 0.84375)\n\n x = np.linspace(-2, 2, 5)\n np.testing.assert_almost_equal(\n smoothstep_function(x),\n np.array([28.00000, 5.00000, 0.00000, 1.00000, -4.00000]))\n np.testing.assert_almost_equal(\n 
smoothstep_function(x, -2, 2, clip=True),\n np.array([0.00000, 0.15625, 0.50000, 0.84375, 1.00000]))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nDefines unit tests for :mod:`colour.models.rgb.transfer_functions.linear`\nmodule.\n\"\"\"\n\nimport numpy as np\nimport unittest\n\nfrom colour.models.rgb.transfer_functions import linear_function\nfrom colour.utilities import ignore_numpy_errors\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = ['TestLinearFunction']\n\n\nclass TestLinearFunction(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.models.rgb.transfer_functions.linear.\\\nlinear_function` definition unit tests methods.\n \"\"\"\n\n def test_linear_function(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.linear.\\\nlinear_function` definition.\n \"\"\"\n\n self.assertEqual(linear_function(0.0), 0.0)\n\n self.assertEqual(linear_function(0.18), 0.18)\n\n self.assertEqual(linear_function(1.0), 1.0)\n\n def test_n_dimensional_linear_function(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.linear.\\\nlinear_function` definition n-dimensional arrays support.\n \"\"\"\n\n a = 0.18\n a_p = linear_function(a)\n\n a = np.tile(a, 6)\n a_p = np.tile(a_p, 6)\n np.testing.assert_almost_equal(linear_function(a), a_p, decimal=7)\n\n a = np.reshape(a, (2, 3))\n a_p = np.reshape(a_p, (2, 3))\n np.testing.assert_almost_equal(linear_function(a), a_p, decimal=7)\n\n a = np.reshape(a, (2, 3, 1))\n a_p = np.reshape(a_p, (2, 3, 1))\n np.testing.assert_almost_equal(linear_function(a), a_p, decimal=7)\n\n @ignore_numpy_errors\n def test_nan_linear_function(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.linear.\\\nlinear_function` definition nan support.\n \"\"\"\n\n cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]\n\n for case in cases:\n linear_function(case)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nDefines unit tests for :mod:`colour.temperature.ohno2013` module.\n\"\"\"\n\nimport numpy as np\nimport unittest\nfrom itertools import permutations\n\nfrom colour.colorimetry import MSDS_CMFS_STANDARD_OBSERVER\nfrom colour.temperature import CCT_to_uv_Ohno2013, uv_to_CCT_Ohno2013\nfrom colour.temperature.ohno2013 import (\n planckian_table, planckian_table_minimal_distance_index)\nfrom colour.utilities import ignore_numpy_errors\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = [\n 'TestPlanckianTable', 'TestPlanckianTableMinimalDistanceIndex',\n 'Testuv_to_CCT_Ohno2013', 'TestCCT_to_uv_Ohno2013'\n]\n\nPLANCKIAN_TABLE = np.array([\n [1000.00000000, 0.44796288, 0.35462962, 0.25373557],\n [1001.11111111, 0.44770303, 0.35465214, 0.25348315],\n [1002.22222222, 0.44744348, 0.35467461, 0.25323104],\n [1003.33333333, 0.44718423, 0.35469704, 0.25297924],\n [1004.44444444, 0.44692529, 0.35471942, 0.25272774],\n [1005.55555556, 0.44666666, 0.35474175, 0.25247656],\n [1006.66666667, 0.44640833, 0.35476404, 0.25222569],\n [1007.77777778, 0.44615030, 0.35478628, 0.25197512],\n [1008.88888889, 0.44589258, 0.35480848, 0.25172487],\n [1010.00000000, 0.44563516, 0.35483063, 0.25147492],\n])\n\n\nclass TestPlanckianTable(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.temperature.ohno2013.planckian_table` definition\n units tests methods.\n \"\"\"\n\n def test_planckian_table(self):\n \"\"\"\n Tests :func:`colour.temperature.ohno2013.planckian_table` definition.\n \"\"\"\n\n cmfs = MSDS_CMFS_STANDARD_OBSERVER[\n 'CIE 1931 2 Degree Standard Observer']\n\n np.testing.assert_almost_equal(\n [(x.Ti, x.ui, x.vi, x.di) for x in planckian_table(\n np.array([0.1978, 0.3122]), cmfs, 1000, 1010, 10)],\n PLANCKIAN_TABLE)\n\n\nclass TestPlanckianTableMinimalDistanceIndex(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.temperature.ohno2013.\\\nplanckian_table_minimal_distance_index` definition unit tests methods.\n \"\"\"\n\n def test_planckian_table_minimal_distance_index(self):\n \"\"\"\n Tests :func:`colour.temperature.ohno2013.\\\nplanckian_table_minimal_distance_index` definition.\n \"\"\"\n\n cmfs = MSDS_CMFS_STANDARD_OBSERVER[\n 'CIE 1931 2 Degree Standard Observer']\n self.assertEqual(\n planckian_table_minimal_distance_index(\n planckian_table(\n np.array([0.1978, 0.3122]), cmfs, 1000, 1010, 10)), 9)\n\n\nclass Testuv_to_CCT_Ohno2013(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.temperature.ohno2013.uv_to_CCT_Ohno2013` definition\n units tests methods.\n \"\"\"\n\n def test_uv_to_CCT_Ohno2013(self):\n \"\"\"\n Tests :func:`colour.temperature.ohno2013.uv_to_CCT_Ohno2013`\n definition.\n \"\"\"\n\n cmfs = MSDS_CMFS_STANDARD_OBSERVER[\n 'CIE 1931 2 Degree Standard Observer']\n np.testing.assert_almost_equal(\n uv_to_CCT_Ohno2013(np.array([0.1978, 0.3122]), cmfs),\n np.array([6507.47380460, 0.00322335]),\n decimal=7)\n\n np.testing.assert_almost_equal(\n uv_to_CCT_Ohno2013(np.array([0.4328, 0.2883]), cmfs),\n np.array([1041.68315360, -0.06737802]),\n decimal=7)\n\n np.testing.assert_almost_equal(\n uv_to_CCT_Ohno2013(np.array([0.2927, 0.2722]), cmfs, iterations=4),\n np.array([2452.15316417, -0.08437064]),\n decimal=7)\n\n def test_n_dimensional_uv_to_CCT_Ohno2013(self):\n \"\"\"\n Tests 
:func:`colour.temperature.ohno2013.uv_to_CCT_Ohno2013` definition\n n-dimensional arrays support.\n \"\"\"\n\n uv = np.array([0.1978, 0.3122])\n CCT_D_uv = uv_to_CCT_Ohno2013(uv)\n\n uv = np.tile(uv, (6, 1))\n CCT_D_uv = np.tile(CCT_D_uv, (6, 1))\n np.testing.assert_almost_equal(\n uv_to_CCT_Ohno2013(uv), CCT_D_uv, decimal=7)\n\n uv = np.reshape(uv, (2, 3, 2))\n CCT_D_uv = np.reshape(CCT_D_uv, (2, 3, 2))\n np.testing.assert_almost_equal(\n uv_to_CCT_Ohno2013(uv), CCT_D_uv, decimal=7)\n\n @ignore_numpy_errors\n def test_nan_uv_to_CCT_Ohno2013(self):\n \"\"\"\n Tests :func:`colour.temperature.ohno2013.uv_to_CCT_Ohno2013` definition\n nan support.\n \"\"\"\n\n cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]\n cases = set(permutations(cases * 3, r=2))\n for case in cases:\n uv = np.array(case)\n uv_to_CCT_Ohno2013(uv)\n\n\nclass TestCCT_to_uv_Ohno2013(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.temperature.ohno2013.CCT_to_uv_Ohno2013` definition\n units tests methods.\n \"\"\"\n\n def test_CCT_to_uv_Ohno2013(self):\n \"\"\"\n Tests :func:`colour.temperature.ohno2013.CCT_to_uv_Ohno2013`\n definition.\n \"\"\"\n\n cmfs = MSDS_CMFS_STANDARD_OBSERVER[\n 'CIE 1931 2 Degree Standard Observer']\n np.testing.assert_almost_equal(\n CCT_to_uv_Ohno2013(np.array([6507.47380460, 0.00322335]), cmfs),\n np.array([0.19779997, 0.31219997]),\n decimal=7)\n\n np.testing.assert_almost_equal(\n CCT_to_uv_Ohno2013(np.array([1041.68315360, -0.06737802]), cmfs),\n np.array([0.43279885, 0.28830013]),\n decimal=7)\n\n np.testing.assert_almost_equal(\n CCT_to_uv_Ohno2013(np.array([2452.15316417, -0.08437064]), cmfs),\n np.array([0.29247364, 0.27215157]),\n decimal=7)\n\n def test_n_dimensional_CCT_to_uv_Ohno2013(self):\n \"\"\"\n Tests :func:`colour.temperature.ohno2013.CCT_to_uv_Ohno2013` definition\n n-dimensional arrays support.\n \"\"\"\n\n cmfs = MSDS_CMFS_STANDARD_OBSERVER[\n 'CIE 1931 2 Degree Standard Observer']\n CCT_D_uv = np.array([6507.47380460, 0.00322335])\n uv = CCT_to_uv_Ohno2013(CCT_D_uv, cmfs)\n\n CCT_D_uv = np.tile(CCT_D_uv, (6, 1))\n uv = np.tile(uv, (6, 1))\n np.testing.assert_almost_equal(\n CCT_to_uv_Ohno2013(CCT_D_uv, cmfs), uv, decimal=7)\n\n CCT_D_uv = np.reshape(CCT_D_uv, (2, 3, 2))\n uv = np.reshape(uv, (2, 3, 2))\n np.testing.assert_almost_equal(\n CCT_to_uv_Ohno2013(CCT_D_uv, cmfs), uv, decimal=7)\n\n @ignore_numpy_errors\n def test_nan_CCT_to_uv_Ohno2013(self):\n \"\"\"\n Tests :func:`colour.temperature.ohno2013.CCT_to_uv_Ohno2013` definition\n nan support.\n \"\"\"\n\n cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]\n cases = set(permutations(cases * 3, r=2))\n for case in cases:\n CCT_D_uv = np.array(case)\n CCT_to_uv_Ohno2013(CCT_D_uv)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.reshape",
"numpy.array",
"numpy.tile"
],
[
"numpy.array",
"numpy.linspace"
],
[
"numpy.reshape",
"numpy.tile"
],
[
"numpy.reshape",
"numpy.array",
"numpy.tile"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wenwei8268/Alink | [
"c00702538c95a32403985ebd344eb6aeb81749a7",
"c00702538c95a32403985ebd344eb6aeb81749a7",
"c00702538c95a32403985ebd344eb6aeb81749a7",
"c00702538c95a32403985ebd344eb6aeb81749a7",
"c00702538c95a32403985ebd344eb6aeb81749a7",
"c00702538c95a32403985ebd344eb6aeb81749a7",
"c00702538c95a32403985ebd344eb6aeb81749a7",
"c00702538c95a32403985ebd344eb6aeb81749a7",
"c00702538c95a32403985ebd344eb6aeb81749a7",
"c00702538c95a32403985ebd344eb6aeb81749a7",
"c00702538c95a32403985ebd344eb6aeb81749a7",
"c00702538c95a32403985ebd344eb6aeb81749a7",
"c00702538c95a32403985ebd344eb6aeb81749a7"
] | [
"python/src/main/python/pyalink/alink/tests/examples/from_docs/test_vectorminmaxscalertrainbatchop.py",
"python/src/main/python/pyalink/alink/tests/examples/from_docs/test_usercfusersperitemrecommender.py",
"python/src/main/python/pyalink/alink/tests/examples/from_docs/test_stringapproxnearestneighborpredictbatchop.py",
"python/src/main/python/pyalink/alink/tests/examples/from_docs/test_alssimilarusersrecommstreamop.py",
"python/src/main/python/pyalink/alink/tests/examples/from_docs/test_gmmtrainbatchop.py",
"python/src/main/python/pyalink/alink/tests/examples/from_docs/test_vectorinteractionbatchop.py",
"python/src/main/python/pyalink/alink/tests/common/types/file_system/test_ak_batch.py",
"python/src/main/python/pyalink/alink/tests/examples/from_docs/test_ksigmaoutlierbatchop.py",
"python/src/main/python/pyalink/alink/tests/examples/pipeline/test_pca.py",
"python/src/main/python/pyalink/alink/tests/examples/from_docs/test_textapproxnearestneighborpredictbatchop.py",
"python/src/main/python/pyalink/alink/tests/examples/from_docs/test_crossfeaturepredictstreamop.py",
"python/src/main/python/pyalink/alink/tests/examples/from_docs/test_vectorsizehintstreamop.py",
"python/src/main/python/pyalink/alink/tests/examples/operator/stream/test_standard_scaler.py"
] | [
"import unittest\nfrom pyalink.alink import *\nimport numpy as np\nimport pandas as pd\nclass TestVectorMinMaxScalerTrainBatchOp(unittest.TestCase):\n def test_vectorminmaxscalertrainbatchop(self):\n\n df = pd.DataFrame([\n [\"a\", \"10.0, 100\"],\n [\"b\", \"-2.5, 9\"],\n [\"c\", \"100.2, 1\"],\n [\"d\", \"-99.9, 100\"],\n [\"a\", \"1.4, 1\"],\n [\"b\", \"-2.2, 9\"],\n [\"c\", \"100.9, 1\"]\n ])\n \n data = BatchOperator.fromDataframe(df, schemaStr=\"col string, vec string\")\n \n trainOp = VectorMinMaxScalerTrainBatchOp()\\\n .setSelectedCol(\"vec\")\n model = trainOp.linkFrom(data)\n \n batchPredictOp = VectorMinMaxScalerPredictBatchOp()\n batchPredictOp.linkFrom(model, data).print()\n \n pass",
"import unittest\nfrom pyalink.alink import *\nimport numpy as np\nimport pandas as pd\nclass TestUserCfUsersPerItemRecommender(unittest.TestCase):\n def test_usercfusersperitemrecommender(self):\n\n df_data = pd.DataFrame([\n [1, 1, 0.6],\n [2, 2, 0.8],\n [2, 3, 0.6],\n [4, 1, 0.6],\n [4, 2, 0.3],\n [4, 3, 0.4],\n ])\n \n data = BatchOperator.fromDataframe(df_data, schemaStr='user bigint, item bigint, rating double')\n \n model = UserCfTrainBatchOp()\\\n .setUserCol(\"user\")\\\n .setItemCol(\"item\")\\\n .setRateCol(\"rating\").linkFrom(data);\n \n predictor = UserCfUsersPerItemRecommender()\\\n .setItemCol(\"item\")\\\n .setReservedCols([\"item\"])\\\n .setK(1)\\\n .setRecommCol(\"prediction_result\")\\\n .setModelData(model)\n \n predictor.transform(data).print()\n pass",
"import unittest\nfrom pyalink.alink import *\nimport numpy as np\nimport pandas as pd\nclass TestStringApproxNearestNeighborPredictBatchOp(unittest.TestCase):\n def test_stringapproxnearestneighborpredictbatchop(self):\n\n df = pd.DataFrame([\n [0, \"abcde\", \"aabce\"],\n [1, \"aacedw\", \"aabbed\"],\n [2, \"cdefa\", \"bbcefa\"],\n [3, \"bdefh\", \"ddeac\"],\n [4, \"acedm\", \"aeefbc\"]\n ])\n \n inOp = BatchOperator.fromDataframe(df, schemaStr='id long, text1 string, text2 string')\n \n train = StringApproxNearestNeighborTrainBatchOp().setIdCol(\"id\").setSelectedCol(\"text1\").setMetric(\"SIMHASH_HAMMING_SIM\").linkFrom(inOp)\n predict = StringApproxNearestNeighborPredictBatchOp().setSelectedCol(\"text2\").setTopN(3).linkFrom(train, inOp)\n predict.print()\n pass",
"import unittest\nfrom pyalink.alink import *\nimport numpy as np\nimport pandas as pd\nclass TestAlsSimilarUsersRecommStreamOp(unittest.TestCase):\n def test_alssimilarusersrecommstreamop(self):\n\n df_data = pd.DataFrame([\n [1, 1, 0.6],\n [2, 2, 0.8],\n [2, 3, 0.6],\n [4, 1, 0.6],\n [4, 2, 0.3],\n [4, 3, 0.4],\n ])\n \n data = BatchOperator.fromDataframe(df_data, schemaStr='user bigint, item bigint, rating double')\n sdata = StreamOperator.fromDataframe(df_data, schemaStr='user bigint, item bigint, rating double')\n \n als = AlsTrainBatchOp().setUserCol(\"user\").setItemCol(\"item\").setRateCol(\"rating\") \\\n .setNumIter(10).setRank(10).setLambda(0.01)\n \n model = als.linkFrom(data)\n predictor = AlsSimilarUsersRecommStreamOp(model) \\\n .setUserCol(\"user\").setRecommCol(\"rec\").setK(1).setReservedCols([\"user\"])\n \n predictor.linkFrom(sdata).print();\n StreamOperator.execute()\n pass",
"import unittest\nfrom pyalink.alink import *\nimport numpy as np\nimport pandas as pd\nclass TestGmmTrainBatchOp(unittest.TestCase):\n def test_gmmtrainbatchop(self):\n\n df_data = pd.DataFrame([\n [\"-0.6264538 0.1836433\"],\n [\"-0.8356286 1.5952808\"],\n [\"0.3295078 -0.8204684\"],\n [\"0.4874291 0.7383247\"],\n [\"0.5757814 -0.3053884\"],\n [\"1.5117812 0.3898432\"],\n [\"-0.6212406 -2.2146999\"],\n [\"11.1249309 9.9550664\"],\n [\"9.9838097 10.9438362\"],\n [\"10.8212212 10.5939013\"],\n [\"10.9189774 10.7821363\"],\n [\"10.0745650 8.0106483\"],\n [\"10.6198257 9.9438713\"],\n [\"9.8442045 8.5292476\"],\n [\"9.5218499 10.4179416\"],\n ])\n \n data = BatchOperator.fromDataframe(df_data, schemaStr='features string')\n dataStream = StreamOperator.fromDataframe(df_data, schemaStr='features string')\n \n gmm = GmmTrainBatchOp() \\\n .setVectorCol(\"features\") \\\n .setEpsilon(0.)\n \n model = gmm.linkFrom(data)\n \n predictor = GmmPredictBatchOp() \\\n .setPredictionCol(\"cluster_id\") \\\n .setVectorCol(\"features\") \\\n .setPredictionDetailCol(\"cluster_detail\")\n \n predictor.linkFrom(model, data).print()\n \n predictorStream = GmmPredictStreamOp(model) \\\n .setPredictionCol(\"cluster_id\") \\\n .setVectorCol(\"features\") \\\n .setPredictionDetailCol(\"cluster_detail\")\n \n predictorStream.linkFrom(dataStream).print()\n \n StreamOperator.execute()\n pass",
"import unittest\nfrom pyalink.alink import *\nimport numpy as np\nimport pandas as pd\nclass TestVectorInteractionBatchOp(unittest.TestCase):\n def test_vectorinteractionbatchop(self):\n\n df = pd.DataFrame([\n [\"$8$1:3,2:4,4:7\", \"$8$1:3,2:4,4:7\"],\n [\"$8$0:3,5:5\", \"$8$1:2,2:4,4:7\"],\n [\"$8$2:4,4:5\", \"$5$1:3,2:3,4:7\"]\n ])\n \n data = BatchOperator.fromDataframe(df, schemaStr=\"vec1 string, vec2 string\")\n vecInter = VectorInteractionBatchOp().setSelectedCols([\"vec1\",\"vec2\"]).setOutputCol(\"vec_product\")\n vecInter.linkFrom(data).print()\n pass",
"import unittest\n\nimport pytest\n\nfrom pyalink.alink import *\n\n\ndef print_value_and_type(v):\n print(type(v), v)\n\n\nclass TestAkBatch(unittest.TestCase):\n\n def setUp(self) -> None:\n self.lfs = LocalFileSystem()\n self.hfs = HadoopFileSystem(\"2.8.3\", \"hdfs://xxx:9000\")\n self.ofs = OssFileSystem(\"3.4.1\", \"xxx\", \"xxx\", \"xxx\", \"xxx\")\n\n @pytest.mark.skip()\n def test_batch(self):\n import numpy as np\n import pandas as pd\n arr = np.array([\n [1, 2, 3],\n [1, 2, 3],\n [3, 4, 5]\n ])\n df = pd.DataFrame(arr)\n source = BatchOperator.fromDataframe(df, \"uid int, iid int, label int\")\n\n for fs in [self.lfs, self.hfs, self.ofs]:\n if fs == self.lfs:\n filepath = FilePath(\"/tmp/test_alink_file_sink_str\", fs)\n else:\n filepath = FilePath(\"tmp/test_alink_file_sink_str\", fs)\n AkSinkBatchOp() \\\n .setFilePath(filepath) \\\n .setOverwriteSink(True) \\\n .setNumFiles(3) \\\n .linkFrom(source)\n BatchOperator.execute()\n\n AkSourceBatchOp() \\\n .setFilePath(filepath) \\\n .print()\n",
"import unittest\nfrom pyalink.alink import *\nimport numpy as np\nimport pandas as pd\nclass TestKSigmaOutlierBatchOp(unittest.TestCase):\n def test_ksigmaoutlierbatchop(self):\n\n import pandas as pd\n df = pd.DataFrame([\n [0.73, 0],\n [0.24, 0],\n [0.63, 0],\n [0.55, 0],\n [0.73, 0],\n [0.41, 0]\n ])\n \n dataOp = BatchOperator.fromDataframe(df, schemaStr='val double, label int')\n \n outlierOp = KSigmaOutlierBatchOp()\\\n \t\t\t.setFeatureCol(\"val\")\\\n \t\t\t.setOutlierThreshold(3.0)\\\n \t\t\t.setPredictionCol(\"pred\")\\\n \t\t\t.setPredictionDetailCol(\"pred_detail\")\n \n evalOp = EvalOutlierBatchOp()\\\n \t\t\t.setLabelCol(\"label\")\\\n \t\t\t.setPredictionDetailCol(\"pred_detail\")\\\n \t\t\t.setOutlierValueStrings([\"1\"]);\n \n metrics = dataOp\\\n \t\t\t.link(outlierOp)\\\n \t\t\t.link(evalOp)\\\n \t\t\t.collectMetrics()\n \n print(metrics)\n pass",
"import unittest\n\nimport numpy as np\nimport pandas as pd\n\nfrom pyalink.alink import *\n\n\nclass TestPinyi(unittest.TestCase):\n\n def test_pca(self):\n data = np.array([\n [\"1.0 2.0 4.0\", \"a\"],\n [\"-1.0 -3.0 4.0\", \"a\"],\n [\"4.0 2.0 3.0\", \"b\"],\n [\"3.4 5.1 5.0\", \"b\"]\n ])\n df = pd.DataFrame({\"vec\": data[:, 0], \"lable\": data[:, 1]})\n\n source = dataframeToOperator(df, schemaStr='vec string, label string', op_type='batch')\n\n pca = PCA() \\\n .setK(2) \\\n .setCalculationType(\"CORR\") \\\n .setPredictionCol(\"pred\") \\\n .setReservedCols([\"label\"]) \\\n .setVectorCol(\"vec\")\n\n model = pca.fit(source)\n model.transform(source).print()\n\n def test_pca2(self):\n data = np.array([\n [0.0, 0.0, 0.0],\n [0.1, 0.2, 0.1],\n [0.2, 0.2, 0.8],\n [9.0, 9.5, 9.7],\n [9.1, 9.1, 9.6],\n [9.2, 9.3, 9.9]\n ])\n\n df = pd.DataFrame({\"x1\": data[:, 0], \"x2\": data[:, 1], \"x3\": data[:, 2]})\n\n # batch source\n inOp = dataframeToOperator(df, schemaStr='x1 double, x2 double, x3 double', op_type='batch')\n\n pca = PCA() \\\n .setK(2) \\\n .setSelectedCols([\"x1\", \"x2\", \"x3\"]) \\\n .setPredictionCol(\"pred\")\n\n # train\n model = pca.fit(inOp)\n\n # batch predict\n model.transform(inOp).print()\n\n # stream predict\n inStreamOp = dataframeToOperator(df, schemaStr='x1 double, x2 double, x3 double', op_type='stream')\n\n model.transform(inStreamOp).print()\n\n StreamOperator.execute()\n",
"import unittest\nfrom pyalink.alink import *\nimport numpy as np\nimport pandas as pd\nclass TestTextApproxNearestNeighborPredictBatchOp(unittest.TestCase):\n def test_textapproxnearestneighborpredictbatchop(self):\n\n df = pd.DataFrame([\n [0, \"a b c d e\", \"a a b c e\"],\n [1, \"a a c e d w\", \"a a b b e d\"],\n [2, \"c d e f a\", \"b b c e f a\"],\n [3, \"b d e f h\", \"d d e a c\"],\n [4, \"a c e d m\", \"a e e f b c\"]\n ])\n \n inOp = BatchOperator.fromDataframe(df, schemaStr='id long, text1 string, text2 string')\n \n train = TextApproxNearestNeighborTrainBatchOp().setIdCol(\"id\").setSelectedCol(\"text1\").setMetric(\"SIMHASH_HAMMING_SIM\").linkFrom(inOp)\n predict = TextApproxNearestNeighborPredictBatchOp().setSelectedCol(\"text2\").setTopN(3).linkFrom(train, inOp)\n predict.print()\n pass",
"import unittest\nfrom pyalink.alink import *\nimport numpy as np\nimport pandas as pd\nclass TestCrossFeaturePredictStreamOp(unittest.TestCase):\n def test_crossfeaturepredictstreamop(self):\n\n df = pd.DataFrame([\n [\"1.0\", \"1.0\", 1.0, 1],\n [\"1.0\", \"1.0\", 0.0, 1],\n [\"1.0\", \"0.0\", 1.0, 1],\n [\"1.0\", \"0.0\", 1.0, 1],\n [\"2.0\", \"3.0\", None, 0],\n [\"2.0\", \"3.0\", 1.0, 0],\n [\"0.0\", \"1.0\", 2.0, 0],\n [\"0.0\", \"1.0\", 1.0, 0]])\n batchData = BatchOperator.fromDataframe(df, schemaStr=\"f0 string, f1 string, f2 double, label bigint\")\n streamData = StreamOperator.fromDataframe(df, schemaStr=\"f0 string, f1 string, f2 double, label bigint\")\n train = CrossFeatureTrainBatchOp().setSelectedCols(['f0','f1','f2']).linkFrom(batchData)\n CrossFeaturePredictStreamOp(train).setOutputCol(\"cross\").linkFrom(streamData).print()\n StreamOperator.execute()\n pass",
"import unittest\nfrom pyalink.alink import *\nimport numpy as np\nimport pandas as pd\nclass TestVectorSizeHintStreamOp(unittest.TestCase):\n def test_vectorsizehintstreamop(self):\n\n df = pd.DataFrame([\n [\"$8$1:3,2:4,4:7\"],\n [\"$8$2:4,4:5\"]\n ])\n data = StreamOperator.fromDataframe(df, schemaStr=\"vec string\")\n VectorSizeHintStreamOp().setSelectedCol(\"vec\").setOutputCol(\"vec_hint\").setHandleInvalidMethod(\"Skip\").setSize(3).linkFrom(data).print()\n StreamOperator.execute()\n pass",
"import unittest\n\nimport numpy as np\nimport pandas as pd\n\nfrom pyalink.alink import *\n\n\nclass TestPinyi(unittest.TestCase):\n\n def run_standard_scaler_op(self):\n data = np.array([\n [\"a\", 10.0, 100],\n [\"b\", -2.5, 9],\n [\"c\", 100.2, 1],\n [\"d\", -99.9, 100],\n [\"a\", 1.4, 1],\n [\"b\", -2.2, 9],\n [\"c\", 100.9, 1]\n ])\n\n colnames = [\"col1\", \"col2\", \"col3\"]\n selectedColNames = [\"col2\", \"col3\"]\n\n df = pd.DataFrame({\"col1\": data[:, 0], \"col2\": data[:, 1], \"col3\": data[:, 2]})\n inOp = dataframeToOperator(df, schemaStr='col1 string, col2 double, col3 long', op_type='batch')\n\n # train\n trainOp = StandardScalerTrainBatchOp() \\\n .setSelectedCols(selectedColNames)\n\n trainOp.linkFrom(inOp)\n\n # batch predict\n predictOp = StandardScalerPredictBatchOp()\n predictOp.linkFrom(trainOp, inOp).print()\n\n # stream predict\n sinOp = dataframeToOperator(df, schemaStr='col1 string, col2 double, col3 long', op_type='stream')\n\n predictStreamOp = StandardScalerPredictStreamOp(trainOp)\n predictStreamOp.linkFrom(sinOp).print()\n\n StreamOperator.execute()\n"
] | [
[
"pandas.DataFrame"
],
[
"pandas.DataFrame"
],
[
"pandas.DataFrame"
],
[
"pandas.DataFrame"
],
[
"pandas.DataFrame"
],
[
"pandas.DataFrame"
],
[
"numpy.array",
"pandas.DataFrame"
],
[
"pandas.DataFrame"
],
[
"numpy.array",
"pandas.DataFrame"
],
[
"pandas.DataFrame"
],
[
"pandas.DataFrame"
],
[
"pandas.DataFrame"
],
[
"numpy.array",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
COSIMA/esmgrids | [
"130c8d00774c624c196fc2ca15f5cad78c4cdbc1"
] | [
"esmgrids/util.py"
] | [
"\nimport numpy as np\nimport pyproj\nfrom shapely.geometry import shape\n\nproj_str = '+proj=laea +lat_0={} +lon_0={} +ellps=sphere'\n\n\ndef calc_area_of_polygons(clons, clats):\n \"\"\"\n Calculate the area of lat-lon polygons.\n\n We project sphere onto a flat surface using an equal area projection\n and then calculate the area of flat polygon.\n\n This is slow we should do some caching to avoid recomputing.\n \"\"\"\n\n areas = np.zeros(clons.shape[1:])\n areas[:] = np.NAN\n\n for j in range(areas.shape[0]):\n for i in range(areas.shape[1]):\n\n lats = clats[:, j, i]\n lons = clons[:, j, i]\n\n lat_centre = lats[0] + abs(lats[2] - lats[1]) / 2\n lon_centre = lons[0] + abs(lons[1] - lons[0]) / 2\n\n pa = pyproj.Proj(proj_str.format(lat_centre, lon_centre))\n x, y = pa(lons, lats)\n\n cop = {\"type\": \"Polygon\", \"coordinates\": [zip(x, y)]}\n areas[j, i] = shape(cop).area\n\n assert(np.sum(areas) is not np.NAN)\n assert(np.min(areas) > 0)\n\n return areas\n"
] | [
[
"numpy.zeros",
"numpy.sum",
"numpy.min"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
arp95/pytorch-lightning | [
"649f3f142fe53103e63bf9c6a5161152ec216768"
] | [
"tests/base/model_test_steps.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport random\nfrom abc import ABC\nfrom collections import OrderedDict\n\nimport torch\n\nfrom pytorch_lightning.core.step_result import EvalResult\n\n\nclass TestStepVariations(ABC):\n \"\"\"\n Houses all variations of test steps\n \"\"\"\n\n def test_step(self, batch, batch_idx, *args, **kwargs):\n \"\"\"\n Default, baseline test_step\n :param batch:\n :return:\n \"\"\"\n self.test_step_called = True\n\n x, y = batch\n x = x.view(x.size(0), -1)\n y_hat = self(x)\n\n loss_test = self.loss(y, y_hat)\n\n # acc\n labels_hat = torch.argmax(y_hat, dim=1)\n test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)\n test_acc = torch.tensor(test_acc)\n\n test_acc = test_acc.type_as(x)\n\n # alternate possible outputs to test\n if batch_idx % 1 == 0:\n output = OrderedDict({'test_loss': loss_test, 'test_acc': test_acc})\n return output\n if batch_idx % 2 == 0:\n return test_acc\n\n if batch_idx % 3 == 0:\n output = OrderedDict({'test_loss': loss_test,\n 'test_acc': test_acc,\n 'test_dic': {'test_loss_a': loss_test}})\n return output\n\n def test_step__result_obj(self, batch, batch_idx, *args, **kwargs):\n \"\"\"\n Default, baseline test_step\n :param batch:\n :return:\n \"\"\"\n x, y = batch\n x = x.view(x.size(0), -1)\n y_hat = self(x)\n\n loss_test = self.loss(y, y_hat)\n\n # acc\n labels_hat = torch.argmax(y_hat, dim=1)\n test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)\n test_acc = torch.tensor(test_acc)\n\n test_acc = test_acc.type_as(x)\n\n result = EvalResult()\n # alternate possible outputs to test\n if batch_idx % 1 == 0:\n result.log_dict({'test_loss': loss_test, 'test_acc': test_acc})\n return result\n if batch_idx % 2 == 0:\n return test_acc\n\n if batch_idx % 3 == 0:\n result.log_dict({'test_loss': loss_test, 'test_acc': test_acc})\n result.test_dic = {'test_loss_a': loss_test}\n return result\n\n def test_step__multiple_dataloaders(self, batch, batch_idx, dataloader_idx, **kwargs):\n \"\"\"\n Default, baseline test_step\n :param batch:\n :return:\n \"\"\"\n x, y = batch\n x = x.view(x.size(0), -1)\n y_hat = self(x)\n\n loss_test = self.loss(y, y_hat)\n\n # acc\n labels_hat = torch.argmax(y_hat, dim=1)\n test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)\n test_acc = torch.tensor(test_acc)\n\n test_acc = test_acc.type_as(x)\n\n # alternate possible outputs to test\n if batch_idx % 1 == 0:\n output = OrderedDict({'test_loss': loss_test, 'test_acc': test_acc})\n return output\n if batch_idx % 2 == 0:\n return test_acc\n\n if batch_idx % 3 == 0:\n output = OrderedDict({\n 'test_loss': loss_test,\n 'test_acc': test_acc,\n 'test_dic': {'test_loss_a': loss_test}\n })\n return output\n if batch_idx % 5 == 0:\n output = OrderedDict({f'test_loss_{dataloader_idx}': loss_test, f'test_acc_{dataloader_idx}': test_acc})\n return output\n\n def test_step__empty(self, batch, batch_idx, *args, **kwargs):\n return {}\n\n def test_step__result_preds(self, batch, batch_idx, 
optimizer_idx=None):\n x, y = batch\n x = x.view(x.size(0), -1)\n y_hat = self(x)\n\n loss_test = self.loss(y, y_hat)\n\n # acc\n labels_hat = torch.argmax(y_hat, dim=1)\n test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)\n test_acc = torch.tensor(test_acc)\n\n test_acc = test_acc.type_as(x)\n\n # Do regular EvalResult Logging\n result = EvalResult(checkpoint_on=loss_test)\n result.log('test_loss', loss_test)\n result.log('test_acc', test_acc)\n\n batch_size = x.size(0)\n lst_of_str = [random.choice(['dog', 'cat']) for i in range(batch_size)]\n lst_of_int = [random.randint(500, 1000) for i in range(batch_size)]\n lst_of_lst = [[x] for x in lst_of_int]\n lst_of_dict = [{k: v} for k, v in zip(lst_of_str, lst_of_int)]\n\n # This is passed in from pytest via parameterization\n option = getattr(self, 'test_option', 0)\n prediction_file = getattr(self, 'prediction_file', 'predictions.pt')\n\n lazy_ids = torch.arange(batch_idx * self.batch_size, batch_idx * self.batch_size + x.size(0))\n\n # Base\n if option == 0:\n self.write_prediction('idxs', lazy_ids, prediction_file)\n self.write_prediction('preds', labels_hat, prediction_file)\n\n # Check mismatching tensor len\n elif option == 1:\n self.write_prediction('idxs', torch.cat((lazy_ids, lazy_ids)), prediction_file)\n self.write_prediction('preds', labels_hat, prediction_file)\n\n # write multi-dimension\n elif option == 2:\n self.write_prediction('idxs', lazy_ids, prediction_file)\n self.write_prediction('preds', labels_hat, prediction_file)\n self.write_prediction('x', x, prediction_file)\n\n # write str list\n elif option == 3:\n self.write_prediction('idxs', lazy_ids, prediction_file)\n self.write_prediction('vals', lst_of_str, prediction_file)\n\n # write int list\n elif option == 4:\n self.write_prediction('idxs', lazy_ids, prediction_file)\n self.write_prediction('vals', lst_of_int, prediction_file)\n\n # write nested list\n elif option == 5:\n self.write_prediction('idxs', lazy_ids, prediction_file)\n self.write_prediction('vals', lst_of_lst, prediction_file)\n\n # write dict list\n elif option == 6:\n self.write_prediction('idxs', lazy_ids, prediction_file)\n self.write_prediction('vals', lst_of_dict, prediction_file)\n\n elif option == 7:\n self.write_prediction_dict({'idxs': lazy_ids, 'preds': labels_hat}, prediction_file)\n\n return result\n"
] | [
[
"torch.tensor",
"torch.sum",
"torch.cat",
"torch.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kuanghuei/scene-graph-TF-release | [
"0e2eb7d3d3875a0c965e8093a7acf437879fe576"
] | [
"data_tools/vg_to_roidb_vrr_fullVGv2_coco_aware.py"
] | [
"# coding=utf8\n# --------------------------------------------------------\n# Scene Graph Generation by Iterative Message Passing\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Danfei Xu\n# --------------------------------------------------------\n\nimport argparse, json, string\nfrom collections import Counter\nimport math\nimport os\nfrom math import floor\nimport h5py as h5\nimport numpy as np\nimport pprint\n\nimport xml.etree.ElementTree as ET\n\n\"\"\"\nA script for generating an hdf5 ROIDB from the VisualGenome dataset\n\"\"\"\n\ndef preprocess_object_labels(data, alias_dict={}):\n for img in data:\n for obj in img['objects']:\n obj['ids'] = [obj['object_id']]\n names = []\n for name in obj['names']:\n label = sentence_preprocess(name)\n if label in alias_dict:\n label = alias_dict[label]\n names.append(label)\n obj['names'] = names\n\n\ndef preprocess_predicates(data, alias_dict={}):\n for img in data:\n for relation in img['relationships']:\n predicate = sentence_preprocess(relation['predicate'])\n if predicate in alias_dict:\n predicate = alias_dict[predicate]\n relation['predicate'] = predicate\n\n\ndef extract_object_token(data, obj_list=[], verbose=True):\n \"\"\" Builds a set that contains the object names. Filters infrequent tokens. \"\"\"\n token_counter = Counter()\n for img in data:\n for region in img['objects']:\n for name in region['names']:\n if not obj_list or name in obj_list:\n token_counter.update([name])\n tokens = set()\n # pick top N tokens\n token_counter_return = {}\n for token, count in token_counter.most_common():\n tokens.add(token)\n token_counter_return[token] = count\n if verbose:\n print(('Keeping %d / %d objects'\n % (len(tokens), len(token_counter))))\n return tokens, token_counter_return\n\n\ndef extract_predicate_token(data, pred_list=[], verbose=True):\n \"\"\" Builds a set that contains the relationship predicates. Filters infrequent tokens. 
\"\"\"\n token_counter = Counter()\n total = 0\n for img in data:\n for relation in img['relationships']:\n predicate = relation['predicate']\n if not pred_list or predicate in pred_list:\n token_counter.update([predicate])\n total += 1\n tokens = set()\n token_counter_return = {}\n for token, count in token_counter.most_common():\n tokens.add(token)\n token_counter_return[token] = count\n if verbose:\n print(('Keeping %d / %d predicates with enough instances'\n % (len(tokens), len(token_counter))))\n return tokens, token_counter_return\n\n\ndef merge_duplicate_boxes(data):\n def IoU(b1, b2):\n if b1[2] <= b2[0] or \\\n b1[3] <= b2[1] or \\\n b1[0] >= b2[2] or \\\n b1[1] >= b2[3]:\n return 0\n\n b1b2 = np.vstack([b1,b2])\n minc = np.min(b1b2, 0)\n maxc = np.max(b1b2, 0)\n union_area = (maxc[2]-minc[0])*(maxc[3]-minc[1])\n int_area = (minc[2]-maxc[0])*(minc[3]-maxc[1])\n return float(int_area)/float(union_area)\n\n def to_x1y1x2y2(obj):\n x1 = obj['x']\n y1 = obj['y']\n x2 = obj['x'] + obj['w']\n y2 = obj['y'] + obj['h']\n return np.array([x1, y1, x2, y2], dtype=np.int32)\n\n def inside(b1, b2):\n return b1[0] >= b2[0] and b1[1] >= b2[1] \\\n and b1[2] <= b2[2] and b1[3] <= b2[3]\n\n def overlap(obj1, obj2):\n b1 = to_x1y1x2y2(obj1)\n b2 = to_x1y1x2y2(obj2)\n iou = IoU(b1, b2)\n if all(b1 == b2) or iou > 0.9: # consider as the same box\n return 1\n elif (inside(b1, b2) or inside(b2, b1))\\\n and obj1['names'][0] == obj2['names'][0]: # same object inside the other\n return 2\n elif iou > 0.6 and obj1['names'][0] == obj2['names'][0]: # multiple overlapping same object\n return 3\n else:\n return 0 # no overlap\n\n num_merged = {1:0, 2:0, 3:0}\n print('merging boxes..')\n for img in data:\n # mark objects to be merged and save their ids\n objs = img['objects']\n num_obj = len(objs)\n for i in range(num_obj):\n if 'M_TYPE' in objs[i]: # has been merged\n continue\n merged_objs = [] # circular refs, but fine\n for j in range(i+1, num_obj):\n if 'M_TYPE' in objs[j]: # has been merged\n continue\n overlap_type = overlap(objs[i], objs[j])\n if overlap_type > 0:\n objs[j]['M_TYPE'] = overlap_type\n merged_objs.append(objs[j])\n objs[i]['mobjs'] = merged_objs\n\n # merge boxes\n filtered_objs = []\n merged_num_obj = 0\n for obj in objs:\n if 'M_TYPE' not in obj:\n ids = [obj['object_id']]\n dims = [to_x1y1x2y2(obj)]\n prominent_type = 1\n for mo in obj['mobjs']:\n ids.append(mo['object_id'])\n obj['names'].extend(mo['names'])\n dims.append(to_x1y1x2y2(mo))\n if mo['M_TYPE'] > prominent_type:\n prominent_type = mo['M_TYPE']\n merged_num_obj += len(ids)\n obj['ids'] = ids\n mdims = np.zeros(4)\n if prominent_type > 1: # use extreme\n mdims[:2] = np.min(np.vstack(dims)[:,:2], 0)\n mdims[2:] = np.max(np.vstack(dims)[:,2:], 0)\n else: # use mean\n mdims = np.mean(np.vstack(dims), 0)\n obj['x'] = int(mdims[0])\n obj['y'] = int(mdims[1])\n obj['w'] = int(mdims[2] - mdims[0])\n obj['h'] = int(mdims[3] - mdims[1])\n\n num_merged[prominent_type] += len(obj['mobjs'])\n\n obj['mobjs'] = None\n obj['names'] = list(set(obj['names'])) # remove duplicates\n\n filtered_objs.append(obj)\n else:\n assert 'mobjs' not in obj\n\n img['objects'] = filtered_objs\n assert(merged_num_obj == num_obj)\n\n print('# merged boxes per merging type:')\n print(num_merged)\n\n\ndef build_token_dict(vocab):\n \"\"\" build bi-directional mapping between index and token\"\"\"\n token_to_idx, idx_to_token = {}, {}\n next_idx = 1\n vocab_sorted = sorted(list(vocab)) # make sure it's the same order everytime\n for token in vocab_sorted:\n 
token_to_idx[token] = next_idx\n idx_to_token[next_idx] = token\n next_idx = next_idx + 1\n\n return token_to_idx, idx_to_token\n\n\ndef encode_box(region, org_h, org_w, im_long_size):\n x = region['x']\n y = region['y']\n w = region['w']\n h = region['h']\n scale = float(im_long_size) / max(org_h, org_w)\n image_size = im_long_size\n # recall: x,y are 1-indexed\n x, y = math.floor(scale*(region['x']-1)), math.floor(scale*(region['y']-1))\n w, h = math.ceil(scale*region['w']), math.ceil(scale*region['h'])\n\n # clamp to image\n if x < 0: x = 0\n if y < 0: y = 0\n\n # box should be at least 2 by 2\n if x > image_size - 2:\n x = image_size - 2\n if y > image_size - 2:\n y = image_size - 2\n if x + w >= image_size:\n w = image_size - x\n if y + h >= image_size:\n h = image_size - y\n\n # also convert to center-coord oriented\n box = np.asarray([x+floor(w/2), y+floor(h/2), w, h], dtype=np.int32)\n assert box[2] > 0 # width height should be positive numbers\n assert box[3] > 0\n return box\n\n\ndef encode_objects(obj_data, token_to_idx, token_counter, org_h, org_w, im_long_sizes):\n encoded_labels = []\n encoded_boxes = {}\n for size in im_long_sizes:\n encoded_boxes[size] = []\n im_to_first_obj = np.zeros(len(obj_data), dtype=np.int32)\n im_to_last_obj = np.zeros(len(obj_data), dtype=np.int32)\n obj_counter = 0\n\n for i, img in enumerate(obj_data):\n im_to_first_obj[i] = obj_counter\n img['id_to_idx'] = {} # object id to region idx\n for obj in img['objects']:\n # pick a label for the object\n max_occur = 0\n obj_label = None\n for name in obj['names']:\n # pick the name that has maximum occurance\n if name in token_to_idx and token_counter[name] > max_occur:\n obj_label = name\n max_occur = token_counter[obj_label]\n # if obj_label is None:\n # print(obj['names'])\n if obj_label is not None:\n # encode region\n for size in im_long_sizes:\n encoded_boxes[size].append(encode_box(obj, org_h[i], org_w[i], size))\n\n encoded_labels.append(token_to_idx[obj_label])\n\n for obj_id in obj['ids']: # assign same index for merged ids\n img['id_to_idx'][obj_id] = obj_counter\n\n obj_counter += 1\n\n\n if im_to_first_obj[i] == obj_counter:\n im_to_first_obj[i] = -1\n im_to_last_obj[i] = -1\n else:\n im_to_last_obj[i] = obj_counter - 1\n\n for k, boxes in encoded_boxes.items():\n # print(boxes)\n encoded_boxes[k] = np.vstack(boxes)\n return np.vstack(encoded_labels), encoded_boxes, im_to_first_obj, im_to_last_obj\n\n\ndef encode_relationship(sub_id, obj_id, id_to_idx):\n # builds a tuple of the index of object and subject in the object list\n sub_idx = id_to_idx[sub_id]\n obj_idx = id_to_idx[obj_id]\n return np.asarray([sub_idx, obj_idx], dtype=np.int32)\n\n\ndef encode_relationships(rel_data, token_to_idx, obj_data):\n \"\"\"MUST BE CALLED AFTER encode_objects!!!\"\"\"\n encoded_pred = [] # encoded predicates\n encoded_rel = [] # encoded relationship tuple\n im_to_first_rel = np.zeros(len(rel_data), dtype=np.int32)\n im_to_last_rel = np.zeros(len(rel_data), dtype=np.int32)\n rel_idx_counter = 0\n\n no_rel_counter = 0\n obj_filtered = 0\n predicate_filtered = 0\n duplicate_filtered = 0\n for i, img in enumerate(rel_data):\n im_to_first_rel[i] = rel_idx_counter\n id_to_idx = obj_data[i]['id_to_idx'] # object id to object list idx\n for relation in img['relationships']:\n subj = relation['subject']\n obj = relation['object']\n predicate = relation['predicate']\n if subj['object_id'] not in id_to_idx or obj['object_id'] not in id_to_idx:\n obj_filtered += 1\n continue\n elif predicate not in token_to_idx:\n 
predicate_filtered += 1\n continue\n elif id_to_idx[subj['object_id']] == id_to_idx[obj['object_id']]: # sub and obj can't be the same box\n duplicate_filtered += 1\n continue\n else:\n encoded_pred.append(token_to_idx[predicate])\n encoded_rel.append(\n encode_relationship(subj['object_id'],\n obj['object_id'],\n id_to_idx\n ))\n rel_idx_counter += 1 # accumulate counter\n\n if im_to_first_rel[i] == rel_idx_counter:\n # if no qualifying relationship\n im_to_first_rel[i] = -1\n im_to_last_rel[i] = -1\n no_rel_counter += 1\n else:\n im_to_last_rel[i] = rel_idx_counter - 1\n print('%i rel is filtered by object' % obj_filtered)\n print('%i rel is filtered by predicate' % predicate_filtered)\n print('%i rel is filtered by duplicate' % duplicate_filtered)\n print('%i rel remains ' % len(encoded_pred))\n\n print('%i out of %i valid images have relationships' % (len(rel_data)-no_rel_counter, len(rel_data)))\n return np.vstack(encoded_pred), np.vstack(encoded_rel), im_to_first_rel, im_to_last_rel\n\n\ndef sentence_preprocess(phrase):\n \"\"\" preprocess a sentence: lowercase, clean up weird chars, remove punctuation \"\"\"\n replacements = {\n '½': 'half',\n '—' : '-',\n '™': '',\n '¢': 'cent',\n 'ç': 'c',\n 'û': 'u',\n 'é': 'e',\n '°': ' degree',\n 'è': 'e',\n '…': '',\n }\n phrase = phrase.encode('utf-8')\n phrase = phrase.lstrip(' ').rstrip(' ')\n for k, v in replacements.iteritems():\n phrase = phrase.replace(k, v)\n return str(phrase).lower().translate(None, string.punctuation).decode('utf-8', 'ignore')\n\n\ndef encode_splits(obj_data, img_data, args):\n # if opt is not None:\n # val_begin_idx = opt['val_begin_idx']\n # test_begin_idx = opt['test_begin_idx']\n val_cocoids = set()\n test_cocoids = set()\n with open(args.coco_meta) as fp:\n coco_meta = json.load(fp)\n for img in coco_meta['images']:\n if img['split'] == 'val':\n val_cocoids.add(int(img['cocoid']))\n elif img['split'] == 'test':\n test_cocoids.add(int(img['cocoid']))\n\n print(\"val_cocoids\", len(val_cocoids))\n print(\"test_cocoids\", len(test_cocoids))\n\n vg_split_dict = {}\n for img in img_data:\n vg_split_dict[img['image_id']] = img['coco_id']\n \n split = np.zeros(len(obj_data), dtype=np.int32)\n for i, info in enumerate(obj_data):\n splitix = 0\n coco_id = vg_split_dict[int(info['image_id'])]\n if coco_id in val_cocoids or coco_id in test_cocoids:\n splitix = 2\n # elif int(info['image_id']) in test_cocoids:\n # splitix = 2\n # if opt is None: # use encode from input file\n # s = info['split']\n # if s == 'val': splitix = 1\n # if s == 'test': splitix = 2\n # else: # use portion split\n # if i >= val_begin_idx: splitix = 1\n # if i >= test_begin_idx: splitix = 2\n split[i] = splitix\n # if opt is not None and opt['shuffle']:\n # np.random.shuffle(split)\n\n print(('assigned %d/%d/%d to train/val/test split' % (np.sum(split==0), np.sum(split==1), np.sum(split==2))))\n return split\n\n\ndef make_alias_dict(dict_file):\n \"\"\"create an alias dictionary from a file\"\"\"\n out_dict = {}\n vocab = []\n for line in open(dict_file, 'r'):\n alias = line.strip('\\n').strip('\\r').split(',')\n alias_target = alias[0] if alias[0] not in out_dict else out_dict[alias[0]]\n for a in alias:\n out_dict[a] = alias_target # use the first term as the aliasing target\n vocab.append(alias_target)\n return out_dict, vocab\n\n\ndef make_list(list_file):\n \"\"\"create a blacklist list from a file\"\"\"\n return [line.strip('\\n').strip('\\r') for line in open(list_file)]\n\n\ndef filter_object_boxes(data, heights, widths, area_frac_thresh):\n 
\"\"\"\n filter boxes by a box area-image area ratio threshold\n \"\"\"\n thresh_count = 0\n all_count = 0\n for i, img in enumerate(data):\n filtered_obj = []\n area = float(heights[i]*widths[i])\n for obj in img['objects']:\n if float(obj['h'] * obj['w']) > area * area_frac_thresh:\n filtered_obj.append(obj)\n thresh_count += 1\n all_count += 1\n img['objects'] = filtered_obj\n print('box threshod: keeping %i/%i boxes' % (thresh_count, all_count))\n\n\ndef filter_by_idx(data, valid_list):\n return [data[i] for i in valid_list]\n\n\ndef obj_rel_cross_check(obj_data, rel_data, verbose=False):\n \"\"\"\n make sure all objects that are in relationship dataset\n are in object dataset\n \"\"\"\n num_img = len(obj_data)\n num_correct = 0\n total_rel = 0\n for i in xrange(num_img):\n assert(obj_data[i]['image_id'] == rel_data[i]['image_id'])\n objs = obj_data[i]['objects']\n rels = rel_data[i]['relationships']\n ids = [obj['object_id'] for obj in objs]\n for rel in rels:\n if rel['subject']['object_id'] in ids \\\n and rel['object']['object_id'] in ids:\n num_correct += 1\n elif verbose:\n if rel['subject']['object_id'] not in ids:\n print(str(rel['subject']['object_id']) + 'cannot be found in ' + str(i))\n if rel['object']['object_id'] not in ids:\n print(str(rel['object']['object_id']) + 'cannot be found in ' + str(i))\n total_rel += 1\n print('cross check: %i/%i relationship are correct' % (num_correct, total_rel))\n\n\ndef sync_objects(obj_data, rel_data):\n num_img = len(obj_data)\n for i in xrange(num_img):\n assert(obj_data[i]['image_id'] == rel_data[i]['image_id'])\n objs = obj_data[i]['objects']\n rels = rel_data[i]['relationships']\n\n ids = [obj['object_id'] for obj in objs]\n for rel in rels:\n if rel['subject']['object_id'] not in ids:\n rel_obj = rel['subject']\n rel_obj['names'] = [rel_obj['name']]\n objs.append(rel_obj)\n if rel['object']['object_id'] not in ids:\n rel_obj = rel['object']\n rel_obj['names'] = [rel_obj['name']]\n objs.append(rel_obj)\n\n obj_data[i]['objects'] = objs\n\n# def create_from_xml(img_data, orginal_obj_data, args):\n# pred_list = set()\n# obj_list = set()\n# obj_data, rel_data = [], []\n\n# for im in img_data:\n# tree = ET.parse('{}/{}.xml'.format(args.vrrvg_dir, im['image_id']))\n# root = tree.getroot()\n# for child in root:\n# if child.tag == 'object':\n# name = str(child[0].text)\n# obj_list.add(name)\n\n# if child.tag == 'relation':\n# predicate = str(child[2].text)\n# pred_list.add(predicate)\n\n\n# for im, im_obj in zip(img_data, orginal_obj_data):\n# tree = ET.parse('{}/{}.xml'.format(args.vrrvg_dir, im['image_id']))\n# root = tree.getroot()\n# obj_data.append({'objects':[], 'image_id':im['image_id']})\n# rel_data.append({'relationships':[], 'image_id':im['image_id']})\n\n# im_obj_ids = set()\n# for obj in im_obj['objects']:\n# if any([name in obj_list for name in obj['names']]):\n# obj_data[-1]['objects'].append(obj)\n# im_obj_ids.add(obj['object_id'])\n\n# for child in root:\n# if child.tag == 'object':\n# name = str(child[0].text)\n# object_id = int(child[1].text)\n# if object_id not in im_obj_ids:\n# print(\"NEW OBJ: {}/{}\".format(im['image_id'], object_id))\n# xmin = int(child[3][0].text)\n# ymin = int(child[3][1].text)\n# xmax = int(child[3][2].text)\n# ymax = int(child[3][3].text)\n# w = xmax - xmin\n# h = ymax - ymin\n# obj_data[-1]['objects'].append({'x': xmin, 'y': ymin, 'w': w, 'h': h, 'object_id': object_id, 'names': [name]})\n\n# if child.tag == 'relation':\n# subject_id = int(child[0].text)\n# object_id = int(child[1].text)\n# 
predicate = str(child[2].text)\n# rel_data[-1]['relationships'].append({'object': {'object_id': object_id}, 'subject': {'object_id': subject_id}, 'predicate': predicate})\n \n# return list(obj_list), list(pred_list), obj_data, rel_data\n\n\ndef create_from_xml(img_data, orginal_obj_data, args):\n pred_list = set()\n obj_list = set()\n obj_data, rel_data = [], []\n\n for im in img_data:\n if os.path.exists('{}/{}.xml'.format(args.vrrvg_dir, im['image_id'])):\n tree = ET.parse('{}/{}.xml'.format(args.vrrvg_dir, im['image_id']))\n root = tree.getroot()\n for child in root:\n if child.tag == 'object':\n name = str(child[0].text)\n obj_list.add(name)\n\n if child.tag == 'relation':\n predicate = str(child[2].text)\n pred_list.add(predicate)\n\n\n for im, im_obj in zip(img_data, orginal_obj_data):\n obj_data.append({'objects':[], 'image_id':im['image_id']})\n rel_data.append({'relationships':[], 'image_id':im['image_id']})\n\n if os.path.exists('{}/{}.xml'.format(args.vrrvg_dir, im['image_id'])):\n tree = ET.parse('{}/{}.xml'.format(args.vrrvg_dir, im['image_id']))\n root = tree.getroot()\n im_obj_ids = set()\n \n for child in root:\n if child.tag == 'object':\n name = str(child[0].text)\n object_id = int(child[1].text)\n im_obj_ids.add(object_id)\n # print(\"NEW OBJ: {}/{}\".format(im['image_id'], object_id))\n xmin = int(child[3][0].text)\n ymin = int(child[3][1].text)\n xmax = int(child[3][2].text)\n ymax = int(child[3][3].text)\n w = xmax - xmin\n h = ymax - ymin\n obj_data[-1]['objects'].append({'x': xmin, 'y': ymin, 'w': w, 'h': h, 'object_id': object_id, 'names': [name]})\n\n for obj in im_obj['objects']:\n if any([name in obj_list for name in obj['names']]):\n if obj['object_id'] not in im_obj_ids:\n obj_data[-1]['objects'].append(obj)\n im_obj_ids.add(int(obj['object_id']))\n\n for child in root:\n if child.tag == 'relation':\n subject_id = int(child[0].text)\n object_id = int(child[1].text)\n predicate = str(child[2].text)\n if subject_id not in im_obj_ids or object_id not in im_obj_ids:\n print(subject_id, object_id, predicate)\n rel_data[-1]['relationships'].append({'object': {'object_id': object_id}, 'subject': {'object_id': subject_id}, 'predicate': predicate})\n else:\n for obj in im_obj['objects']:\n if any([name in obj_list for name in obj['names']]):\n obj_data[-1]['objects'].append(obj)\n im_obj_ids.add(int(obj['object_id']))\n\n return list(obj_list), list(pred_list), obj_data, rel_data\n\n\ndef main(args):\n print('start')\n pprint.pprint(args)\n\n # val_cocoids = set()\n # test_cocoids = set()\n # with open(args.coco_meta) as fp:\n # coco_meta = json.load(fp)\n # for img in coco_meta['images']:\n # if img['split'] == 'val':\n # val_cocoids.add(int(img['cocoid']))\n # elif img['split'] == 'test':\n # test_cocoids.add(int(img['cocoid']))\n # print(\"val_cocoids\", len(val_cocoids))\n # print(\"test_cocoids\", len(test_cocoids))\n\n # read in the annotation data\n print('loading json files..')\n orginal_obj_data = json.load(open(args.object_input))\n img_data = json.load(open(args.metadata_input))\n\n print('read image db from %s' % args.imdb)\n imdb = h5.File(args.imdb, 'r')\n num_im, _, _, _ = imdb['images'].shape\n img_long_sizes = [512, 1024]\n valid_im_idx = imdb['valid_idx'][:] # valid image indices\n img_ids = imdb['image_ids'][:]\n\n print('len(valid_im_idx)', len(valid_im_idx))\n print('len(img_ids)', len(img_ids))\n\n orginal_obj_data = filter_by_idx(orginal_obj_data, valid_im_idx)\n img_data = filter_by_idx(img_data, valid_im_idx)\n\n with open(\"image_data_.json\", 
'w') as f:\n json.dump(img_data, f)\n\n print('len(img_data) before filtering', len(img_data))\n\n obj_list, pred_list, obj_data, rel_data = create_from_xml(img_data, orginal_obj_data, args)\n print(\"number of object classes\", len(obj_list))\n print(\"number of predicate classes\", len(pred_list))\n\n print('len(obj_data) after filtering', len(obj_data))\n print('len(rel_data) after filtering', len(rel_data))\n print('len(img_data) after filtering', len(img_data))\n\n # sanity check\n for i in xrange(num_im):\n assert(obj_data[i]['image_id'] \\\n == rel_data[i]['image_id'] \\\n == img_data[i]['image_id'] \\\n == img_ids[i]\n )\n\n # may only load a fraction of the data\n if args.load_frac < 1:\n num_im = int(num_im*args.load_frac)\n obj_data = obj_data[:num_im]\n rel_data = rel_data[:num_im]\n print('processing %i images' % num_im)\n\n # sync objects from rel to obj_data\n sync_objects(obj_data, rel_data)\n\n obj_rel_cross_check(obj_data, rel_data)\n\n heights, widths = imdb['original_heights'][:], imdb['original_widths'][:]\n if args.min_box_area_frac > 0:\n # filter out invalid small boxes\n print('threshold bounding box by %f area fraction' % args.min_box_area_frac)\n filter_object_boxes(obj_data, heights, widths, args.min_box_area_frac) # filter by box dimensions\n\n merge_duplicate_boxes(obj_data)\n\n # build vocabulary\n object_tokens, object_token_counter = extract_object_token(obj_data, obj_list)\n\n label_to_idx, idx_to_label = build_token_dict(object_tokens)\n\n predicate_tokens, predicate_token_counter = extract_predicate_token(rel_data, pred_list)\n predicate_to_idx, idx_to_predicate = build_token_dict(predicate_tokens)\n\n # print out vocabulary\n print('objects: ')\n print(object_token_counter)\n print('relationships: ')\n print(predicate_token_counter)\n\n # write the h5 file\n f = h5.File(args.h5_file, 'w')\n\n # encode object\n encoded_label, encoded_boxes, im_to_first_obj, im_to_last_obj = \\\n encode_objects(obj_data, label_to_idx, object_token_counter, \\\n heights, widths, img_long_sizes)\n\n f.create_dataset('labels', data=encoded_label)\n for k, boxes in encoded_boxes.items():\n f.create_dataset('boxes_%i' % k, data=boxes)\n f.create_dataset('img_to_first_box', data=im_to_first_obj)\n f.create_dataset('img_to_last_box', data=im_to_last_obj)\n\n encoded_predicate, encoded_rel, im_to_first_rel, im_to_last_rel = \\\n encode_relationships(rel_data, predicate_to_idx, obj_data)\n\n f.create_dataset('predicates', data=encoded_predicate)\n f.create_dataset('relationships', data=encoded_rel)\n f.create_dataset('img_to_first_rel', data=im_to_first_rel)\n f.create_dataset('img_to_last_rel', data=im_to_last_rel)\n\n # build train/val/test splits\n\n print('num objects = %i' % encoded_label.shape[0])\n print('num relationships = %i' % encoded_predicate.shape[0])\n\n # opt = None\n # if not args.use_input_split:\n # opt = {}\n # opt['val_begin_idx'] = int(len(obj_data) * args.train_frac)\n # opt['test_begin_idx'] = int(len(obj_data) * args.val_frac)\n # opt['shuffle'] = args.shuffle\n split = encode_splits(obj_data, img_data, args)\n\n if split is not None:\n f.create_dataset('split', data=split) # 1 = test, 0 = train\n\n # and write the additional json file\n json_struct = {\n 'label_to_idx': label_to_idx,\n 'idx_to_label': idx_to_label,\n 'predicate_to_idx': predicate_to_idx,\n 'idx_to_predicate': idx_to_predicate,\n 'predicate_count': predicate_token_counter,\n 'object_count': object_token_counter\n }\n\n with open(args.json_file, 'w') as f:\n json.dump(json_struct, 
f)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--imdb', default='VG/imdb_1024.h5', type=str)\n parser.add_argument('--object_input', default='VG/objects.json', type=str)\n parser.add_argument('--relationship_input', default='VG/relationships.json', type=str)\n parser.add_argument('--vrrvg_dir', default='VG/VrR-VG', type=str)\n parser.add_argument('--metadata_input', default='VG/image_data.json', type=str)\n parser.add_argument('--coco_meta', default='VG/dataset_coco.json', type=str)\n\n # parser.add_argument('--object_alias', default='VG/object_alias.txt', type=str)\n # parser.add_argument('--pred_alias', default='VG/predicate_alias.txt', type=str)\n # parser.add_argument('--object_list', default='VG/object_list.txt', type=str)\n # parser.add_argument('--pred_list', default='VG/predicate_list.txt', type=str)\n # parser.add_argument('--num_objects', default=150, type=int, help=\"set to 0 to disable filtering\")\n # parser.add_argument('--num_predicates', default=50, type=int, help=\"set to 0 to disable filtering\")\n parser.add_argument('--min_box_area_frac', default=0.002, type=float)\n parser.add_argument('--json_file', default='VG-dicts.json')\n parser.add_argument('--h5_file', default='VG.h5')\n parser.add_argument('--load_frac', default=1, type=float)\n parser.add_argument('--use_input_split', default=False, type=bool)\n # parser.add_argument('--train_frac', default=0.7, type=float)\n # parser.add_argument('--val_frac', default=0.7, type=float)\n parser.add_argument('--shuffle', default=False, type=bool)\n\n args = parser.parse_args()\n main(args)\n"
] | [
[
"numpy.min",
"numpy.asarray",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
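Every numpy call listed for this record shows up in the preprocessing script above: `numpy.vstack`, `numpy.min`, and `numpy.max` drive the box-overlap test in `merge_duplicate_boxes`, `numpy.array`/`numpy.asarray`/`numpy.zeros` build the encoded boxes and index arrays, and `numpy.sum` reports the train/val/test split counts. Below is a standalone restatement of that overlap helper with made-up boxes; as in the script, the denominator is the area of the enclosing box rather than a true union, so this is the script's approximation rather than the textbook IoU.

```python
import numpy as np

def iou(b1, b2):
    """Overlap score as computed inside merge_duplicate_boxes above.

    Boxes are [x1, y1, x2, y2]; the 'union' term is the area of the box
    enclosing both inputs, so this understates the textbook IoU.
    """
    if b1[2] <= b2[0] or b1[3] <= b2[1] or b1[0] >= b2[2] or b1[1] >= b2[3]:
        return 0.0  # boxes do not touch at all

    b1b2 = np.vstack([b1, b2])
    minc = np.min(b1b2, 0)   # elementwise minima of the two boxes
    maxc = np.max(b1b2, 0)   # elementwise maxima of the two boxes
    union_area = (maxc[2] - minc[0]) * (maxc[3] - minc[1])
    int_area = (minc[2] - maxc[0]) * (minc[3] - maxc[1])
    return float(int_area) / float(union_area)

# Two made-up boxes for illustration.
a = np.array([0, 0, 10, 10], dtype=np.int32)
b = np.array([5, 5, 15, 15], dtype=np.int32)
print(iou(a, b))  # 25 / 225 ≈ 0.111
```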
timhunderwood/pandas | [
"148f9fd74fc71cb7509c0898883036316efc6f89",
"0159cba6eb14983ab7eaf38ff138c3c397a6fe3b",
"148f9fd74fc71cb7509c0898883036316efc6f89",
"0159cba6eb14983ab7eaf38ff138c3c397a6fe3b"
] | [
"pandas/core/indexes/interval.py",
"pandas/tests/frame/methods/test_reset_index.py",
"pandas/tests/frame/methods/test_isin.py",
"pandas/tests/groupby/test_categorical.py"
] | [
"\"\"\" define the IntervalIndex \"\"\"\nfrom operator import le, lt\nimport textwrap\nfrom typing import Any, Optional, Tuple, Union\n\nimport numpy as np\n\nfrom pandas._config import get_option\n\nfrom pandas._libs import lib\nfrom pandas._libs.interval import Interval, IntervalMixin, IntervalTree\nfrom pandas._libs.tslibs import BaseOffset, Timedelta, Timestamp, to_offset\nfrom pandas._typing import AnyArrayLike, Label\nfrom pandas.errors import InvalidIndexError\nfrom pandas.util._decorators import Appender, Substitution, cache_readonly\nfrom pandas.util._exceptions import rewrite_exception\n\nfrom pandas.core.dtypes.cast import (\n find_common_type,\n infer_dtype_from_scalar,\n maybe_downcast_to_dtype,\n)\nfrom pandas.core.dtypes.common import (\n ensure_platform_int,\n is_categorical_dtype,\n is_datetime64tz_dtype,\n is_datetime_or_timedelta_dtype,\n is_dtype_equal,\n is_float,\n is_float_dtype,\n is_integer,\n is_integer_dtype,\n is_interval_dtype,\n is_list_like,\n is_number,\n is_object_dtype,\n is_scalar,\n)\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas.core.algorithms import take_1d\nfrom pandas.core.arrays.interval import IntervalArray, _interval_shared_docs\nimport pandas.core.common as com\nfrom pandas.core.indexers import is_valid_positional_slice\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.indexes.base import (\n Index,\n _index_shared_docs,\n default_pprint,\n ensure_index,\n maybe_extract_name,\n)\nfrom pandas.core.indexes.datetimes import DatetimeIndex, date_range\nfrom pandas.core.indexes.extension import ExtensionIndex, inherit_names\nfrom pandas.core.indexes.multi import MultiIndex\nfrom pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range\nfrom pandas.core.ops import get_op_result_name\n\n_VALID_CLOSED = {\"left\", \"right\", \"both\", \"neither\"}\n_index_doc_kwargs = dict(ibase._index_doc_kwargs)\n\n_index_doc_kwargs.update(\n dict(\n klass=\"IntervalIndex\",\n qualname=\"IntervalIndex\",\n target_klass=\"IntervalIndex or list of Intervals\",\n name=textwrap.dedent(\n \"\"\"\\\n name : object, optional\n Name to be stored in the index.\n \"\"\"\n ),\n )\n)\n\n\ndef _get_next_label(label):\n dtype = getattr(label, \"dtype\", type(label))\n if isinstance(label, (Timestamp, Timedelta)):\n dtype = \"datetime64\"\n if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):\n return label + np.timedelta64(1, \"ns\")\n elif is_integer_dtype(dtype):\n return label + 1\n elif is_float_dtype(dtype):\n return np.nextafter(label, np.infty)\n else:\n raise TypeError(f\"cannot determine next label for type {repr(type(label))}\")\n\n\ndef _get_prev_label(label):\n dtype = getattr(label, \"dtype\", type(label))\n if isinstance(label, (Timestamp, Timedelta)):\n dtype = \"datetime64\"\n if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):\n return label - np.timedelta64(1, \"ns\")\n elif is_integer_dtype(dtype):\n return label - 1\n elif is_float_dtype(dtype):\n return np.nextafter(label, -np.infty)\n else:\n raise TypeError(f\"cannot determine next label for type {repr(type(label))}\")\n\n\ndef _new_IntervalIndex(cls, d):\n \"\"\"\n This is called upon unpickling, rather than the default which doesn't have\n arguments and breaks __new__.\n \"\"\"\n return cls.from_arrays(**d)\n\n\nclass SetopCheck:\n \"\"\"\n This is called to decorate the set operations of IntervalIndex\n to perform the type check in advance.\n \"\"\"\n\n def __init__(self, op_name):\n self.op_name = op_name\n\n def __call__(self, 
setop):\n def func(intvidx_self, other, sort=False):\n intvidx_self._assert_can_do_setop(other)\n other = ensure_index(other)\n\n if not isinstance(other, IntervalIndex):\n result = getattr(intvidx_self.astype(object), self.op_name)(other)\n if self.op_name in (\"difference\",):\n result = result.astype(intvidx_self.dtype)\n return result\n elif intvidx_self.closed != other.closed:\n raise ValueError(\n \"can only do set operations between two IntervalIndex \"\n \"objects that are closed on the same side\"\n )\n\n # GH 19016: ensure set op will not return a prohibited dtype\n subtypes = [intvidx_self.dtype.subtype, other.dtype.subtype]\n common_subtype = find_common_type(subtypes)\n if is_object_dtype(common_subtype):\n raise TypeError(\n f\"can only do {self.op_name} between two IntervalIndex \"\n \"objects that have compatible dtypes\"\n )\n\n return setop(intvidx_self, other, sort)\n\n return func\n\n\n@Appender(\n _interval_shared_docs[\"class\"]\n % dict(\n klass=\"IntervalIndex\",\n summary=\"Immutable index of intervals that are closed on the same side.\",\n name=_index_doc_kwargs[\"name\"],\n versionadded=\"0.20.0\",\n extra_attributes=\"is_overlapping\\nvalues\\n\",\n extra_methods=\"\",\n examples=textwrap.dedent(\n \"\"\"\\\n Examples\n --------\n A new ``IntervalIndex`` is typically constructed using\n :func:`interval_range`:\n\n >>> pd.interval_range(start=0, end=5)\n IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],\n closed='right',\n dtype='interval[int64]')\n\n It may also be constructed using one of the constructor\n methods: :meth:`IntervalIndex.from_arrays`,\n :meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.\n\n See further examples in the doc strings of ``interval_range`` and the\n mentioned constructor methods.\n \"\"\"\n ),\n )\n)\n@inherit_names([\"set_closed\", \"to_tuples\"], IntervalArray, wrap=True)\n@inherit_names(\n [\"__array__\", \"overlaps\", \"contains\", \"left\", \"right\", \"length\"], IntervalArray,\n)\n@inherit_names(\n [\"is_non_overlapping_monotonic\", \"mid\", \"closed\"], IntervalArray, cache=True,\n)\nclass IntervalIndex(IntervalMixin, ExtensionIndex):\n _typ = \"intervalindex\"\n _comparables = [\"name\"]\n _attributes = [\"name\"]\n\n # we would like our indexing holder to defer to us\n _defer_to_indexing = True\n\n # Immutable, so we are able to cache computations like isna in '_mask'\n _mask = None\n\n _data: IntervalArray\n # --------------------------------------------------------------------\n # Constructors\n\n def __new__(\n cls,\n data,\n closed=None,\n dtype=None,\n copy: bool = False,\n name=None,\n verify_integrity: bool = True,\n ):\n\n name = maybe_extract_name(name, data, cls)\n\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray(\n data,\n closed=closed,\n copy=copy,\n dtype=dtype,\n verify_integrity=verify_integrity,\n )\n\n return cls._simple_new(array, name)\n\n @classmethod\n def _simple_new(cls, array: IntervalArray, name: Label = None):\n \"\"\"\n Construct from an IntervalArray\n\n Parameters\n ----------\n array : IntervalArray\n name : Label, default None\n Attached as result.name\n \"\"\"\n assert isinstance(array, IntervalArray), type(array)\n\n result = IntervalMixin.__new__(cls)\n result._data = array\n result.name = name\n result._cache = {}\n result._no_setting_name = False\n result._reset_identity()\n return result\n\n @classmethod\n @Appender(\n _interval_shared_docs[\"from_breaks\"]\n % dict(\n klass=\"IntervalIndex\",\n examples=textwrap.dedent(\n 
\"\"\"\\\n Examples\n --------\n >>> pd.IntervalIndex.from_breaks([0, 1, 2, 3])\n IntervalIndex([(0, 1], (1, 2], (2, 3]],\n closed='right',\n dtype='interval[int64]')\n \"\"\"\n ),\n )\n )\n def from_breaks(\n cls, breaks, closed: str = \"right\", name=None, copy: bool = False, dtype=None\n ):\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray.from_breaks(\n breaks, closed=closed, copy=copy, dtype=dtype\n )\n return cls._simple_new(array, name=name)\n\n @classmethod\n @Appender(\n _interval_shared_docs[\"from_arrays\"]\n % dict(\n klass=\"IntervalIndex\",\n examples=textwrap.dedent(\n \"\"\"\\\n Examples\n --------\n >>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3])\n IntervalIndex([(0, 1], (1, 2], (2, 3]],\n closed='right',\n dtype='interval[int64]')\n \"\"\"\n ),\n )\n )\n def from_arrays(\n cls,\n left,\n right,\n closed: str = \"right\",\n name=None,\n copy: bool = False,\n dtype=None,\n ):\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray.from_arrays(\n left, right, closed, copy=copy, dtype=dtype\n )\n return cls._simple_new(array, name=name)\n\n @classmethod\n @Appender(\n _interval_shared_docs[\"from_tuples\"]\n % dict(\n klass=\"IntervalIndex\",\n examples=textwrap.dedent(\n \"\"\"\\\n Examples\n --------\n >>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)])\n IntervalIndex([(0, 1], (1, 2]],\n closed='right',\n dtype='interval[int64]')\n \"\"\"\n ),\n )\n )\n def from_tuples(\n cls, data, closed: str = \"right\", name=None, copy: bool = False, dtype=None\n ):\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype)\n return cls._simple_new(arr, name=name)\n\n # --------------------------------------------------------------------\n\n @Appender(Index._shallow_copy.__doc__)\n def _shallow_copy(self, values=None, name: Label = lib.no_default):\n name = self.name if name is lib.no_default else name\n cache = self._cache.copy() if values is None else {}\n if values is None:\n values = self._data\n\n result = self._simple_new(values, name=name)\n result._cache = cache\n return result\n\n @cache_readonly\n def _isnan(self):\n \"\"\"\n Return a mask indicating if each value is NA.\n \"\"\"\n if self._mask is None:\n self._mask = isna(self.left)\n return self._mask\n\n @cache_readonly\n def _engine(self):\n left = self._maybe_convert_i8(self.left)\n right = self._maybe_convert_i8(self.right)\n return IntervalTree(left, right, closed=self.closed)\n\n def __contains__(self, key: Any) -> bool:\n \"\"\"\n return a boolean if this key is IN the index\n We *only* accept an Interval\n\n Parameters\n ----------\n key : Interval\n\n Returns\n -------\n bool\n \"\"\"\n hash(key)\n if not isinstance(key, Interval):\n return False\n\n try:\n self.get_loc(key)\n return True\n except KeyError:\n return False\n\n @cache_readonly\n def _multiindex(self) -> MultiIndex:\n return MultiIndex.from_arrays([self.left, self.right], names=[\"left\", \"right\"])\n\n @cache_readonly\n def values(self) -> IntervalArray:\n \"\"\"\n Return the IntervalIndex's data as an IntervalArray.\n \"\"\"\n return self._data\n\n @property\n def _has_complex_internals(self) -> bool:\n # used to avoid libreduction code paths, which raise or require conversion\n return True\n\n def __array_wrap__(self, result, context=None):\n # we don't want the superclass implementation\n return result\n\n def __reduce__(self):\n d = dict(left=self.left, right=self.right)\n 
d.update(self._get_attributes_dict())\n return _new_IntervalIndex, (type(self), d), None\n\n @Appender(Index.astype.__doc__)\n def astype(self, dtype, copy=True):\n with rewrite_exception(\"IntervalArray\", type(self).__name__):\n new_values = self._values.astype(dtype, copy=copy)\n if is_interval_dtype(new_values.dtype):\n return self._shallow_copy(new_values)\n return Index.astype(self, dtype, copy=copy)\n\n @property\n def inferred_type(self) -> str:\n \"\"\"Return a string of the type inferred from the values\"\"\"\n return \"interval\"\n\n @Appender(Index.memory_usage.__doc__)\n def memory_usage(self, deep: bool = False) -> int:\n # we don't use an explicit engine\n # so return the bytes here\n return self.left.memory_usage(deep=deep) + self.right.memory_usage(deep=deep)\n\n # IntervalTree doesn't have a is_monotonic_decreasing, so have to override\n # the Index implementation\n @cache_readonly\n def is_monotonic_decreasing(self) -> bool:\n \"\"\"\n Return True if the IntervalIndex is monotonic decreasing (only equal or\n decreasing values), else False\n \"\"\"\n return self[::-1].is_monotonic_increasing\n\n @cache_readonly\n def is_unique(self):\n \"\"\"\n Return True if the IntervalIndex contains unique elements, else False.\n \"\"\"\n left = self.left\n right = self.right\n\n if self.isna().sum() > 1:\n return False\n\n if left.is_unique or right.is_unique:\n return True\n\n seen_pairs = set()\n check_idx = np.where(left.duplicated(keep=False))[0]\n for idx in check_idx:\n pair = (left[idx], right[idx])\n if pair in seen_pairs:\n return False\n seen_pairs.add(pair)\n\n return True\n\n @property\n def is_overlapping(self) -> bool:\n \"\"\"\n Return True if the IntervalIndex has overlapping intervals, else False.\n\n Two intervals overlap if they share a common point, including closed\n endpoints. Intervals that only have an open endpoint in common do not\n overlap.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n bool\n Boolean indicating if the IntervalIndex has overlapping intervals.\n\n See Also\n --------\n Interval.overlaps : Check whether two Interval objects overlap.\n IntervalIndex.overlaps : Check an IntervalIndex elementwise for\n overlaps.\n\n Examples\n --------\n >>> index = pd.IntervalIndex.from_tuples([(0, 2), (1, 3), (4, 5)])\n >>> index\n IntervalIndex([(0, 2], (1, 3], (4, 5]],\n closed='right',\n dtype='interval[int64]')\n >>> index.is_overlapping\n True\n\n Intervals that share closed endpoints overlap:\n\n >>> index = pd.interval_range(0, 3, closed='both')\n >>> index\n IntervalIndex([[0, 1], [1, 2], [2, 3]],\n closed='both',\n dtype='interval[int64]')\n >>> index.is_overlapping\n True\n\n Intervals that only have an open endpoint in common do not overlap:\n\n >>> index = pd.interval_range(0, 3, closed='left')\n >>> index\n IntervalIndex([[0, 1), [1, 2), [2, 3)],\n closed='left',\n dtype='interval[int64]')\n >>> index.is_overlapping\n False\n \"\"\"\n # GH 23309\n return self._engine.is_overlapping\n\n def _should_fallback_to_positional(self) -> bool:\n # integer lookups in Series.__getitem__ are unambiguously\n # positional in this case\n return self.dtype.subtype.kind in [\"m\", \"M\"]\n\n def _maybe_cast_slice_bound(self, label, side, kind):\n return getattr(self, side)._maybe_cast_slice_bound(label, side, kind)\n\n @Appender(Index._convert_list_indexer.__doc__)\n def _convert_list_indexer(self, keyarr):\n \"\"\"\n we are passed a list-like indexer. 
Return the\n indexer for matching intervals.\n \"\"\"\n locs = self.get_indexer_for(keyarr)\n\n # we have missing values\n if (locs == -1).any():\n raise KeyError\n\n return locs\n\n def _can_reindex(self, indexer: np.ndarray) -> None:\n \"\"\"\n Check if we are allowing reindexing with this particular indexer.\n\n Parameters\n ----------\n indexer : an integer indexer\n\n Raises\n ------\n ValueError if its a duplicate axis\n \"\"\"\n # trying to reindex on an axis with duplicates\n if self.is_overlapping and len(indexer):\n raise ValueError(\"cannot reindex from an overlapping axis\")\n\n def _needs_i8_conversion(self, key) -> bool:\n \"\"\"\n Check if a given key needs i8 conversion. Conversion is necessary for\n Timestamp, Timedelta, DatetimeIndex, and TimedeltaIndex keys. An\n Interval-like requires conversion if it's endpoints are one of the\n aforementioned types.\n\n Assumes that any list-like data has already been cast to an Index.\n\n Parameters\n ----------\n key : scalar or Index-like\n The key that should be checked for i8 conversion\n\n Returns\n -------\n bool\n \"\"\"\n if is_interval_dtype(key) or isinstance(key, Interval):\n return self._needs_i8_conversion(key.left)\n\n i8_types = (Timestamp, Timedelta, DatetimeIndex, TimedeltaIndex)\n return isinstance(key, i8_types)\n\n def _maybe_convert_i8(self, key):\n \"\"\"\n Maybe convert a given key to it's equivalent i8 value(s). Used as a\n preprocessing step prior to IntervalTree queries (self._engine), which\n expects numeric data.\n\n Parameters\n ----------\n key : scalar or list-like\n The key that should maybe be converted to i8.\n\n Returns\n -------\n scalar or list-like\n The original key if no conversion occurred, int if converted scalar,\n Int64Index if converted list-like.\n \"\"\"\n original = key\n if is_list_like(key):\n key = ensure_index(key)\n\n if not self._needs_i8_conversion(key):\n return original\n\n scalar = is_scalar(key)\n if is_interval_dtype(key) or isinstance(key, Interval):\n # convert left/right and reconstruct\n left = self._maybe_convert_i8(key.left)\n right = self._maybe_convert_i8(key.right)\n constructor = Interval if scalar else IntervalIndex.from_arrays\n return constructor(left, right, closed=self.closed)\n\n if scalar:\n # Timestamp/Timedelta\n key_dtype, key_i8 = infer_dtype_from_scalar(key, pandas_dtype=True)\n else:\n # DatetimeIndex/TimedeltaIndex\n key_dtype, key_i8 = key.dtype, Index(key.asi8)\n if key.hasnans:\n # convert NaT from it's i8 value to np.nan so it's not viewed\n # as a valid value, maybe causing errors (e.g. 
is_overlapping)\n key_i8 = key_i8.where(~key._isnan)\n\n # ensure consistency with IntervalIndex subtype\n subtype = self.dtype.subtype\n\n if not is_dtype_equal(subtype, key_dtype):\n raise ValueError(\n f\"Cannot index an IntervalIndex of subtype {subtype} with \"\n f\"values of dtype {key_dtype}\"\n )\n\n return key_i8\n\n def _check_method(self, method):\n if method is None:\n return\n\n if method in [\"bfill\", \"backfill\", \"pad\", \"ffill\", \"nearest\"]:\n raise NotImplementedError(\n f\"method {method} not yet implemented for IntervalIndex\"\n )\n\n raise ValueError(\"Invalid fill method\")\n\n def _searchsorted_monotonic(self, label, side, exclude_label=False):\n if not self.is_non_overlapping_monotonic:\n raise KeyError(\n \"can only get slices from an IntervalIndex if bounds are \"\n \"non-overlapping and all monotonic increasing or decreasing\"\n )\n\n if isinstance(label, IntervalMixin):\n raise NotImplementedError(\"Interval objects are not currently supported\")\n\n # GH 20921: \"not is_monotonic_increasing\" for the second condition\n # instead of \"is_monotonic_decreasing\" to account for single element\n # indexes being both increasing and decreasing\n if (side == \"left\" and self.left.is_monotonic_increasing) or (\n side == \"right\" and not self.left.is_monotonic_increasing\n ):\n sub_idx = self.right\n if self.open_right or exclude_label:\n label = _get_next_label(label)\n else:\n sub_idx = self.left\n if self.open_left or exclude_label:\n label = _get_prev_label(label)\n\n return sub_idx._searchsorted_monotonic(label, side)\n\n def get_loc(\n self, key, method: Optional[str] = None, tolerance=None\n ) -> Union[int, slice, np.ndarray]:\n \"\"\"\n Get integer location, slice or boolean mask for requested label.\n\n Parameters\n ----------\n key : label\n method : {None}, optional\n * default: matches where the label is within an interval only.\n\n Returns\n -------\n int if unique index, slice if monotonic index, else mask\n\n Examples\n --------\n >>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2)\n >>> index = pd.IntervalIndex([i1, i2])\n >>> index.get_loc(1)\n 0\n\n You can also supply a point inside an interval.\n\n >>> index.get_loc(1.5)\n 1\n\n If a label is in several intervals, you get the locations of all the\n relevant intervals.\n\n >>> i3 = pd.Interval(0, 2)\n >>> overlapping_index = pd.IntervalIndex([i1, i2, i3])\n >>> overlapping_index.get_loc(0.5)\n array([ True, False, True])\n\n Only exact matches will be returned if an interval is provided.\n\n >>> index.get_loc(pd.Interval(0, 1))\n 0\n \"\"\"\n self._check_method(method)\n\n if not is_scalar(key):\n raise InvalidIndexError(key)\n\n if isinstance(key, Interval):\n if self.closed != key.closed:\n raise KeyError(key)\n mask = (self.left == key.left) & (self.right == key.right)\n else:\n # assume scalar\n op_left = le if self.closed_left else lt\n op_right = le if self.closed_right else lt\n try:\n mask = op_left(self.left, key) & op_right(key, self.right)\n except TypeError as err:\n # scalar is not comparable to II subtype --> invalid label\n raise KeyError(key) from err\n\n matches = mask.sum()\n if matches == 0:\n raise KeyError(key)\n elif matches == 1:\n return mask.argmax()\n return lib.maybe_booleans_to_slice(mask.view(\"u1\"))\n\n @Substitution(\n **dict(\n _index_doc_kwargs,\n **{\n \"raises_section\": textwrap.dedent(\n \"\"\"\n Raises\n ------\n NotImplementedError\n If any method argument other than the default of\n None is specified as these are not yet implemented.\n \"\"\"\n )\n },\n )\n 
)\n @Appender(_index_shared_docs[\"get_indexer\"])\n def get_indexer(\n self,\n target: AnyArrayLike,\n method: Optional[str] = None,\n limit: Optional[int] = None,\n tolerance: Optional[Any] = None,\n ) -> np.ndarray:\n\n self._check_method(method)\n\n if self.is_overlapping:\n raise InvalidIndexError(\n \"cannot handle overlapping indices; \"\n \"use IntervalIndex.get_indexer_non_unique\"\n )\n\n target_as_index = ensure_index(target)\n\n if isinstance(target_as_index, IntervalIndex):\n # equal indexes -> 1:1 positional match\n if self.equals(target_as_index):\n return np.arange(len(self), dtype=\"intp\")\n\n # different closed or incompatible subtype -> no matches\n common_subtype = find_common_type(\n [self.dtype.subtype, target_as_index.dtype.subtype]\n )\n if self.closed != target_as_index.closed or is_object_dtype(common_subtype):\n return np.repeat(np.intp(-1), len(target_as_index))\n\n # non-overlapping -> at most one match per interval in target_as_index\n # want exact matches -> need both left/right to match, so defer to\n # left/right get_indexer, compare elementwise, equality -> match\n left_indexer = self.left.get_indexer(target_as_index.left)\n right_indexer = self.right.get_indexer(target_as_index.right)\n indexer = np.where(left_indexer == right_indexer, left_indexer, -1)\n elif is_categorical_dtype(target_as_index.dtype):\n # get an indexer for unique categories then propagate to codes via take_1d\n categories_indexer = self.get_indexer(target_as_index.categories)\n indexer = take_1d(categories_indexer, target_as_index.codes, fill_value=-1)\n elif not is_object_dtype(target_as_index):\n # homogeneous scalar index: use IntervalTree\n target_as_index = self._maybe_convert_i8(target_as_index)\n indexer = self._engine.get_indexer(target_as_index.values)\n else:\n # heterogeneous scalar index: defer elementwise to get_loc\n # (non-overlapping so get_loc guarantees scalar of KeyError)\n indexer = []\n for key in target_as_index:\n try:\n loc = self.get_loc(key)\n except KeyError:\n loc = -1\n except InvalidIndexError as err:\n # i.e. 
non-scalar key\n raise TypeError(key) from err\n indexer.append(loc)\n\n return ensure_platform_int(indexer)\n\n @Appender(_index_shared_docs[\"get_indexer_non_unique\"] % _index_doc_kwargs)\n def get_indexer_non_unique(\n self, target: AnyArrayLike\n ) -> Tuple[np.ndarray, np.ndarray]:\n target_as_index = ensure_index(target)\n\n # check that target_as_index IntervalIndex is compatible\n if isinstance(target_as_index, IntervalIndex):\n common_subtype = find_common_type(\n [self.dtype.subtype, target_as_index.dtype.subtype]\n )\n if self.closed != target_as_index.closed or is_object_dtype(common_subtype):\n # different closed or incompatible subtype -> no matches\n return (\n np.repeat(-1, len(target_as_index)),\n np.arange(len(target_as_index)),\n )\n\n if is_object_dtype(target_as_index) or isinstance(\n target_as_index, IntervalIndex\n ):\n # target_as_index might contain intervals: defer elementwise to get_loc\n indexer, missing = [], []\n for i, key in enumerate(target_as_index):\n try:\n locs = self.get_loc(key)\n if isinstance(locs, slice):\n locs = np.arange(locs.start, locs.stop, locs.step, dtype=\"intp\")\n locs = np.array(locs, ndmin=1)\n except KeyError:\n missing.append(i)\n locs = np.array([-1])\n indexer.append(locs)\n indexer = np.concatenate(indexer)\n else:\n target_as_index = self._maybe_convert_i8(target_as_index)\n indexer, missing = self._engine.get_indexer_non_unique(\n target_as_index.values\n )\n\n return ensure_platform_int(indexer), ensure_platform_int(missing)\n\n def get_indexer_for(self, target: AnyArrayLike, **kwargs) -> np.ndarray:\n \"\"\"\n Guaranteed return of an indexer even when overlapping.\n\n This dispatches to get_indexer or get_indexer_non_unique\n as appropriate.\n\n Returns\n -------\n numpy.ndarray\n List of indices.\n \"\"\"\n if self.is_overlapping:\n return self.get_indexer_non_unique(target)[0]\n return self.get_indexer(target, **kwargs)\n\n def _convert_slice_indexer(self, key: slice, kind: str):\n if not (key.step is None or key.step == 1):\n # GH#31658 if label-based, we require step == 1,\n # if positional, we disallow float start/stop\n msg = \"label-based slicing with step!=1 is not supported for IntervalIndex\"\n if kind == \"loc\":\n raise ValueError(msg)\n elif kind == \"getitem\":\n if not is_valid_positional_slice(key):\n # i.e. this cannot be interpreted as a positional slice\n raise ValueError(msg)\n\n return super()._convert_slice_indexer(key, kind)\n\n @Appender(Index.where.__doc__)\n def where(self, cond, other=None):\n if other is None:\n other = self._na_value\n values = np.where(cond, self._values, other)\n result = IntervalArray(values)\n return self._shallow_copy(result)\n\n def delete(self, loc):\n \"\"\"\n Return a new IntervalIndex with passed location(-s) deleted\n\n Returns\n -------\n IntervalIndex\n \"\"\"\n new_left = self.left.delete(loc)\n new_right = self.right.delete(loc)\n result = self._data._shallow_copy(new_left, new_right)\n return self._shallow_copy(result)\n\n def insert(self, loc, item):\n \"\"\"\n Return a new IntervalIndex inserting new item at location. Follows\n Python list.append semantics for negative values. 
Only Interval\n objects and NA can be inserted into an IntervalIndex\n\n Parameters\n ----------\n loc : int\n item : object\n\n Returns\n -------\n IntervalIndex\n \"\"\"\n if isinstance(item, Interval):\n if item.closed != self.closed:\n raise ValueError(\n \"inserted item must be closed on the same side as the index\"\n )\n left_insert = item.left\n right_insert = item.right\n elif is_scalar(item) and isna(item):\n # GH 18295\n left_insert = right_insert = item\n else:\n raise ValueError(\n \"can only insert Interval objects and NA into an IntervalIndex\"\n )\n\n new_left = self.left.insert(loc, left_insert)\n new_right = self.right.insert(loc, right_insert)\n result = self._data._shallow_copy(new_left, new_right)\n return self._shallow_copy(result)\n\n @Appender(_index_shared_docs[\"take\"] % _index_doc_kwargs)\n def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):\n result = self._data.take(\n indices, axis=axis, allow_fill=allow_fill, fill_value=fill_value, **kwargs\n )\n return self._shallow_copy(result)\n\n # --------------------------------------------------------------------\n # Rendering Methods\n # __repr__ associated methods are based on MultiIndex\n\n def _format_with_header(self, header, **kwargs):\n return header + list(self._format_native_types(**kwargs))\n\n def _format_native_types(self, na_rep=\"NaN\", quoting=None, **kwargs):\n # GH 28210: use base method but with different default na_rep\n return super()._format_native_types(na_rep=na_rep, quoting=quoting, **kwargs)\n\n def _format_data(self, name=None):\n\n # TODO: integrate with categorical and make generic\n # name argument is unused here; just for compat with base / categorical\n n = len(self)\n max_seq_items = min((get_option(\"display.max_seq_items\") or n) // 10, 10)\n\n formatter = str\n\n if n == 0:\n summary = \"[]\"\n elif n == 1:\n first = formatter(self[0])\n summary = f\"[{first}]\"\n elif n == 2:\n first = formatter(self[0])\n last = formatter(self[-1])\n summary = f\"[{first}, {last}]\"\n else:\n\n if n > max_seq_items:\n n = min(max_seq_items // 2, 10)\n head = [formatter(x) for x in self[:n]]\n tail = [formatter(x) for x in self[-n:]]\n head_joined = \", \".join(head)\n tail_joined = \", \".join(tail)\n summary = f\"[{head_joined} ... 
{tail_joined}]\"\n else:\n tail = [formatter(x) for x in self]\n joined = \", \".join(tail)\n summary = f\"[{joined}]\"\n\n return summary + \",\" + self._format_space()\n\n def _format_attrs(self):\n attrs = [(\"closed\", repr(self.closed))]\n if self.name is not None:\n attrs.append((\"name\", default_pprint(self.name)))\n attrs.append((\"dtype\", f\"'{self.dtype}'\"))\n return attrs\n\n def _format_space(self) -> str:\n space = \" \" * (len(type(self).__name__) + 1)\n return f\"\\n{space}\"\n\n # --------------------------------------------------------------------\n\n def argsort(self, *args, **kwargs) -> np.ndarray:\n return np.lexsort((self.right, self.left))\n\n def equals(self, other) -> bool:\n \"\"\"\n Determines if two IntervalIndex objects contain the same elements.\n \"\"\"\n if self.is_(other):\n return True\n\n # if we can coerce to an II\n # then we can compare\n if not isinstance(other, IntervalIndex):\n if not is_interval_dtype(other):\n return False\n other = Index(other)\n\n return (\n self.left.equals(other.left)\n and self.right.equals(other.right)\n and self.closed == other.closed\n )\n\n @Appender(Index.intersection.__doc__)\n @SetopCheck(op_name=\"intersection\")\n def intersection(\n self, other: \"IntervalIndex\", sort: bool = False\n ) -> \"IntervalIndex\":\n if self.left.is_unique and self.right.is_unique:\n taken = self._intersection_unique(other)\n elif other.left.is_unique and other.right.is_unique and self.isna().sum() <= 1:\n # Swap other/self if other is unique and self does not have\n # multiple NaNs\n taken = other._intersection_unique(self)\n else:\n # duplicates\n taken = self._intersection_non_unique(other)\n\n if sort is None:\n taken = taken.sort_values()\n\n return taken\n\n def _intersection_unique(self, other: \"IntervalIndex\") -> \"IntervalIndex\":\n \"\"\"\n Used when the IntervalIndex does not have any common endpoint,\n no mater left or right.\n Return the intersection with another IntervalIndex.\n\n Parameters\n ----------\n other : IntervalIndex\n\n Returns\n -------\n IntervalIndex\n \"\"\"\n lindexer = self.left.get_indexer(other.left)\n rindexer = self.right.get_indexer(other.right)\n\n match = (lindexer == rindexer) & (lindexer != -1)\n indexer = lindexer.take(match.nonzero()[0])\n\n return self.take(indexer)\n\n def _intersection_non_unique(self, other: \"IntervalIndex\") -> \"IntervalIndex\":\n \"\"\"\n Used when the IntervalIndex does have some common endpoints,\n on either sides.\n Return the intersection with another IntervalIndex.\n\n Parameters\n ----------\n other : IntervalIndex\n\n Returns\n -------\n IntervalIndex\n \"\"\"\n mask = np.zeros(len(self), dtype=bool)\n\n if self.hasnans and other.hasnans:\n first_nan_loc = np.arange(len(self))[self.isna()][0]\n mask[first_nan_loc] = True\n\n other_tups = set(zip(other.left, other.right))\n for i, tup in enumerate(zip(self.left, self.right)):\n if tup in other_tups:\n mask[i] = True\n\n return self[mask]\n\n def _setop(op_name: str, sort=None):\n @SetopCheck(op_name=op_name)\n def func(self, other, sort=sort):\n result = getattr(self._multiindex, op_name)(other._multiindex, sort=sort)\n result_name = get_op_result_name(self, other)\n\n # GH 19101: ensure empty results have correct dtype\n if result.empty:\n result = result._values.astype(self.dtype.subtype)\n else:\n result = result._values\n\n return type(self).from_tuples(result, closed=self.closed, name=result_name)\n\n return func\n\n @property\n def is_all_dates(self) -> bool:\n \"\"\"\n This is False even when left/right 
contain datetime-like objects,\n as the check is done on the Interval itself\n \"\"\"\n return False\n\n union = _setop(\"union\")\n difference = _setop(\"difference\")\n symmetric_difference = _setop(\"symmetric_difference\")\n\n # TODO: arithmetic operations\n\n # GH#30817 until IntervalArray implements inequalities, get them from Index\n def __lt__(self, other):\n return Index.__lt__(self, other)\n\n def __le__(self, other):\n return Index.__le__(self, other)\n\n def __gt__(self, other):\n return Index.__gt__(self, other)\n\n def __ge__(self, other):\n return Index.__ge__(self, other)\n\n\nIntervalIndex._add_logical_methods_disabled()\n\n\ndef _is_valid_endpoint(endpoint) -> bool:\n \"\"\"\n Helper for interval_range to check if start/end are valid types.\n \"\"\"\n return any(\n [\n is_number(endpoint),\n isinstance(endpoint, Timestamp),\n isinstance(endpoint, Timedelta),\n endpoint is None,\n ]\n )\n\n\ndef _is_type_compatible(a, b) -> bool:\n \"\"\"\n Helper for interval_range to check type compat of start/end/freq.\n \"\"\"\n is_ts_compat = lambda x: isinstance(x, (Timestamp, BaseOffset))\n is_td_compat = lambda x: isinstance(x, (Timedelta, BaseOffset))\n return (\n (is_number(a) and is_number(b))\n or (is_ts_compat(a) and is_ts_compat(b))\n or (is_td_compat(a) and is_td_compat(b))\n or com.any_none(a, b)\n )\n\n\ndef interval_range(\n start=None, end=None, periods=None, freq=None, name=None, closed=\"right\"\n):\n \"\"\"\n Return a fixed frequency IntervalIndex.\n\n Parameters\n ----------\n start : numeric or datetime-like, default None\n Left bound for generating intervals.\n end : numeric or datetime-like, default None\n Right bound for generating intervals.\n periods : int, default None\n Number of periods to generate.\n freq : numeric, str, or DateOffset, default None\n The length of each interval. Must be consistent with the type of start\n and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1\n for numeric and 'D' for datetime-like.\n name : str, default None\n Name of the resulting IntervalIndex.\n closed : {'left', 'right', 'both', 'neither'}, default 'right'\n Whether the intervals are closed on the left-side, right-side, both\n or neither.\n\n Returns\n -------\n IntervalIndex\n\n See Also\n --------\n IntervalIndex : An Index of intervals that are all closed on the same side.\n\n Notes\n -----\n Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,\n exactly three must be specified. If ``freq`` is omitted, the resulting\n ``IntervalIndex`` will have ``periods`` linearly spaced elements between\n ``start`` and ``end``, inclusively.\n\n To learn more about datetime-like frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n Numeric ``start`` and ``end`` is supported.\n\n >>> pd.interval_range(start=0, end=5)\n IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],\n closed='right', dtype='interval[int64]')\n\n Additionally, datetime-like input is also supported.\n\n >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),\n ... end=pd.Timestamp('2017-01-04'))\n IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03],\n (2017-01-03, 2017-01-04]],\n closed='right', dtype='interval[datetime64[ns]]')\n\n The ``freq`` parameter specifies the frequency between the left and right.\n endpoints of the individual intervals within the ``IntervalIndex``. 
For\n numeric ``start`` and ``end``, the frequency must also be numeric.\n\n >>> pd.interval_range(start=0, periods=4, freq=1.5)\n IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],\n closed='right', dtype='interval[float64]')\n\n Similarly, for datetime-like ``start`` and ``end``, the frequency must be\n convertible to a DateOffset.\n\n >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),\n ... periods=3, freq='MS')\n IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01],\n (2017-03-01, 2017-04-01]],\n closed='right', dtype='interval[datetime64[ns]]')\n\n Specify ``start``, ``end``, and ``periods``; the frequency is generated\n automatically (linearly spaced).\n\n >>> pd.interval_range(start=0, end=6, periods=4)\n IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],\n closed='right',\n dtype='interval[float64]')\n\n The ``closed`` parameter specifies which endpoints of the individual\n intervals within the ``IntervalIndex`` are closed.\n\n >>> pd.interval_range(end=5, periods=4, closed='both')\n IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]],\n closed='both', dtype='interval[int64]')\n \"\"\"\n start = com.maybe_box_datetimelike(start)\n end = com.maybe_box_datetimelike(end)\n endpoint = start if start is not None else end\n\n if freq is None and com.any_none(periods, start, end):\n freq = 1 if is_number(endpoint) else \"D\"\n\n if com.count_not_none(start, end, periods, freq) != 3:\n raise ValueError(\n \"Of the four parameters: start, end, periods, and \"\n \"freq, exactly three must be specified\"\n )\n\n if not _is_valid_endpoint(start):\n raise ValueError(f\"start must be numeric or datetime-like, got {start}\")\n elif not _is_valid_endpoint(end):\n raise ValueError(f\"end must be numeric or datetime-like, got {end}\")\n\n if is_float(periods):\n periods = int(periods)\n elif not is_integer(periods) and periods is not None:\n raise TypeError(f\"periods must be a number, got {periods}\")\n\n if freq is not None and not is_number(freq):\n try:\n freq = to_offset(freq)\n except ValueError as err:\n raise ValueError(\n f\"freq must be numeric or convertible to DateOffset, got {freq}\"\n ) from err\n\n # verify type compatibility\n if not all(\n [\n _is_type_compatible(start, end),\n _is_type_compatible(start, freq),\n _is_type_compatible(end, freq),\n ]\n ):\n raise TypeError(\"start, end, freq need to be type compatible\")\n\n # +1 to convert interval count to breaks count (n breaks = n-1 intervals)\n if periods is not None:\n periods += 1\n\n if is_number(endpoint):\n # force consistency between start/end/freq (lower end if freq skips it)\n if com.all_not_none(start, end, freq):\n end -= (end - start) % freq\n\n # compute the period/start/end if unspecified (at most one)\n if periods is None:\n periods = int((end - start) // freq) + 1\n elif start is None:\n start = end - (periods - 1) * freq\n elif end is None:\n end = start + (periods - 1) * freq\n\n breaks = np.linspace(start, end, periods)\n if all(is_integer(x) for x in com.not_none(start, end, freq)):\n # np.linspace always produces float output\n breaks = maybe_downcast_to_dtype(breaks, \"int64\")\n else:\n # delegate to the appropriate range function\n if isinstance(endpoint, Timestamp):\n range_func = date_range\n else:\n range_func = timedelta_range\n\n breaks = range_func(start=start, end=end, periods=periods, freq=freq)\n\n return IntervalIndex.from_breaks(breaks, name=name, closed=closed)\n",
"from datetime import datetime\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n Index,\n IntervalIndex,\n MultiIndex,\n RangeIndex,\n Series,\n Timestamp,\n date_range,\n)\nimport pandas._testing as tm\n\n\nclass TestResetIndex:\n def test_reset_index_tz(self, tz_aware_fixture):\n # GH 3950\n # reset_index with single level\n tz = tz_aware_fixture\n idx = date_range(\"1/1/2011\", periods=5, freq=\"D\", tz=tz, name=\"idx\")\n df = DataFrame({\"a\": range(5), \"b\": [\"A\", \"B\", \"C\", \"D\", \"E\"]}, index=idx)\n\n expected = DataFrame(\n {\n \"idx\": [\n datetime(2011, 1, 1),\n datetime(2011, 1, 2),\n datetime(2011, 1, 3),\n datetime(2011, 1, 4),\n datetime(2011, 1, 5),\n ],\n \"a\": range(5),\n \"b\": [\"A\", \"B\", \"C\", \"D\", \"E\"],\n },\n columns=[\"idx\", \"a\", \"b\"],\n )\n expected[\"idx\"] = expected[\"idx\"].apply(lambda d: Timestamp(d, tz=tz))\n tm.assert_frame_equal(df.reset_index(), expected)\n\n def test_reset_index_with_intervals(self):\n idx = IntervalIndex.from_breaks(np.arange(11), name=\"x\")\n original = DataFrame({\"x\": idx, \"y\": np.arange(10)})[[\"x\", \"y\"]]\n\n result = original.set_index(\"x\")\n expected = DataFrame({\"y\": np.arange(10)}, index=idx)\n tm.assert_frame_equal(result, expected)\n\n result2 = result.reset_index()\n tm.assert_frame_equal(result2, original)\n\n def test_reset_index(self, float_frame):\n stacked = float_frame.stack()[::2]\n stacked = DataFrame({\"foo\": stacked, \"bar\": stacked})\n\n names = [\"first\", \"second\"]\n stacked.index.names = names\n deleveled = stacked.reset_index()\n for i, (lev, level_codes) in enumerate(\n zip(stacked.index.levels, stacked.index.codes)\n ):\n values = lev.take(level_codes)\n name = names[i]\n tm.assert_index_equal(values, Index(deleveled[name]))\n\n stacked.index.names = [None, None]\n deleveled2 = stacked.reset_index()\n tm.assert_series_equal(\n deleveled[\"first\"], deleveled2[\"level_0\"], check_names=False\n )\n tm.assert_series_equal(\n deleveled[\"second\"], deleveled2[\"level_1\"], check_names=False\n )\n\n # default name assigned\n rdf = float_frame.reset_index()\n exp = Series(float_frame.index.values, name=\"index\")\n tm.assert_series_equal(rdf[\"index\"], exp)\n\n # default name assigned, corner case\n df = float_frame.copy()\n df[\"index\"] = \"foo\"\n rdf = df.reset_index()\n exp = Series(float_frame.index.values, name=\"level_0\")\n tm.assert_series_equal(rdf[\"level_0\"], exp)\n\n # but this is ok\n float_frame.index.name = \"index\"\n deleveled = float_frame.reset_index()\n tm.assert_series_equal(deleveled[\"index\"], Series(float_frame.index))\n tm.assert_index_equal(deleveled.index, Index(np.arange(len(deleveled))))\n\n # preserve column names\n float_frame.columns.name = \"columns\"\n resetted = float_frame.reset_index()\n assert resetted.columns.name == \"columns\"\n\n # only remove certain columns\n df = float_frame.reset_index().set_index([\"index\", \"A\", \"B\"])\n rs = df.reset_index([\"A\", \"B\"])\n\n # TODO should reset_index check_names ?\n tm.assert_frame_equal(rs, float_frame, check_names=False)\n\n rs = df.reset_index([\"index\", \"A\", \"B\"])\n tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)\n\n rs = df.reset_index([\"index\", \"A\", \"B\"])\n tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)\n\n rs = df.reset_index(\"A\")\n xp = float_frame.reset_index().set_index([\"index\", \"B\"])\n tm.assert_frame_equal(rs, xp, check_names=False)\n\n # test resetting in place\n df = 
float_frame.copy()\n resetted = float_frame.reset_index()\n df.reset_index(inplace=True)\n tm.assert_frame_equal(df, resetted, check_names=False)\n\n df = float_frame.reset_index().set_index([\"index\", \"A\", \"B\"])\n rs = df.reset_index(\"A\", drop=True)\n xp = float_frame.copy()\n del xp[\"A\"]\n xp = xp.set_index([\"B\"], append=True)\n tm.assert_frame_equal(rs, xp, check_names=False)\n\n def test_reset_index_name(self):\n df = DataFrame(\n [[1, 2, 3, 4], [5, 6, 7, 8]],\n columns=[\"A\", \"B\", \"C\", \"D\"],\n index=Index(range(2), name=\"x\"),\n )\n assert df.reset_index().index.name is None\n assert df.reset_index(drop=True).index.name is None\n df.reset_index(inplace=True)\n assert df.index.name is None\n\n def test_reset_index_level(self):\n df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=[\"A\", \"B\", \"C\", \"D\"])\n\n for levels in [\"A\", \"B\"], [0, 1]:\n # With MultiIndex\n result = df.set_index([\"A\", \"B\"]).reset_index(level=levels[0])\n tm.assert_frame_equal(result, df.set_index(\"B\"))\n\n result = df.set_index([\"A\", \"B\"]).reset_index(level=levels[:1])\n tm.assert_frame_equal(result, df.set_index(\"B\"))\n\n result = df.set_index([\"A\", \"B\"]).reset_index(level=levels)\n tm.assert_frame_equal(result, df)\n\n result = df.set_index([\"A\", \"B\"]).reset_index(level=levels, drop=True)\n tm.assert_frame_equal(result, df[[\"C\", \"D\"]])\n\n # With single-level Index (GH 16263)\n result = df.set_index(\"A\").reset_index(level=levels[0])\n tm.assert_frame_equal(result, df)\n\n result = df.set_index(\"A\").reset_index(level=levels[:1])\n tm.assert_frame_equal(result, df)\n\n result = df.set_index([\"A\"]).reset_index(level=levels[0], drop=True)\n tm.assert_frame_equal(result, df[[\"B\", \"C\", \"D\"]])\n\n # Missing levels - for both MultiIndex and single-level Index:\n for idx_lev in [\"A\", \"B\"], [\"A\"]:\n with pytest.raises(KeyError, match=r\"(L|l)evel \\(?E\\)?\"):\n df.set_index(idx_lev).reset_index(level=[\"A\", \"E\"])\n with pytest.raises(IndexError, match=\"Too many levels\"):\n df.set_index(idx_lev).reset_index(level=[0, 1, 2])\n\n def test_reset_index_right_dtype(self):\n time = np.arange(0.0, 10, np.sqrt(2) / 2)\n s1 = Series(\n (9.81 * time ** 2) / 2, index=Index(time, name=\"time\"), name=\"speed\"\n )\n df = DataFrame(s1)\n\n resetted = s1.reset_index()\n assert resetted[\"time\"].dtype == np.float64\n\n resetted = df.reset_index()\n assert resetted[\"time\"].dtype == np.float64\n\n def test_reset_index_multiindex_col(self):\n vals = np.random.randn(3, 3).astype(object)\n idx = [\"x\", \"y\", \"z\"]\n full = np.hstack(([[x] for x in idx], vals))\n df = DataFrame(\n vals,\n Index(idx, name=\"a\"),\n columns=[[\"b\", \"b\", \"c\"], [\"mean\", \"median\", \"mean\"]],\n )\n rs = df.reset_index()\n xp = DataFrame(\n full, columns=[[\"a\", \"b\", \"b\", \"c\"], [\"\", \"mean\", \"median\", \"mean\"]]\n )\n tm.assert_frame_equal(rs, xp)\n\n rs = df.reset_index(col_fill=None)\n xp = DataFrame(\n full, columns=[[\"a\", \"b\", \"b\", \"c\"], [\"a\", \"mean\", \"median\", \"mean\"]]\n )\n tm.assert_frame_equal(rs, xp)\n\n rs = df.reset_index(col_level=1, col_fill=\"blah\")\n xp = DataFrame(\n full, columns=[[\"blah\", \"b\", \"b\", \"c\"], [\"a\", \"mean\", \"median\", \"mean\"]]\n )\n tm.assert_frame_equal(rs, xp)\n\n df = DataFrame(\n vals,\n MultiIndex.from_arrays([[0, 1, 2], [\"x\", \"y\", \"z\"]], names=[\"d\", \"a\"]),\n columns=[[\"b\", \"b\", \"c\"], [\"mean\", \"median\", \"mean\"]],\n )\n rs = df.reset_index(\"a\")\n xp = DataFrame(\n full,\n 
Index([0, 1, 2], name=\"d\"),\n columns=[[\"a\", \"b\", \"b\", \"c\"], [\"\", \"mean\", \"median\", \"mean\"]],\n )\n tm.assert_frame_equal(rs, xp)\n\n rs = df.reset_index(\"a\", col_fill=None)\n xp = DataFrame(\n full,\n Index(range(3), name=\"d\"),\n columns=[[\"a\", \"b\", \"b\", \"c\"], [\"a\", \"mean\", \"median\", \"mean\"]],\n )\n tm.assert_frame_equal(rs, xp)\n\n rs = df.reset_index(\"a\", col_fill=\"blah\", col_level=1)\n xp = DataFrame(\n full,\n Index(range(3), name=\"d\"),\n columns=[[\"blah\", \"b\", \"b\", \"c\"], [\"a\", \"mean\", \"median\", \"mean\"]],\n )\n tm.assert_frame_equal(rs, xp)\n\n def test_reset_index_multiindex_nan(self):\n # GH#6322, testing reset_index on MultiIndexes\n # when we have a nan or all nan\n df = DataFrame(\n {\"A\": [\"a\", \"b\", \"c\"], \"B\": [0, 1, np.nan], \"C\": np.random.rand(3)}\n )\n rs = df.set_index([\"A\", \"B\"]).reset_index()\n tm.assert_frame_equal(rs, df)\n\n df = DataFrame(\n {\"A\": [np.nan, \"b\", \"c\"], \"B\": [0, 1, 2], \"C\": np.random.rand(3)}\n )\n rs = df.set_index([\"A\", \"B\"]).reset_index()\n tm.assert_frame_equal(rs, df)\n\n df = DataFrame({\"A\": [\"a\", \"b\", \"c\"], \"B\": [0, 1, 2], \"C\": [np.nan, 1.1, 2.2]})\n rs = df.set_index([\"A\", \"B\"]).reset_index()\n tm.assert_frame_equal(rs, df)\n\n df = DataFrame(\n {\n \"A\": [\"a\", \"b\", \"c\"],\n \"B\": [np.nan, np.nan, np.nan],\n \"C\": np.random.rand(3),\n }\n )\n rs = df.set_index([\"A\", \"B\"]).reset_index()\n tm.assert_frame_equal(rs, df)\n\n def test_reset_index_with_datetimeindex_cols(self):\n # GH#5818\n df = DataFrame(\n [[1, 2], [3, 4]],\n columns=date_range(\"1/1/2013\", \"1/2/2013\"),\n index=[\"A\", \"B\"],\n )\n\n result = df.reset_index()\n expected = DataFrame(\n [[\"A\", 1, 2], [\"B\", 3, 4]],\n columns=[\"index\", datetime(2013, 1, 1), datetime(2013, 1, 2)],\n )\n tm.assert_frame_equal(result, expected)\n\n def test_reset_index_range(self):\n # GH#12071\n df = DataFrame([[0, 0], [1, 1]], columns=[\"A\", \"B\"], index=RangeIndex(stop=2))\n result = df.reset_index()\n assert isinstance(result.index, RangeIndex)\n expected = DataFrame(\n [[0, 0, 0], [1, 1, 1]],\n columns=[\"index\", \"A\", \"B\"],\n index=RangeIndex(stop=2),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_reset_index_dtypes_on_empty_frame_with_multiindex():\n # GH 19602 - Preserve dtype on empty DataFrame with MultiIndex\n idx = MultiIndex.from_product([[0, 1], [0.5, 1.0], [\"a\", \"b\"]])\n result = DataFrame(index=idx)[:0].reset_index().dtypes\n expected = Series({\"level_0\": np.int64, \"level_1\": np.float64, \"level_2\": object})\n tm.assert_series_equal(result, expected)\n",
"import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import DataFrame, MultiIndex, Series\nimport pandas._testing as tm\n\n\nclass TestDataFrameIsIn:\n def test_isin(self):\n # GH#4211\n df = DataFrame(\n {\n \"vals\": [1, 2, 3, 4],\n \"ids\": [\"a\", \"b\", \"f\", \"n\"],\n \"ids2\": [\"a\", \"n\", \"c\", \"n\"],\n },\n index=[\"foo\", \"bar\", \"baz\", \"qux\"],\n )\n other = [\"a\", \"b\", \"c\"]\n\n result = df.isin(other)\n expected = DataFrame([df.loc[s].isin(other) for s in df.index])\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"empty\", [[], Series(dtype=object), np.array([])])\n def test_isin_empty(self, empty):\n # GH#16991\n df = DataFrame({\"A\": [\"a\", \"b\", \"c\"], \"B\": [\"a\", \"e\", \"f\"]})\n expected = DataFrame(False, df.index, df.columns)\n\n result = df.isin(empty)\n tm.assert_frame_equal(result, expected)\n\n def test_isin_dict(self):\n df = DataFrame({\"A\": [\"a\", \"b\", \"c\"], \"B\": [\"a\", \"e\", \"f\"]})\n d = {\"A\": [\"a\"]}\n\n expected = DataFrame(False, df.index, df.columns)\n expected.loc[0, \"A\"] = True\n\n result = df.isin(d)\n tm.assert_frame_equal(result, expected)\n\n # non unique columns\n df = DataFrame({\"A\": [\"a\", \"b\", \"c\"], \"B\": [\"a\", \"e\", \"f\"]})\n df.columns = [\"A\", \"A\"]\n expected = DataFrame(False, df.index, df.columns)\n expected.loc[0, \"A\"] = True\n result = df.isin(d)\n tm.assert_frame_equal(result, expected)\n\n def test_isin_with_string_scalar(self):\n # GH#4763\n df = DataFrame(\n {\n \"vals\": [1, 2, 3, 4],\n \"ids\": [\"a\", \"b\", \"f\", \"n\"],\n \"ids2\": [\"a\", \"n\", \"c\", \"n\"],\n },\n index=[\"foo\", \"bar\", \"baz\", \"qux\"],\n )\n msg = (\n r\"only list-like or dict-like objects are allowed \"\n r\"to be passed to DataFrame.isin\\(\\), you passed a 'str'\"\n )\n with pytest.raises(TypeError, match=msg):\n df.isin(\"a\")\n\n with pytest.raises(TypeError, match=msg):\n df.isin(\"aaa\")\n\n def test_isin_df(self):\n df1 = DataFrame({\"A\": [1, 2, 3, 4], \"B\": [2, np.nan, 4, 4]})\n df2 = DataFrame({\"A\": [0, 2, 12, 4], \"B\": [2, np.nan, 4, 5]})\n expected = DataFrame(False, df1.index, df1.columns)\n result = df1.isin(df2)\n expected[\"A\"].loc[[1, 3]] = True\n expected[\"B\"].loc[[0, 2]] = True\n tm.assert_frame_equal(result, expected)\n\n # partial overlapping columns\n df2.columns = [\"A\", \"C\"]\n result = df1.isin(df2)\n expected[\"B\"] = False\n tm.assert_frame_equal(result, expected)\n\n def test_isin_tuples(self):\n # GH#16394\n df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [\"a\", \"b\", \"f\"]})\n df[\"C\"] = list(zip(df[\"A\"], df[\"B\"]))\n result = df[\"C\"].isin([(1, \"a\")])\n tm.assert_series_equal(result, Series([True, False, False], name=\"C\"))\n\n def test_isin_df_dupe_values(self):\n df1 = DataFrame({\"A\": [1, 2, 3, 4], \"B\": [2, np.nan, 4, 4]})\n # just cols duped\n df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]], columns=[\"B\", \"B\"])\n msg = r\"cannot compute isin with a duplicate axis\\.\"\n with pytest.raises(ValueError, match=msg):\n df1.isin(df2)\n\n # just index duped\n df2 = DataFrame(\n [[0, 2], [12, 4], [2, np.nan], [4, 5]],\n columns=[\"A\", \"B\"],\n index=[0, 0, 1, 1],\n )\n with pytest.raises(ValueError, match=msg):\n df1.isin(df2)\n\n # cols and index:\n df2.columns = [\"B\", \"B\"]\n with pytest.raises(ValueError, match=msg):\n df1.isin(df2)\n\n def test_isin_dupe_self(self):\n other = DataFrame({\"A\": [1, 0, 1, 0], \"B\": [1, 1, 0, 0]})\n df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=[\"A\", 
\"A\"])\n result = df.isin(other)\n expected = DataFrame(False, index=df.index, columns=df.columns)\n expected.loc[0] = True\n expected.iloc[1, 1] = True\n tm.assert_frame_equal(result, expected)\n\n def test_isin_against_series(self):\n df = pd.DataFrame(\n {\"A\": [1, 2, 3, 4], \"B\": [2, np.nan, 4, 4]}, index=[\"a\", \"b\", \"c\", \"d\"]\n )\n s = pd.Series([1, 3, 11, 4], index=[\"a\", \"b\", \"c\", \"d\"])\n expected = DataFrame(False, index=df.index, columns=df.columns)\n expected[\"A\"].loc[\"a\"] = True\n expected.loc[\"d\"] = True\n result = df.isin(s)\n tm.assert_frame_equal(result, expected)\n\n def test_isin_multiIndex(self):\n idx = MultiIndex.from_tuples(\n [\n (0, \"a\", \"foo\"),\n (0, \"a\", \"bar\"),\n (0, \"b\", \"bar\"),\n (0, \"b\", \"baz\"),\n (2, \"a\", \"foo\"),\n (2, \"a\", \"bar\"),\n (2, \"c\", \"bar\"),\n (2, \"c\", \"baz\"),\n (1, \"b\", \"foo\"),\n (1, \"b\", \"bar\"),\n (1, \"c\", \"bar\"),\n (1, \"c\", \"baz\"),\n ]\n )\n df1 = DataFrame({\"A\": np.ones(12), \"B\": np.zeros(12)}, index=idx)\n df2 = DataFrame(\n {\n \"A\": [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],\n \"B\": [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1],\n }\n )\n # against regular index\n expected = DataFrame(False, index=df1.index, columns=df1.columns)\n result = df1.isin(df2)\n tm.assert_frame_equal(result, expected)\n\n df2.index = idx\n expected = df2.values.astype(bool)\n expected[:, 1] = ~expected[:, 1]\n expected = DataFrame(expected, columns=[\"A\", \"B\"], index=idx)\n\n result = df1.isin(df2)\n tm.assert_frame_equal(result, expected)\n\n def test_isin_empty_datetimelike(self):\n # GH#15473\n df1_ts = DataFrame({\"date\": pd.to_datetime([\"2014-01-01\", \"2014-01-02\"])})\n df1_td = DataFrame({\"date\": [pd.Timedelta(1, \"s\"), pd.Timedelta(2, \"s\")]})\n df2 = DataFrame({\"date\": []})\n df3 = DataFrame()\n\n expected = DataFrame({\"date\": [False, False]})\n\n result = df1_ts.isin(df2)\n tm.assert_frame_equal(result, expected)\n result = df1_ts.isin(df3)\n tm.assert_frame_equal(result, expected)\n\n result = df1_td.isin(df2)\n tm.assert_frame_equal(result, expected)\n result = df1_td.isin(df3)\n tm.assert_frame_equal(result, expected)\n",
"from datetime import datetime\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import PY37, is_platform_windows\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n CategoricalIndex,\n DataFrame,\n Index,\n MultiIndex,\n Series,\n _np_version_under1p17,\n qcut,\n)\nimport pandas._testing as tm\n\n\ndef cartesian_product_for_groupers(result, args, names):\n \"\"\" Reindex to a cartesian production for the groupers,\n preserving the nature (Categorical) of each grouper\n \"\"\"\n\n def f(a):\n if isinstance(a, (CategoricalIndex, Categorical)):\n categories = a.categories\n a = Categorical.from_codes(\n np.arange(len(categories)), categories=categories, ordered=a.ordered\n )\n return a\n\n index = MultiIndex.from_product(map(f, args), names=names)\n return result.reindex(index).sort_index()\n\n\ndef test_apply_use_categorical_name(df):\n cats = qcut(df.C, 4)\n\n def get_stats(group):\n return {\n \"min\": group.min(),\n \"max\": group.max(),\n \"count\": group.count(),\n \"mean\": group.mean(),\n }\n\n result = df.groupby(cats, observed=False).D.apply(get_stats)\n assert result.index.names[0] == \"C\"\n\n\ndef test_basic():\n\n cats = Categorical(\n [\"a\", \"a\", \"a\", \"b\", \"b\", \"b\", \"c\", \"c\", \"c\"],\n categories=[\"a\", \"b\", \"c\", \"d\"],\n ordered=True,\n )\n data = DataFrame({\"a\": [1, 1, 1, 2, 2, 2, 3, 4, 5], \"b\": cats})\n\n exp_index = CategoricalIndex(list(\"abcd\"), name=\"b\", ordered=True)\n expected = DataFrame({\"a\": [1, 2, 4, np.nan]}, index=exp_index)\n result = data.groupby(\"b\", observed=False).mean()\n tm.assert_frame_equal(result, expected)\n\n cat1 = Categorical([\"a\", \"a\", \"b\", \"b\"], categories=[\"a\", \"b\", \"z\"], ordered=True)\n cat2 = Categorical([\"c\", \"d\", \"c\", \"d\"], categories=[\"c\", \"d\", \"y\"], ordered=True)\n df = DataFrame({\"A\": cat1, \"B\": cat2, \"values\": [1, 2, 3, 4]})\n\n # single grouper\n gb = df.groupby(\"A\", observed=False)\n exp_idx = CategoricalIndex([\"a\", \"b\", \"z\"], name=\"A\", ordered=True)\n expected = DataFrame({\"values\": Series([3, 7, 0], index=exp_idx)})\n result = gb.sum()\n tm.assert_frame_equal(result, expected)\n\n # GH 8623\n x = DataFrame(\n [[1, \"John P. Doe\"], [2, \"Jane Dove\"], [1, \"John P. 
Doe\"]],\n columns=[\"person_id\", \"person_name\"],\n )\n x[\"person_name\"] = Categorical(x.person_name)\n\n g = x.groupby([\"person_id\"], observed=False)\n result = g.transform(lambda x: x)\n tm.assert_frame_equal(result, x[[\"person_name\"]])\n\n result = x.drop_duplicates(\"person_name\")\n expected = x.iloc[[0, 1]]\n tm.assert_frame_equal(result, expected)\n\n def f(x):\n return x.drop_duplicates(\"person_name\").iloc[0]\n\n result = g.apply(f)\n expected = x.iloc[[0, 1]].copy()\n expected.index = Index([1, 2], name=\"person_id\")\n expected[\"person_name\"] = expected[\"person_name\"].astype(\"object\")\n tm.assert_frame_equal(result, expected)\n\n # GH 9921\n # Monotonic\n df = DataFrame({\"a\": [5, 15, 25]})\n c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])\n\n result = df.a.groupby(c, observed=False).transform(sum)\n tm.assert_series_equal(result, df[\"a\"])\n\n tm.assert_series_equal(\n df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[\"a\"]\n )\n tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[[\"a\"]])\n tm.assert_frame_equal(\n df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[[\"a\"]]\n )\n\n # Filter\n tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df[\"a\"])\n tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)\n\n # Non-monotonic\n df = DataFrame({\"a\": [5, 15, 25, -5]})\n c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])\n\n result = df.a.groupby(c, observed=False).transform(sum)\n tm.assert_series_equal(result, df[\"a\"])\n\n tm.assert_series_equal(\n df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[\"a\"]\n )\n tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[[\"a\"]])\n tm.assert_frame_equal(\n df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[[\"a\"]]\n )\n\n # GH 9603\n df = DataFrame({\"a\": [1, 0, 0, 0]})\n c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list(\"abcd\")))\n result = df.groupby(c, observed=False).apply(len)\n\n exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)\n expected = Series([1, 0, 0, 0], index=exp_index)\n expected.index.name = \"a\"\n tm.assert_series_equal(result, expected)\n\n # more basic\n levels = [\"foo\", \"bar\", \"baz\", \"qux\"]\n codes = np.random.randint(0, 4, size=100)\n\n cats = Categorical.from_codes(codes, levels, ordered=True)\n\n data = DataFrame(np.random.randn(100, 4))\n\n result = data.groupby(cats, observed=False).mean()\n\n expected = data.groupby(np.asarray(cats), observed=False).mean()\n exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)\n expected = expected.reindex(exp_idx)\n\n tm.assert_frame_equal(result, expected)\n\n grouped = data.groupby(cats, observed=False)\n desc_result = grouped.describe()\n\n idx = cats.codes.argsort()\n ord_labels = np.asarray(cats).take(idx)\n ord_data = data.take(idx)\n\n exp_cats = Categorical(\n ord_labels, ordered=True, categories=[\"foo\", \"bar\", \"baz\", \"qux\"]\n )\n expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()\n tm.assert_frame_equal(desc_result, expected)\n\n # GH 10460\n expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)\n exp = CategoricalIndex(expc)\n tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)\n exp = Index([\"count\", \"mean\", \"std\", \"min\", \"25%\", \"50%\", \"75%\", \"max\"] * 4)\n tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)\n\n\ndef 
test_level_get_group(observed):\n # GH15155\n df = DataFrame(\n data=np.arange(2, 22, 2),\n index=MultiIndex(\n levels=[CategoricalIndex([\"a\", \"b\"]), range(10)],\n codes=[[0] * 5 + [1] * 5, range(10)],\n names=[\"Index1\", \"Index2\"],\n ),\n )\n g = df.groupby(level=[\"Index1\"], observed=observed)\n\n # expected should equal test.loc[[\"a\"]]\n # GH15166\n expected = DataFrame(\n data=np.arange(2, 12, 2),\n index=MultiIndex(\n levels=[CategoricalIndex([\"a\", \"b\"]), range(5)],\n codes=[[0] * 5, range(5)],\n names=[\"Index1\", \"Index2\"],\n ),\n )\n result = g.get_group(\"a\")\n\n tm.assert_frame_equal(result, expected)\n\n\n# GH#21636 flaky on py37; may be related to older numpy, see discussion\n# https://github.com/MacPython/pandas-wheels/pull/64\[email protected](\n PY37 and _np_version_under1p17 and not is_platform_windows(),\n reason=\"Flaky, GH-27902\",\n)\[email protected](\"ordered\", [True, False])\ndef test_apply(ordered):\n # GH 10138\n\n dense = Categorical(list(\"abc\"), ordered=ordered)\n\n # 'b' is in the categories but not in the list\n missing = Categorical(list(\"aaa\"), categories=[\"a\", \"b\"], ordered=ordered)\n values = np.arange(len(dense))\n df = DataFrame({\"missing\": missing, \"dense\": dense, \"values\": values})\n grouped = df.groupby([\"missing\", \"dense\"], observed=True)\n\n # missing category 'b' should still exist in the output index\n idx = MultiIndex.from_arrays([missing, dense], names=[\"missing\", \"dense\"])\n expected = DataFrame([0, 1, 2.0], index=idx, columns=[\"values\"])\n\n # GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])\n # is coming back as Series([0., 1., 0.], index=[\"missing\", \"dense\", \"values\"])\n # when we expect Series(0., index=[\"values\"])\n result = grouped.apply(lambda x: np.mean(x))\n tm.assert_frame_equal(result, expected)\n\n # we coerce back to ints\n expected = expected.astype(\"int\")\n result = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n result = grouped.agg(np.mean)\n tm.assert_frame_equal(result, expected)\n\n # but for transform we should still get back the original index\n idx = MultiIndex.from_arrays([missing, dense], names=[\"missing\", \"dense\"])\n expected = Series(1, index=idx)\n result = grouped.apply(lambda x: 1)\n tm.assert_series_equal(result, expected)\n\n\ndef test_observed(observed):\n # multiple groupers, don't re-expand the output space\n # of the grouper\n # gh-14942 (implement)\n # gh-10132 (back-compat)\n # gh-8138 (back-compat)\n # gh-8869\n\n cat1 = Categorical([\"a\", \"a\", \"b\", \"b\"], categories=[\"a\", \"b\", \"z\"], ordered=True)\n cat2 = Categorical([\"c\", \"d\", \"c\", \"d\"], categories=[\"c\", \"d\", \"y\"], ordered=True)\n df = DataFrame({\"A\": cat1, \"B\": cat2, \"values\": [1, 2, 3, 4]})\n df[\"C\"] = [\"foo\", \"bar\"] * 2\n\n # multiple groupers with a non-cat\n gb = df.groupby([\"A\", \"B\", \"C\"], observed=observed)\n exp_index = MultiIndex.from_arrays(\n [cat1, cat2, [\"foo\", \"bar\"] * 2], names=[\"A\", \"B\", \"C\"]\n )\n expected = DataFrame({\"values\": Series([1, 2, 3, 4], index=exp_index)}).sort_index()\n result = gb.sum()\n if not observed:\n expected = cartesian_product_for_groupers(\n expected, [cat1, cat2, [\"foo\", \"bar\"]], list(\"ABC\")\n )\n\n tm.assert_frame_equal(result, expected)\n\n gb = df.groupby([\"A\", \"B\"], observed=observed)\n exp_index = MultiIndex.from_arrays([cat1, cat2], names=[\"A\", \"B\"])\n expected = DataFrame({\"values\": [1, 2, 3, 4]}, index=exp_index)\n result = gb.sum()\n if not 
observed:\n expected = cartesian_product_for_groupers(expected, [cat1, cat2], list(\"AB\"))\n\n tm.assert_frame_equal(result, expected)\n\n # https://github.com/pandas-dev/pandas/issues/8138\n d = {\n \"cat\": Categorical(\n [\"a\", \"b\", \"a\", \"b\"], categories=[\"a\", \"b\", \"c\"], ordered=True\n ),\n \"ints\": [1, 1, 2, 2],\n \"val\": [10, 20, 30, 40],\n }\n df = DataFrame(d)\n\n # Grouping on a single column\n groups_single_key = df.groupby(\"cat\", observed=observed)\n result = groups_single_key.mean()\n\n exp_index = CategoricalIndex(\n list(\"ab\"), name=\"cat\", categories=list(\"abc\"), ordered=True\n )\n expected = DataFrame({\"ints\": [1.5, 1.5], \"val\": [20.0, 30]}, index=exp_index)\n if not observed:\n index = CategoricalIndex(\n list(\"abc\"), name=\"cat\", categories=list(\"abc\"), ordered=True\n )\n expected = expected.reindex(index)\n\n tm.assert_frame_equal(result, expected)\n\n # Grouping on two columns\n groups_double_key = df.groupby([\"cat\", \"ints\"], observed=observed)\n result = groups_double_key.agg(\"mean\")\n expected = DataFrame(\n {\n \"val\": [10, 30, 20, 40],\n \"cat\": Categorical(\n [\"a\", \"a\", \"b\", \"b\"], categories=[\"a\", \"b\", \"c\"], ordered=True\n ),\n \"ints\": [1, 2, 1, 2],\n }\n ).set_index([\"cat\", \"ints\"])\n if not observed:\n expected = cartesian_product_for_groupers(\n expected, [df.cat.values, [1, 2]], [\"cat\", \"ints\"]\n )\n\n tm.assert_frame_equal(result, expected)\n\n # GH 10132\n for key in [(\"a\", 1), (\"b\", 2), (\"b\", 1), (\"a\", 2)]:\n c, i = key\n result = groups_double_key.get_group(key)\n expected = df[(df.cat == c) & (df.ints == i)]\n tm.assert_frame_equal(result, expected)\n\n # gh-8869\n # with as_index\n d = {\n \"foo\": [10, 8, 4, 8, 4, 1, 1],\n \"bar\": [10, 20, 30, 40, 50, 60, 70],\n \"baz\": [\"d\", \"c\", \"e\", \"a\", \"a\", \"d\", \"c\"],\n }\n df = DataFrame(d)\n cat = pd.cut(df[\"foo\"], np.linspace(0, 10, 3))\n df[\"range\"] = cat\n groups = df.groupby([\"range\", \"baz\"], as_index=False, observed=observed)\n result = groups.agg(\"mean\")\n\n groups2 = df.groupby([\"range\", \"baz\"], as_index=True, observed=observed)\n expected = groups2.agg(\"mean\").reset_index()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_observed_codes_remap(observed):\n d = {\"C1\": [3, 3, 4, 5], \"C2\": [1, 2, 3, 4], \"C3\": [10, 100, 200, 34]}\n df = DataFrame(d)\n values = pd.cut(df[\"C1\"], [1, 2, 3, 6])\n values.name = \"cat\"\n groups_double_key = df.groupby([values, \"C2\"], observed=observed)\n\n idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=[\"cat\", \"C2\"])\n expected = DataFrame({\"C1\": [3, 3, 4, 5], \"C3\": [10, 100, 200, 34]}, index=idx)\n if not observed:\n expected = cartesian_product_for_groupers(\n expected, [values.values, [1, 2, 3, 4]], [\"cat\", \"C2\"]\n )\n\n result = groups_double_key.agg(\"mean\")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_observed_perf():\n # we create a cartesian product, so this is\n # non-performant if we don't use observed values\n # gh-14942\n df = DataFrame(\n {\n \"cat\": np.random.randint(0, 255, size=30000),\n \"int_id\": np.random.randint(0, 255, size=30000),\n \"other_id\": np.random.randint(0, 10000, size=30000),\n \"foo\": 0,\n }\n )\n df[\"cat\"] = df.cat.astype(str).astype(\"category\")\n\n grouped = df.groupby([\"cat\", \"int_id\", \"other_id\"], observed=True)\n result = grouped.count()\n assert result.index.levels[0].nunique() == df.cat.nunique()\n assert result.index.levels[1].nunique() == df.int_id.nunique()\n assert 
result.index.levels[2].nunique() == df.other_id.nunique()\n\n\ndef test_observed_groups(observed):\n # gh-20583\n # test that we have the appropriate groups\n\n cat = Categorical([\"a\", \"c\", \"a\"], categories=[\"a\", \"b\", \"c\"])\n df = DataFrame({\"cat\": cat, \"vals\": [1, 2, 3]})\n g = df.groupby(\"cat\", observed=observed)\n\n result = g.groups\n if observed:\n expected = {\"a\": Index([0, 2], dtype=\"int64\"), \"c\": Index([1], dtype=\"int64\")}\n else:\n expected = {\n \"a\": Index([0, 2], dtype=\"int64\"),\n \"b\": Index([], dtype=\"int64\"),\n \"c\": Index([1], dtype=\"int64\"),\n }\n\n tm.assert_dict_equal(result, expected)\n\n\ndef test_observed_groups_with_nan(observed):\n # GH 24740\n df = DataFrame(\n {\n \"cat\": Categorical([\"a\", np.nan, \"a\"], categories=[\"a\", \"b\", \"d\"]),\n \"vals\": [1, 2, 3],\n }\n )\n g = df.groupby(\"cat\", observed=observed)\n result = g.groups\n if observed:\n expected = {\"a\": Index([0, 2], dtype=\"int64\")}\n else:\n expected = {\n \"a\": Index([0, 2], dtype=\"int64\"),\n \"b\": Index([], dtype=\"int64\"),\n \"d\": Index([], dtype=\"int64\"),\n }\n tm.assert_dict_equal(result, expected)\n\n\ndef test_observed_nth():\n # GH 26385\n cat = pd.Categorical([\"a\", np.nan, np.nan], categories=[\"a\", \"b\", \"c\"])\n ser = pd.Series([1, 2, 3])\n df = pd.DataFrame({\"cat\": cat, \"ser\": ser})\n\n result = df.groupby(\"cat\", observed=False)[\"ser\"].nth(0)\n\n index = pd.Categorical([\"a\", \"b\", \"c\"], categories=[\"a\", \"b\", \"c\"])\n expected = pd.Series([1, np.nan, np.nan], index=index, name=\"ser\")\n expected.index.name = \"cat\"\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_dataframe_categorical_with_nan(observed):\n # GH 21151\n s1 = Categorical([np.nan, \"a\", np.nan, \"a\"], categories=[\"a\", \"b\", \"c\"])\n s2 = Series([1, 2, 3, 4])\n df = DataFrame({\"s1\": s1, \"s2\": s2})\n result = df.groupby(\"s1\", observed=observed).first().reset_index()\n if observed:\n expected = DataFrame(\n {\"s1\": Categorical([\"a\"], categories=[\"a\", \"b\", \"c\"]), \"s2\": [2]}\n )\n else:\n expected = DataFrame(\n {\n \"s1\": Categorical([\"a\", \"b\", \"c\"], categories=[\"a\", \"b\", \"c\"]),\n \"s2\": [2, np.nan, np.nan],\n }\n )\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"ordered\", [True, False])\[email protected](\"observed\", [True, False])\[email protected](\"sort\", [True, False])\ndef test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):\n # GH 25871: Fix groupby sorting on ordered Categoricals\n # GH 25167: Groupby with observed=True doesn't sort\n\n # Build a dataframe with cat having one unobserved category ('missing'),\n # and a Series with identical values\n label = Categorical(\n [\"d\", \"a\", \"b\", \"a\", \"d\", \"b\"],\n categories=[\"a\", \"b\", \"missing\", \"d\"],\n ordered=ordered,\n )\n val = Series([\"d\", \"a\", \"b\", \"a\", \"d\", \"b\"])\n df = DataFrame({\"label\": label, \"val\": val})\n\n # aggregate on the Categorical\n result = df.groupby(\"label\", observed=observed, sort=sort)[\"val\"].aggregate(\"first\")\n\n # If ordering works, we expect index labels equal to aggregation results,\n # except for 'observed=False': label 'missing' has aggregation None\n label = Series(result.index.array, dtype=\"object\")\n aggr = Series(result.array)\n if not observed:\n aggr[aggr.isna()] = \"missing\"\n if not all(label == aggr):\n msg = (\n \"Labels and aggregation results not consistently sorted\\n\"\n f\"for (ordered={ordered}, observed={observed}, 
sort={sort})\\n\"\n f\"Result:\\n{result}\"\n )\n assert False, msg\n\n\ndef test_datetime():\n # GH9049: ensure backward compatibility\n levels = pd.date_range(\"2014-01-01\", periods=4)\n codes = np.random.randint(0, 4, size=100)\n\n cats = Categorical.from_codes(codes, levels, ordered=True)\n\n data = DataFrame(np.random.randn(100, 4))\n result = data.groupby(cats, observed=False).mean()\n\n expected = data.groupby(np.asarray(cats), observed=False).mean()\n expected = expected.reindex(levels)\n expected.index = CategoricalIndex(\n expected.index, categories=expected.index, ordered=True\n )\n\n tm.assert_frame_equal(result, expected)\n\n grouped = data.groupby(cats, observed=False)\n desc_result = grouped.describe()\n\n idx = cats.codes.argsort()\n ord_labels = cats.take(idx)\n ord_data = data.take(idx)\n expected = ord_data.groupby(ord_labels, observed=False).describe()\n tm.assert_frame_equal(desc_result, expected)\n tm.assert_index_equal(desc_result.index, expected.index)\n tm.assert_index_equal(\n desc_result.index.get_level_values(0), expected.index.get_level_values(0)\n )\n\n # GH 10460\n expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)\n exp = CategoricalIndex(expc)\n tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)\n exp = Index([\"count\", \"mean\", \"std\", \"min\", \"25%\", \"50%\", \"75%\", \"max\"] * 4)\n tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)\n\n\ndef test_categorical_index():\n\n s = np.random.RandomState(12345)\n levels = [\"foo\", \"bar\", \"baz\", \"qux\"]\n codes = s.randint(0, 4, size=20)\n cats = Categorical.from_codes(codes, levels, ordered=True)\n df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list(\"abcd\"))\n df[\"cats\"] = cats\n\n # with a cat index\n result = df.set_index(\"cats\").groupby(level=0, observed=False).sum()\n expected = df[list(\"abcd\")].groupby(cats.codes, observed=False).sum()\n expected.index = CategoricalIndex(\n Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name=\"cats\"\n )\n tm.assert_frame_equal(result, expected)\n\n # with a cat column, should produce a cat index\n result = df.groupby(\"cats\", observed=False).sum()\n expected = df[list(\"abcd\")].groupby(cats.codes, observed=False).sum()\n expected.index = CategoricalIndex(\n Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name=\"cats\"\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_describe_categorical_columns():\n # GH 11558\n cats = CategoricalIndex(\n [\"qux\", \"foo\", \"baz\", \"bar\"],\n categories=[\"foo\", \"bar\", \"baz\", \"qux\"],\n ordered=True,\n )\n df = DataFrame(np.random.randn(20, 4), columns=cats)\n result = df.groupby([1, 2, 3, 4] * 5).describe()\n\n tm.assert_index_equal(result.stack().columns, cats)\n tm.assert_categorical_equal(result.stack().columns.values, cats.values)\n\n\ndef test_unstack_categorical():\n # GH11558 (example is taken from the original issue)\n df = DataFrame(\n {\"a\": range(10), \"medium\": [\"A\", \"B\"] * 5, \"artist\": list(\"XYXXY\") * 2}\n )\n df[\"medium\"] = df[\"medium\"].astype(\"category\")\n\n gcat = df.groupby([\"artist\", \"medium\"], observed=False)[\"a\"].count().unstack()\n result = gcat.describe()\n\n exp_columns = CategoricalIndex([\"A\", \"B\"], ordered=False, name=\"medium\")\n tm.assert_index_equal(result.columns, exp_columns)\n tm.assert_categorical_equal(result.columns.values, exp_columns.values)\n\n result = gcat[\"A\"] + gcat[\"B\"]\n expected = Series([6, 4], 
index=Index([\"X\", \"Y\"], name=\"artist\"))\n tm.assert_series_equal(result, expected)\n\n\ndef test_bins_unequal_len():\n # GH3011\n series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])\n bins = pd.cut(series.dropna().values, 4)\n\n # len(bins) != len(series) here\n msg = r\"Length of grouper \\(8\\) and axis \\(10\\) must be same length\"\n with pytest.raises(ValueError, match=msg):\n series.groupby(bins).mean()\n\n\ndef test_as_index():\n # GH13204\n df = DataFrame(\n {\n \"cat\": Categorical([1, 2, 2], [1, 2, 3]),\n \"A\": [10, 11, 11],\n \"B\": [101, 102, 103],\n }\n )\n result = df.groupby([\"cat\", \"A\"], as_index=False, observed=True).sum()\n expected = DataFrame(\n {\n \"cat\": Categorical([1, 2], categories=df.cat.cat.categories),\n \"A\": [10, 11],\n \"B\": [101, 205],\n },\n columns=[\"cat\", \"A\", \"B\"],\n )\n tm.assert_frame_equal(result, expected)\n\n # function grouper\n f = lambda r: df.loc[r, \"A\"]\n result = df.groupby([\"cat\", f], as_index=False, observed=True).sum()\n expected = DataFrame(\n {\n \"cat\": Categorical([1, 2], categories=df.cat.cat.categories),\n \"A\": [10, 22],\n \"B\": [101, 205],\n },\n columns=[\"cat\", \"A\", \"B\"],\n )\n tm.assert_frame_equal(result, expected)\n\n # another not in-axis grouper (conflicting names in index)\n s = Series([\"a\", \"b\", \"b\"], name=\"cat\")\n result = df.groupby([\"cat\", s], as_index=False, observed=True).sum()\n tm.assert_frame_equal(result, expected)\n\n # is original index dropped?\n group_columns = [\"cat\", \"A\"]\n expected = DataFrame(\n {\n \"cat\": Categorical([1, 2], categories=df.cat.cat.categories),\n \"A\": [10, 11],\n \"B\": [101, 205],\n },\n columns=[\"cat\", \"A\", \"B\"],\n )\n\n for name in [None, \"X\", \"B\"]:\n df.index = Index(list(\"abc\"), name=name)\n result = df.groupby(group_columns, as_index=False, observed=True).sum()\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_preserve_categories():\n # GH-13179\n categories = list(\"abc\")\n\n # ordered=True\n df = DataFrame({\"A\": Categorical(list(\"ba\"), categories=categories, ordered=True)})\n index = CategoricalIndex(categories, categories, ordered=True, name=\"A\")\n tm.assert_index_equal(\n df.groupby(\"A\", sort=True, observed=False).first().index, index\n )\n tm.assert_index_equal(\n df.groupby(\"A\", sort=False, observed=False).first().index, index\n )\n\n # ordered=False\n df = DataFrame({\"A\": Categorical(list(\"ba\"), categories=categories, ordered=False)})\n sort_index = CategoricalIndex(categories, categories, ordered=False, name=\"A\")\n nosort_index = CategoricalIndex(list(\"bac\"), list(\"bac\"), ordered=False, name=\"A\")\n tm.assert_index_equal(\n df.groupby(\"A\", sort=True, observed=False).first().index, sort_index\n )\n tm.assert_index_equal(\n df.groupby(\"A\", sort=False, observed=False).first().index, nosort_index\n )\n\n\ndef test_preserve_categorical_dtype():\n # GH13743, GH13854\n df = DataFrame(\n {\n \"A\": [1, 2, 1, 1, 2],\n \"B\": [10, 16, 22, 28, 34],\n \"C1\": Categorical(list(\"abaab\"), categories=list(\"bac\"), ordered=False),\n \"C2\": Categorical(list(\"abaab\"), categories=list(\"bac\"), ordered=True),\n }\n )\n # single grouper\n exp_full = DataFrame(\n {\n \"A\": [2.0, 1.0, np.nan],\n \"B\": [25.0, 20.0, np.nan],\n \"C1\": Categorical(list(\"bac\"), categories=list(\"bac\"), ordered=False),\n \"C2\": Categorical(list(\"bac\"), categories=list(\"bac\"), ordered=True),\n }\n )\n for col in [\"C1\", \"C2\"]:\n result1 = df.groupby(by=col, as_index=False, observed=False).mean()\n 
result2 = df.groupby(by=col, as_index=True, observed=False).mean().reset_index()\n expected = exp_full.reindex(columns=result1.columns)\n tm.assert_frame_equal(result1, expected)\n tm.assert_frame_equal(result2, expected)\n\n\[email protected](\n \"func, values\",\n [\n (\"first\", [\"second\", \"first\"]),\n (\"last\", [\"fourth\", \"third\"]),\n (\"min\", [\"fourth\", \"first\"]),\n (\"max\", [\"second\", \"third\"]),\n ],\n)\ndef test_preserve_on_ordered_ops(func, values):\n # gh-18502\n # preserve the categoricals on ops\n c = pd.Categorical([\"first\", \"second\", \"third\", \"fourth\"], ordered=True)\n df = pd.DataFrame({\"payload\": [-1, -2, -1, -2], \"col\": c})\n g = df.groupby(\"payload\")\n result = getattr(g, func)()\n expected = pd.DataFrame(\n {\"payload\": [-2, -1], \"col\": pd.Series(values, dtype=c.dtype)}\n ).set_index(\"payload\")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_categorical_no_compress():\n data = Series(np.random.randn(9))\n\n codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])\n cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)\n\n result = data.groupby(cats, observed=False).mean()\n exp = data.groupby(codes, observed=False).mean()\n\n exp.index = CategoricalIndex(\n exp.index, categories=cats.categories, ordered=cats.ordered\n )\n tm.assert_series_equal(result, exp)\n\n codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])\n cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)\n\n result = data.groupby(cats, observed=False).mean()\n exp = data.groupby(codes, observed=False).mean().reindex(cats.categories)\n exp.index = CategoricalIndex(\n exp.index, categories=cats.categories, ordered=cats.ordered\n )\n tm.assert_series_equal(result, exp)\n\n cats = Categorical(\n [\"a\", \"a\", \"a\", \"b\", \"b\", \"b\", \"c\", \"c\", \"c\"],\n categories=[\"a\", \"b\", \"c\", \"d\"],\n ordered=True,\n )\n data = DataFrame({\"a\": [1, 1, 1, 2, 2, 2, 3, 4, 5], \"b\": cats})\n\n result = data.groupby(\"b\", observed=False).mean()\n result = result[\"a\"].values\n exp = np.array([1, 2, 4, np.nan])\n tm.assert_numpy_array_equal(result, exp)\n\n\ndef test_groupby_empty_with_category():\n # GH-9614\n # test fix for when group by on None resulted in\n # coercion of dtype categorical -> float\n df = pd.DataFrame(\n {\"A\": [None] * 3, \"B\": pd.Categorical([\"train\", \"train\", \"test\"])}\n )\n result = df.groupby(\"A\").first()[\"B\"]\n expected = pd.Series(\n pd.Categorical([], categories=[\"test\", \"train\"]),\n index=pd.Series([], dtype=\"object\", name=\"A\"),\n name=\"B\",\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_sort():\n\n # https://stackoverflow.com/questions/23814368/sorting-pandas-\n # categorical-labels-after-groupby\n # This should result in a properly sorted Series so that the plot\n # has a sorted x axis\n # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')\n\n df = DataFrame({\"value\": np.random.randint(0, 10000, 100)})\n labels = [f\"{i} - {i+499}\" for i in range(0, 10000, 500)]\n cat_labels = Categorical(labels, labels)\n\n df = df.sort_values(by=[\"value\"], ascending=True)\n df[\"value_group\"] = pd.cut(\n df.value, range(0, 10500, 500), right=False, labels=cat_labels\n )\n\n res = df.groupby([\"value_group\"], observed=False)[\"value_group\"].count()\n exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]\n exp.index = CategoricalIndex(exp.index, name=exp.index.name)\n tm.assert_series_equal(res, exp)\n\n\ndef test_sort2():\n # dataframe groupby sort was being ignored # GH 
8868\n df = DataFrame(\n [\n [\"(7.5, 10]\", 10, 10],\n [\"(7.5, 10]\", 8, 20],\n [\"(2.5, 5]\", 5, 30],\n [\"(5, 7.5]\", 6, 40],\n [\"(2.5, 5]\", 4, 50],\n [\"(0, 2.5]\", 1, 60],\n [\"(5, 7.5]\", 7, 70],\n ],\n columns=[\"range\", \"foo\", \"bar\"],\n )\n df[\"range\"] = Categorical(df[\"range\"], ordered=True)\n index = CategoricalIndex(\n [\"(0, 2.5]\", \"(2.5, 5]\", \"(5, 7.5]\", \"(7.5, 10]\"], name=\"range\", ordered=True\n )\n expected_sort = DataFrame(\n [[1, 60], [5, 30], [6, 40], [10, 10]], columns=[\"foo\", \"bar\"], index=index\n )\n\n col = \"range\"\n result_sort = df.groupby(col, sort=True, observed=False).first()\n tm.assert_frame_equal(result_sort, expected_sort)\n\n # when categories is ordered, group is ordered by category's order\n expected_sort = result_sort\n result_sort = df.groupby(col, sort=False, observed=False).first()\n tm.assert_frame_equal(result_sort, expected_sort)\n\n df[\"range\"] = Categorical(df[\"range\"], ordered=False)\n index = CategoricalIndex(\n [\"(0, 2.5]\", \"(2.5, 5]\", \"(5, 7.5]\", \"(7.5, 10]\"], name=\"range\"\n )\n expected_sort = DataFrame(\n [[1, 60], [5, 30], [6, 40], [10, 10]], columns=[\"foo\", \"bar\"], index=index\n )\n\n index = CategoricalIndex(\n [\"(7.5, 10]\", \"(2.5, 5]\", \"(5, 7.5]\", \"(0, 2.5]\"],\n categories=[\"(7.5, 10]\", \"(2.5, 5]\", \"(5, 7.5]\", \"(0, 2.5]\"],\n name=\"range\",\n )\n expected_nosort = DataFrame(\n [[10, 10], [5, 30], [6, 40], [1, 60]], index=index, columns=[\"foo\", \"bar\"]\n )\n\n col = \"range\"\n\n # this is an unordered categorical, but we allow this ####\n result_sort = df.groupby(col, sort=True, observed=False).first()\n tm.assert_frame_equal(result_sort, expected_sort)\n\n result_nosort = df.groupby(col, sort=False, observed=False).first()\n tm.assert_frame_equal(result_nosort, expected_nosort)\n\n\ndef test_sort_datetimelike():\n # GH10505\n\n # use same data as test_groupby_sort_categorical, which category is\n # corresponding to datetime.month\n df = DataFrame(\n {\n \"dt\": [\n datetime(2011, 7, 1),\n datetime(2011, 7, 1),\n datetime(2011, 2, 1),\n datetime(2011, 5, 1),\n datetime(2011, 2, 1),\n datetime(2011, 1, 1),\n datetime(2011, 5, 1),\n ],\n \"foo\": [10, 8, 5, 6, 4, 1, 7],\n \"bar\": [10, 20, 30, 40, 50, 60, 70],\n },\n columns=[\"dt\", \"foo\", \"bar\"],\n )\n\n # ordered=True\n df[\"dt\"] = Categorical(df[\"dt\"], ordered=True)\n index = [\n datetime(2011, 1, 1),\n datetime(2011, 2, 1),\n datetime(2011, 5, 1),\n datetime(2011, 7, 1),\n ]\n result_sort = DataFrame(\n [[1, 60], [5, 30], [6, 40], [10, 10]], columns=[\"foo\", \"bar\"]\n )\n result_sort.index = CategoricalIndex(index, name=\"dt\", ordered=True)\n\n index = [\n datetime(2011, 7, 1),\n datetime(2011, 2, 1),\n datetime(2011, 5, 1),\n datetime(2011, 1, 1),\n ]\n result_nosort = DataFrame(\n [[10, 10], [5, 30], [6, 40], [1, 60]], columns=[\"foo\", \"bar\"]\n )\n result_nosort.index = CategoricalIndex(\n index, categories=index, name=\"dt\", ordered=True\n )\n\n col = \"dt\"\n tm.assert_frame_equal(\n result_sort, df.groupby(col, sort=True, observed=False).first()\n )\n\n # when categories is ordered, group is ordered by category's order\n tm.assert_frame_equal(\n result_sort, df.groupby(col, sort=False, observed=False).first()\n )\n\n # ordered = False\n df[\"dt\"] = Categorical(df[\"dt\"], ordered=False)\n index = [\n datetime(2011, 1, 1),\n datetime(2011, 2, 1),\n datetime(2011, 5, 1),\n datetime(2011, 7, 1),\n ]\n result_sort = DataFrame(\n [[1, 60], [5, 30], [6, 40], [10, 10]], columns=[\"foo\", \"bar\"]\n )\n 
result_sort.index = CategoricalIndex(index, name=\"dt\")\n\n index = [\n datetime(2011, 7, 1),\n datetime(2011, 2, 1),\n datetime(2011, 5, 1),\n datetime(2011, 1, 1),\n ]\n result_nosort = DataFrame(\n [[10, 10], [5, 30], [6, 40], [1, 60]], columns=[\"foo\", \"bar\"]\n )\n result_nosort.index = CategoricalIndex(index, categories=index, name=\"dt\")\n\n col = \"dt\"\n tm.assert_frame_equal(\n result_sort, df.groupby(col, sort=True, observed=False).first()\n )\n tm.assert_frame_equal(\n result_nosort, df.groupby(col, sort=False, observed=False).first()\n )\n\n\ndef test_empty_sum():\n # https://github.com/pandas-dev/pandas/issues/18678\n df = DataFrame(\n {\"A\": Categorical([\"a\", \"a\", \"b\"], categories=[\"a\", \"b\", \"c\"]), \"B\": [1, 2, 1]}\n )\n expected_idx = CategoricalIndex([\"a\", \"b\", \"c\"], name=\"A\")\n\n # 0 by default\n result = df.groupby(\"A\", observed=False).B.sum()\n expected = Series([3, 1, 0], expected_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n # min_count=0\n result = df.groupby(\"A\", observed=False).B.sum(min_count=0)\n expected = Series([3, 1, 0], expected_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n # min_count=1\n result = df.groupby(\"A\", observed=False).B.sum(min_count=1)\n expected = Series([3, 1, np.nan], expected_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n # min_count>1\n result = df.groupby(\"A\", observed=False).B.sum(min_count=2)\n expected = Series([3, np.nan, np.nan], expected_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n\ndef test_empty_prod():\n # https://github.com/pandas-dev/pandas/issues/18678\n df = DataFrame(\n {\"A\": Categorical([\"a\", \"a\", \"b\"], categories=[\"a\", \"b\", \"c\"]), \"B\": [1, 2, 1]}\n )\n\n expected_idx = CategoricalIndex([\"a\", \"b\", \"c\"], name=\"A\")\n\n # 1 by default\n result = df.groupby(\"A\", observed=False).B.prod()\n expected = Series([2, 1, 1], expected_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n # min_count=0\n result = df.groupby(\"A\", observed=False).B.prod(min_count=0)\n expected = Series([2, 1, 1], expected_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n # min_count=1\n result = df.groupby(\"A\", observed=False).B.prod(min_count=1)\n expected = Series([2, 1, np.nan], expected_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_multiindex_categorical_datetime():\n # https://github.com/pandas-dev/pandas/issues/21390\n\n df = DataFrame(\n {\n \"key1\": Categorical(list(\"abcbabcba\")),\n \"key2\": Categorical(\n list(pd.date_range(\"2018-06-01 00\", freq=\"1T\", periods=3)) * 3\n ),\n \"values\": np.arange(9),\n }\n )\n result = df.groupby([\"key1\", \"key2\"]).mean()\n\n idx = MultiIndex.from_product(\n [\n Categorical([\"a\", \"b\", \"c\"]),\n Categorical(pd.date_range(\"2018-06-01 00\", freq=\"1T\", periods=3)),\n ],\n names=[\"key1\", \"key2\"],\n )\n expected = DataFrame({\"values\": [0, 4, 8, 3, 4, 5, 6, np.nan, 2]}, index=idx)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"as_index, expected\",\n [\n (\n True,\n Series(\n index=MultiIndex.from_arrays(\n [Series([1, 1, 2], dtype=\"category\"), [1, 2, 2]], names=[\"a\", \"b\"]\n ),\n data=[1, 2, 3],\n name=\"x\",\n ),\n ),\n (\n False,\n DataFrame(\n {\n \"a\": Series([1, 1, 2], dtype=\"category\"),\n \"b\": [1, 2, 2],\n \"x\": [1, 2, 3],\n }\n ),\n ),\n ],\n)\ndef test_groupby_agg_observed_true_single_column(as_index, expected):\n # GH-23970\n df = DataFrame(\n {\"a\": Series([1, 
1, 2], dtype=\"category\"), \"b\": [1, 2, 2], \"x\": [1, 2, 3]}\n )\n\n result = df.groupby([\"a\", \"b\"], as_index=as_index, observed=True)[\"x\"].sum()\n\n tm.assert_equal(result, expected)\n\n\[email protected](\"fill_value\", [None, np.nan, pd.NaT])\ndef test_shift(fill_value):\n ct = Categorical(\n [\"a\", \"b\", \"c\", \"d\"], categories=[\"a\", \"b\", \"c\", \"d\"], ordered=False\n )\n expected = Categorical(\n [None, \"a\", \"b\", \"c\"], categories=[\"a\", \"b\", \"c\", \"d\"], ordered=False\n )\n res = ct.shift(1, fill_value=fill_value)\n tm.assert_equal(res, expected)\n\n\[email protected]\ndef df_cat(df):\n \"\"\"\n DataFrame with multiple categorical columns and a column of integers.\n Shortened so as not to contain all possible combinations of categories.\n Useful for testing `observed` kwarg functionality on GroupBy objects.\n\n Parameters\n ----------\n df: DataFrame\n Non-categorical, longer DataFrame from another fixture, used to derive\n this one\n\n Returns\n -------\n df_cat: DataFrame\n \"\"\"\n df_cat = df.copy()[:4] # leave out some groups\n df_cat[\"A\"] = df_cat[\"A\"].astype(\"category\")\n df_cat[\"B\"] = df_cat[\"B\"].astype(\"category\")\n df_cat[\"C\"] = Series([1, 2, 3, 4])\n df_cat = df_cat.drop([\"D\"], axis=1)\n return df_cat\n\n\[email protected](\n \"operation, kwargs\", [(\"agg\", dict(dtype=\"category\")), (\"apply\", dict())]\n)\ndef test_seriesgroupby_observed_true(df_cat, operation, kwargs):\n # GH 24880\n index = MultiIndex.from_frame(\n DataFrame(\n {\"A\": [\"foo\", \"foo\", \"bar\", \"bar\"], \"B\": [\"one\", \"two\", \"one\", \"three\"]},\n **kwargs,\n )\n )\n expected = Series(data=[1, 3, 2, 4], index=index, name=\"C\")\n grouped = df_cat.groupby([\"A\", \"B\"], observed=True)[\"C\"]\n result = getattr(grouped, operation)(sum)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"operation\", [\"agg\", \"apply\"])\[email protected](\"observed\", [False, None])\ndef test_seriesgroupby_observed_false_or_none(df_cat, observed, operation):\n # GH 24880\n index, _ = MultiIndex.from_product(\n [\n CategoricalIndex([\"bar\", \"foo\"], ordered=False),\n CategoricalIndex([\"one\", \"three\", \"two\"], ordered=False),\n ],\n names=[\"A\", \"B\"],\n ).sortlevel()\n\n expected = Series(data=[2, 4, np.nan, 1, np.nan, 3], index=index, name=\"C\")\n grouped = df_cat.groupby([\"A\", \"B\"], observed=observed)[\"C\"]\n result = getattr(grouped, operation)(sum)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n \"observed, index, data\",\n [\n (\n True,\n MultiIndex.from_tuples(\n [\n (\"foo\", \"one\", \"min\"),\n (\"foo\", \"one\", \"max\"),\n (\"foo\", \"two\", \"min\"),\n (\"foo\", \"two\", \"max\"),\n (\"bar\", \"one\", \"min\"),\n (\"bar\", \"one\", \"max\"),\n (\"bar\", \"three\", \"min\"),\n (\"bar\", \"three\", \"max\"),\n ],\n names=[\"A\", \"B\", None],\n ),\n [1, 1, 3, 3, 2, 2, 4, 4],\n ),\n (\n False,\n MultiIndex.from_product(\n [\n CategoricalIndex([\"bar\", \"foo\"], ordered=False),\n CategoricalIndex([\"one\", \"three\", \"two\"], ordered=False),\n Index([\"min\", \"max\"]),\n ],\n names=[\"A\", \"B\", None],\n ),\n [2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],\n ),\n (\n None,\n MultiIndex.from_product(\n [\n CategoricalIndex([\"bar\", \"foo\"], ordered=False),\n CategoricalIndex([\"one\", \"three\", \"two\"], ordered=False),\n Index([\"min\", \"max\"]),\n ],\n names=[\"A\", \"B\", None],\n ),\n [2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],\n ),\n ],\n)\ndef 
test_seriesgroupby_observed_apply_dict(df_cat, observed, index, data):\n # GH 24880\n expected = Series(data=data, index=index, name=\"C\")\n result = df_cat.groupby([\"A\", \"B\"], observed=observed)[\"C\"].apply(\n lambda x: {\"min\": x.min(), \"max\": x.max()}\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_categorical_series_dataframe_consistent(df_cat):\n # GH 20416\n expected = df_cat.groupby([\"A\", \"B\"])[\"C\"].mean()\n result = df_cat.groupby([\"A\", \"B\"]).mean()[\"C\"]\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"code\", [([1, 0, 0]), ([0, 0, 0])])\ndef test_groupby_categorical_axis_1(code):\n # GH 13420\n df = DataFrame({\"a\": [1, 2, 3, 4], \"b\": [-1, -2, -3, -4], \"c\": [5, 6, 7, 8]})\n cat = pd.Categorical.from_codes(code, categories=list(\"abc\"))\n result = df.groupby(cat, axis=1).mean()\n expected = df.T.groupby(cat, axis=0).mean().T\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_cat_preserves_structure(observed, ordered):\n # GH 28787\n df = DataFrame(\n {\"Name\": Categorical([\"Bob\", \"Greg\"], ordered=ordered), \"Item\": [1, 2]},\n columns=[\"Name\", \"Item\"],\n )\n expected = df.copy()\n\n result = (\n df.groupby(\"Name\", observed=observed)\n .agg(pd.DataFrame.sum, skipna=True)\n .reset_index()\n )\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_get_nonexistent_category():\n # Accessing a Category that is not in the dataframe\n df = pd.DataFrame({\"var\": [\"a\", \"a\", \"b\", \"b\"], \"val\": range(4)})\n with pytest.raises(KeyError, match=\"'vau'\"):\n df.groupby(\"var\").apply(\n lambda rows: pd.DataFrame(\n {\"var\": [rows.iloc[-1][\"var\"]], \"val\": [rows.iloc[-1][\"vau\"]]}\n )\n )\n\n\ndef test_series_groupby_on_2_categoricals_unobserved(\n reduction_func: str, observed: bool, request\n):\n # GH 17605\n\n if reduction_func == \"ngroup\":\n pytest.skip(\"ngroup is not truly a reduction\")\n\n if reduction_func == \"corrwith\": # GH 32293\n mark = pytest.mark.xfail(reason=\"TODO: implemented SeriesGroupBy.corrwith\")\n request.node.add_marker(mark)\n\n df = pd.DataFrame(\n {\n \"cat_1\": pd.Categorical(list(\"AABB\"), categories=list(\"ABCD\")),\n \"cat_2\": pd.Categorical(list(\"AB\") * 2, categories=list(\"ABCD\")),\n \"value\": [0.1] * 4,\n }\n )\n args = {\"nth\": [0]}.get(reduction_func, [])\n\n expected_length = 4 if observed else 16\n\n series_groupby = df.groupby([\"cat_1\", \"cat_2\"], observed=observed)[\"value\"]\n agg = getattr(series_groupby, reduction_func)\n result = agg(*args)\n\n assert len(result) == expected_length\n\n\[email protected](\n \"func, zero_or_nan\",\n [\n (\"all\", np.NaN),\n (\"any\", np.NaN),\n (\"count\", 0),\n (\"first\", np.NaN),\n (\"idxmax\", np.NaN),\n (\"idxmin\", np.NaN),\n (\"last\", np.NaN),\n (\"mad\", np.NaN),\n (\"max\", np.NaN),\n (\"mean\", np.NaN),\n (\"median\", np.NaN),\n (\"min\", np.NaN),\n (\"nth\", np.NaN),\n (\"nunique\", 0),\n (\"prod\", np.NaN),\n (\"quantile\", np.NaN),\n (\"sem\", np.NaN),\n (\"size\", 0),\n (\"skew\", np.NaN),\n (\"std\", np.NaN),\n (\"sum\", np.NaN),\n (\"var\", np.NaN),\n ],\n)\ndef test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans(func, zero_or_nan):\n # GH 17605\n # Tests whether the unobserved categories in the result contain 0 or NaN\n df = pd.DataFrame(\n {\n \"cat_1\": pd.Categorical(list(\"AABB\"), categories=list(\"ABC\")),\n \"cat_2\": pd.Categorical(list(\"AB\") * 2, categories=list(\"ABC\")),\n \"value\": [0.1] * 4,\n }\n )\n unobserved = [tuple(\"AC\"), tuple(\"BC\"), 
tuple(\"CA\"), tuple(\"CB\"), tuple(\"CC\")]\n args = {\"nth\": [0]}.get(func, [])\n\n series_groupby = df.groupby([\"cat_1\", \"cat_2\"], observed=False)[\"value\"]\n agg = getattr(series_groupby, func)\n result = agg(*args)\n\n for idx in unobserved:\n val = result.loc[idx]\n assert (pd.isna(zero_or_nan) and pd.isna(val)) or (val == zero_or_nan)\n\n # If we expect unobserved values to be zero, we also expect the dtype to be int\n if zero_or_nan == 0:\n assert np.issubdtype(result.dtype, np.integer)\n\n\ndef test_series_groupby_categorical_aggregation_getitem():\n # GH 8870\n d = {\"foo\": [10, 8, 4, 1], \"bar\": [10, 20, 30, 40], \"baz\": [\"d\", \"c\", \"d\", \"c\"]}\n df = pd.DataFrame(d)\n cat = pd.cut(df[\"foo\"], np.linspace(0, 20, 5))\n df[\"range\"] = cat\n groups = df.groupby([\"range\", \"baz\"], as_index=True, sort=True)\n result = groups[\"foo\"].agg(\"mean\")\n expected = groups.agg(\"mean\")[\"foo\"]\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n \"func, expected_values\",\n [(pd.Series.nunique, [1, 1, 2]), (pd.Series.count, [1, 2, 2])],\n)\ndef test_groupby_agg_categorical_columns(func, expected_values):\n # 31256\n df = pd.DataFrame(\n {\n \"id\": [0, 1, 2, 3, 4],\n \"groups\": [0, 1, 1, 2, 2],\n \"value\": pd.Categorical([0, 0, 0, 0, 1]),\n }\n ).set_index(\"id\")\n result = df.groupby(\"groups\").agg(func)\n\n expected = pd.DataFrame(\n {\"value\": expected_values}, index=pd.Index([0, 1, 2], name=\"groups\"),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_agg_non_numeric():\n df = pd.DataFrame(\n {\"A\": pd.Categorical([\"a\", \"a\", \"b\"], categories=[\"a\", \"b\", \"c\"])}\n )\n expected = pd.DataFrame({\"A\": [2, 1]}, index=[1, 2])\n\n result = df.groupby([1, 2, 1]).agg(pd.Series.nunique)\n tm.assert_frame_equal(result, expected)\n\n result = df.groupby([1, 2, 1]).nunique()\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"func\", [\"first\", \"last\"])\ndef test_groupy_first_returned_categorical_instead_of_dataframe(func):\n # GH 28641: groupby drops index, when grouping over categorical column with\n # first/last. 
Renamed Categorical instead of DataFrame previously.\n df = pd.DataFrame(\n {\"A\": [1997], \"B\": pd.Series([\"b\"], dtype=\"category\").cat.as_ordered()}\n )\n df_grouped = df.groupby(\"A\")[\"B\"]\n result = getattr(df_grouped, func)()\n expected = pd.Series([\"b\"], index=pd.Index([1997], name=\"A\"), name=\"B\")\n tm.assert_series_equal(result, expected)\n\n\ndef test_read_only_category_no_sort():\n # GH33410\n cats = np.array([1, 2])\n cats.flags.writeable = False\n df = DataFrame(\n {\"a\": [1, 3, 5, 7], \"b\": Categorical([1, 1, 2, 2], categories=Index(cats))}\n )\n expected = DataFrame(data={\"a\": [2, 6]}, index=CategoricalIndex([1, 2], name=\"b\"))\n result = df.groupby(\"b\", sort=False).mean()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_sorted_missing_category_values():\n # GH 28597\n df = pd.DataFrame(\n {\n \"foo\": [\n \"small\",\n \"large\",\n \"large\",\n \"large\",\n \"medium\",\n \"large\",\n \"large\",\n \"medium\",\n ],\n \"bar\": [\"C\", \"A\", \"A\", \"C\", \"A\", \"C\", \"A\", \"C\"],\n }\n )\n df[\"foo\"] = (\n df[\"foo\"]\n .astype(\"category\")\n .cat.set_categories([\"tiny\", \"small\", \"medium\", \"large\"], ordered=True)\n )\n\n expected = pd.DataFrame(\n {\n \"tiny\": {\"A\": 0, \"C\": 0},\n \"small\": {\"A\": 0, \"C\": 1},\n \"medium\": {\"A\": 1, \"C\": 1},\n \"large\": {\"A\": 3, \"C\": 2},\n }\n )\n expected = expected.rename_axis(\"bar\", axis=\"index\")\n expected.columns = pd.CategoricalIndex(\n [\"tiny\", \"small\", \"medium\", \"large\"],\n categories=[\"tiny\", \"small\", \"medium\", \"large\"],\n ordered=True,\n name=\"foo\",\n dtype=\"category\",\n )\n\n result = df.groupby([\"bar\", \"foo\"]).size().unstack()\n\n tm.assert_frame_equal(result, expected)\n"
] | [
[
"numpy.linspace",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas.core.indexes.base.Index.__le__",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas._libs.interval.IntervalTree",
"numpy.concatenate",
"pandas.core.indexes.base.Index",
"pandas._config.get_option",
"numpy.where",
"numpy.nextafter",
"pandas.core.indexes.extension.inherit_names",
"pandas._libs.tslibs.to_offset",
"pandas.core.dtypes.common.is_interval_dtype",
"pandas.core.common.all_not_none",
"pandas.core.common.not_none",
"numpy.arange",
"numpy.lexsort",
"pandas.core.common.any_none",
"pandas.core.indexes.base.maybe_extract_name",
"pandas.core.dtypes.common.is_number",
"pandas.core.ops.get_op_result_name",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.dtypes.common.is_float",
"pandas.core.arrays.interval.IntervalArray",
"pandas.util._exceptions.rewrite_exception",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.core.indexes.base.default_pprint",
"pandas.core.indexers.is_valid_positional_slice",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.core.dtypes.common.is_list_like",
"pandas.util._decorators.Appender",
"pandas.errors.InvalidIndexError",
"pandas.core.common.maybe_box_datetimelike",
"pandas.core.indexes.base.ensure_index",
"pandas.core.dtypes.cast.infer_dtype_from_scalar",
"pandas.core.indexes.base.Index.__lt__",
"pandas.core.indexes.base.Index.__gt__",
"numpy.timedelta64",
"pandas.core.dtypes.common.ensure_platform_int",
"pandas.core.arrays.interval.IntervalArray.from_tuples",
"numpy.array",
"pandas.core.arrays.interval.IntervalArray.from_arrays",
"pandas.core.indexes.base.Index.astype",
"pandas.core.common.count_not_none",
"pandas.core.dtypes.cast.maybe_downcast_to_dtype",
"numpy.intp",
"pandas.core.algorithms.take_1d",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.common.is_integer",
"pandas.core.dtypes.cast.find_common_type",
"pandas.core.indexes.multi.MultiIndex.from_arrays",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.indexes.base.Index.__ge__",
"pandas.core.dtypes.common.is_datetime_or_timedelta_dtype",
"pandas.core.dtypes.missing.isna",
"pandas._libs.interval.IntervalMixin.__new__",
"pandas.core.arrays.interval.IntervalArray.from_breaks"
],
[
"numpy.hstack",
"pandas.Series",
"numpy.sqrt",
"pandas.Timestamp",
"pandas.RangeIndex",
"numpy.arange",
"pandas.Index",
"pandas.DataFrame",
"pandas.MultiIndex.from_arrays",
"numpy.random.randn",
"pandas.MultiIndex.from_product",
"numpy.random.rand",
"pandas.date_range",
"pandas._testing.assert_series_equal",
"pandas._testing.assert_frame_equal"
],
[
"pandas.to_datetime",
"pandas.Series",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"numpy.ones",
"pandas._testing.assert_frame_equal",
"pandas.Timedelta",
"numpy.array",
"numpy.zeros"
],
[
"pandas.Series",
"numpy.linspace",
"numpy.asarray",
"numpy.issubdtype",
"pandas._testing.assert_dict_equal",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"numpy.max",
"numpy.random.randn",
"numpy.mean",
"pandas.isna",
"pandas._testing.assert_frame_equal",
"numpy.random.randint",
"pandas._testing.assert_numpy_array_equal",
"numpy.arange",
"pandas.Index",
"pandas.cut",
"pandas._testing.assert_series_equal",
"pandas._testing.assert_index_equal",
"pandas.Categorical.from_codes",
"pandas.compat.is_platform_windows",
"pandas.Categorical",
"pandas.date_range",
"numpy.array",
"numpy.random.RandomState",
"numpy.sum",
"pandas.CategoricalIndex",
"pandas._testing.assert_equal",
"pandas.MultiIndex.from_arrays",
"pandas._testing.assert_categorical_equal",
"pandas.qcut"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
marc-rigter/varibad | [
"2bc0cb91dac7cd6718b1f3afbb17d3272d848b16"
] | [
"learner.py"
] | [
"\"\"\"\nBase Learner, without Meta-Learning.\nCan be used to train for good average performance, or for the oracle environment.\n\"\"\"\n\nimport os\nimport time\nimport wandb\nimport gym\nimport numpy as np\nimport torch\n\nfrom algorithms.a2c import A2C\nfrom algorithms.online_storage import OnlineStorage\nfrom algorithms.ppo import PPO\nfrom environments.parallel_envs import make_vec_envs\nfrom models.policy import Policy\nfrom utils import evaluation as utl_eval\nfrom utils import helpers as utl\nfrom utils.tb_logger import TBLogger\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass Learner:\n \"\"\"\n Learner (no meta-learning), can be used to train avg/oracle/belief-oracle policies.\n \"\"\"\n def __init__(self, args):\n\n self.args = args\n utl.seed(self.args.seed, self.args.deterministic_execution)\n\n # calculate number of updates and keep count of frames/iterations\n self.num_updates = int(args.num_frames) // args.policy_num_steps // args.num_processes\n self.frames = 0\n self.iter_idx = -1\n\n # initialise tensorboard logger\n self.logger = TBLogger(self.args, self.args.exp_label)\n\n # initialise environments\n self.envs = make_vec_envs(env_name=args.env_name, seed=args.seed, num_processes=args.num_processes,\n gamma=args.policy_gamma, device=device,\n episodes_per_task=self.args.max_rollouts_per_task,\n normalise_rew=args.norm_rew_for_policy, ret_rms=None,\n tasks=None\n )\n\n if self.args.single_task_mode:\n # get the current tasks (which will be num_process many different tasks)\n self.train_tasks = self.envs.get_task()\n # set the tasks to the first task (i.e. just a random task)\n self.train_tasks[1:] = self.train_tasks[0]\n # make it a list\n self.train_tasks = [t for t in self.train_tasks]\n # re-initialise environments with those tasks\n self.envs = make_vec_envs(env_name=args.env_name, seed=args.seed, num_processes=args.num_processes,\n gamma=args.policy_gamma, device=device,\n episodes_per_task=self.args.max_rollouts_per_task,\n normalise_rew=args.norm_rew_for_policy, ret_rms=None,\n tasks=self.train_tasks,\n )\n # save the training tasks so we can evaluate on the same envs later\n utl.save_obj(self.train_tasks, self.logger.full_output_folder, \"train_tasks\")\n else:\n self.train_tasks = None\n\n # calculate what the maximum length of the trajectories is\n args.max_trajectory_len = self.envs._max_episode_steps\n args.max_trajectory_len *= self.args.max_rollouts_per_task\n\n # get policy input dimensions\n self.args.state_dim = self.envs.observation_space.shape[0]\n self.args.task_dim = self.envs.task_dim\n self.args.belief_dim = self.envs.belief_dim\n self.args.num_states = self.envs.num_states\n # get policy output (action) dimensions\n self.args.action_space = self.envs.action_space\n if isinstance(self.envs.action_space, gym.spaces.discrete.Discrete):\n self.args.action_dim = 1\n else:\n self.args.action_dim = self.envs.action_space.shape[0]\n\n # initialise policy\n self.policy_storage = self.initialise_policy_storage()\n self.policy = self.initialise_policy()\n\n def initialise_policy_storage(self):\n return OnlineStorage(args=self.args,\n num_steps=self.args.policy_num_steps,\n num_processes=self.args.num_processes,\n state_dim=self.args.state_dim,\n latent_dim=0, # use metalearner.py if you want to use the VAE\n belief_dim=self.args.belief_dim,\n task_dim=self.args.task_dim,\n action_space=self.args.action_space,\n hidden_size=0,\n normalise_rewards=self.args.norm_rew_for_policy,\n )\n\n def initialise_policy(self):\n\n 
# initialise policy network\n policy_net = Policy(\n args=self.args,\n #\n pass_state_to_policy=self.args.pass_state_to_policy,\n pass_latent_to_policy=False, # use metalearner.py if you want to use the VAE\n pass_belief_to_policy=self.args.pass_belief_to_policy,\n pass_task_to_policy=self.args.pass_task_to_policy,\n dim_state=self.args.state_dim,\n dim_latent=0,\n dim_belief=self.args.belief_dim,\n dim_task=self.args.task_dim,\n #\n hidden_layers=self.args.policy_layers,\n activation_function=self.args.policy_activation_function,\n policy_initialisation=self.args.policy_initialisation,\n #\n action_space=self.envs.action_space,\n init_std=self.args.policy_init_std,\n ).to(device)\n\n # initialise policy trainer\n if self.args.policy == 'a2c':\n policy = A2C(\n self.args,\n policy_net,\n self.args.policy_value_loss_coef,\n self.args.policy_entropy_coef,\n policy_optimiser=self.args.policy_optimiser,\n policy_anneal_lr=self.args.policy_anneal_lr,\n train_steps=self.num_updates,\n lr=self.args.lr_policy,\n eps=self.args.policy_eps,\n )\n elif self.args.policy == 'ppo':\n policy = PPO(\n self.args,\n policy_net,\n self.args.policy_value_loss_coef,\n self.args.policy_entropy_coef,\n policy_optimiser=self.args.policy_optimiser,\n policy_anneal_lr=self.args.policy_anneal_lr,\n train_steps=self.num_updates,\n lr=self.args.lr_policy,\n eps=self.args.policy_eps,\n ppo_epoch=self.args.ppo_num_epochs,\n num_mini_batch=self.args.ppo_num_minibatch,\n use_huber_loss=self.args.ppo_use_huberloss,\n use_clipped_value_loss=self.args.ppo_use_clipped_value_loss,\n clip_param=self.args.ppo_clip_param,\n )\n else:\n raise NotImplementedError\n\n return policy\n\n def train(self):\n \"\"\" Main training loop \"\"\"\n start_time = time.time()\n\n # reset environments\n state, belief, task = utl.reset_env(self.envs, self.args)\n\n # insert initial observation / embeddings to rollout storage\n self.policy_storage.prev_state[0].copy_(state)\n\n # log once before training\n with torch.no_grad():\n self.log(None, None, start_time)\n\n for self.iter_idx in range(self.num_updates):\n\n # rollout policies for a few steps\n for step in range(self.args.policy_num_steps):\n\n # sample actions from policy\n with torch.no_grad():\n value, action = utl.select_action(\n args=self.args,\n policy=self.policy,\n state=state,\n belief=belief,\n task=task,\n deterministic=False)\n\n # observe reward and next obs\n [state, belief, task], (rew_raw, rew_normalised), done, infos = utl.env_step(self.envs, action, self.args)\n\n # create mask for episode ends\n masks_done = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done]).to(device)\n # bad_mask is true if episode ended because time limit was reached\n bad_masks = torch.FloatTensor([[0.0] if 'bad_transition' in info.keys() else [1.0] for info in infos]).to(device)\n\n # reset environments that are done\n done_indices = np.argwhere(done.flatten()).flatten()\n if len(done_indices) > 0:\n state, belief, task = utl.reset_env(self.envs, self.args,\n indices=done_indices, state=state)\n\n # add experience to policy buffer\n self.policy_storage.insert(\n state=state,\n belief=belief,\n task=task,\n actions=action,\n rewards_raw=rew_raw,\n rewards_normalised=rew_normalised,\n value_preds=value,\n masks=masks_done,\n bad_masks=bad_masks,\n done=torch.from_numpy(np.array(done, dtype=float)).unsqueeze(1),\n )\n\n self.frames += self.args.num_processes\n\n # --- UPDATE ---\n\n train_stats = self.update(state=state, belief=belief, task=task)\n\n # log\n run_stats = [action, 
self.policy_storage.action_log_probs, value]\n if train_stats is not None:\n with torch.no_grad():\n self.log(run_stats, train_stats, start_time)\n\n # clean up after update\n self.policy_storage.after_update()\n\n def get_value(self, state, belief, task):\n return self.policy.actor_critic.get_value(state=state, belief=belief, task=task, latent=None).detach()\n\n def update(self, state, belief, task):\n \"\"\"\n Meta-update.\n Here the policy is updated for good average performance across tasks.\n :return: policy_train_stats which are: value_loss_epoch, action_loss_epoch, dist_entropy_epoch, loss_epoch\n \"\"\"\n # bootstrap next value prediction\n with torch.no_grad():\n next_value = self.get_value(state=state, belief=belief, task=task)\n\n # compute returns for current rollouts\n self.policy_storage.compute_returns(next_value, self.args.policy_use_gae, self.args.policy_gamma,\n self.args.policy_tau,\n use_proper_time_limits=self.args.use_proper_time_limits)\n\n policy_train_stats = self.policy.update(policy_storage=self.policy_storage)\n\n return policy_train_stats, None\n\n def log(self, run_stats, train_stats, start):\n \"\"\"\n Evaluate policy, save model, write to tensorboard logger.\n \"\"\"\n\n # --- visualise behaviour of policy ---\n\n if (self.iter_idx + 1) % self.args.vis_interval == 0:\n ret_rms = self.envs.venv.ret_rms if self.args.norm_rew_for_policy else None\n utl_eval.visualise_behaviour(args=self.args,\n policy=self.policy,\n image_folder=self.logger.full_output_folder,\n iter_idx=self.iter_idx,\n ret_rms=ret_rms,\n tasks=self.train_tasks,\n )\n\n # --- evaluate policy ----\n\n if (self.iter_idx + 1) % self.args.eval_interval == 0:\n\n ret_rms = self.envs.venv.ret_rms if self.args.norm_rew_for_policy else None\n\n returns_per_episode = utl_eval.evaluate(args=self.args,\n policy=self.policy,\n ret_rms=ret_rms,\n iter_idx=self.iter_idx,\n tasks=self.train_tasks,\n )\n\n # log the average return across tasks (=processes)\n returns_avg = returns_per_episode.mean(dim=0)\n returns_std = returns_per_episode.std(dim=0)\n for k in range(len(returns_avg)):\n wandb.log({'return_avg_per_iter/episode_{}'.format(k + 1): returns_avg[k]}, step=self.iter_idx)\n wandb.log({'return_avg_per_frame/episode_{}'.format(k + 1): returns_avg[k]}, step=self.iter_idx)\n wandb.log({'return_std_per_iter/episode_{}'.format(k + 1): returns_std[k]}, step=self.iter_idx)\n wandb.log({'return_std_per_frame/episode_{}'.format(k + 1): returns_std[k]}, step=self.iter_idx)\n wandb.log({'environment/num_frames': self.frames}, step=self.iter_idx)\n print(\"Updates {}, num timesteps {}, FPS {} \\n Mean return (train): {:.5f} \\n\".\n format(self.iter_idx, self.frames, int(self.frames / (time.time() - start)),\n returns_avg[-1].item()))\n\n # save model\n if (self.iter_idx + 1) % self.args.save_interval == 0:\n save_path = os.path.join(self.logger.full_output_folder, 'models')\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n\n idx_labels = ['']\n if self.args.save_intermediate_models:\n idx_labels.append(int(self.iter_idx))\n\n for idx_label in idx_labels:\n\n torch.save(self.policy.actor_critic, os.path.join(save_path, f\"policy{idx_label}.pt\"))\n\n # save normalisation params of envs\n if self.args.norm_rew_for_policy:\n rew_rms = self.envs.venv.ret_rms\n utl.save_obj(rew_rms, save_path, f\"env_rew_rms{idx_label}\")\n # TODO: grab from policy and save?\n # if self.args.norm_obs_for_policy:\n # obs_rms = self.envs.venv.obs_rms\n # utl.save_obj(obs_rms, save_path, f\"env_obs_rms{idx_label}\")\n\n # --- 
log some other things ---\n\n if ((self.iter_idx + 1) % self.args.log_interval == 0) and (train_stats is not None):\n\n train_stats, _ = train_stats\n\n wandb.log({'policy_losses/value_loss': train_stats[0]}, step=self.iter_idx)\n wandb.log({'policy_losses/action_loss': train_stats[1]}, step=self.iter_idx)\n wandb.log({'policy_losses/dist_entropy': train_stats[2]}, step=self.iter_idx)\n wandb.log({'policy_losses/sum': train_stats[3]}, step=self.iter_idx)\n\n # writer.add_scalar('policy/action', action.mean(), j)\n wandb.log({'policy/action': run_stats[0][0].float().mean()}, step=self.iter_idx)\n if hasattr(self.policy.actor_critic, 'logstd'):\n wandb.log({'policy/action_logstd': self.policy.actor_critic.dist.logstd.mean()}, step=self.iter_idx)\n wandb.log({'policy/action_logprob': run_stats[1].mean()}, step=self.iter_idx)\n wandb.log({'policy/value': run_stats[2].mean()}, step=self.iter_idx)\n\n param_list = list(self.policy.actor_critic.parameters())\n param_mean = np.mean([param_list[i].data.cpu().numpy().mean() for i in range(len(param_list))])\n wandb.log({'weights/policy': param_mean}, step=self.iter_idx)\n wandb.log({'weights/policy_std': param_list[0].data.cpu().mean()}, step=self.iter_idx)\n if param_list[0].grad is not None:\n param_grad_mean = np.mean([param_list[i].grad.cpu().numpy().mean() for i in range(len(param_list))])\n wandb.log({'gradients/policy': param_grad_mean}, step=self.iter_idx)\n wandb.log({'gradients/policy_std': param_list[0].grad.cpu().numpy().mean()}, step=self.iter_idx)\n"
] | [
[
"torch.FloatTensor",
"numpy.array",
"torch.no_grad",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jinkos/nuke_kaggle | [
"79585646e77bd45ea2ee80e2f683d0875a3e20ba"
] | [
"image_wrapper.py"
] | [
"from __future__ import print_function\n\nfrom skimage.io import imread, imshow\nfrom skimage.transform import rotate\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport multiprocessing\nfrom tqdm import tqdm\nimport numpy as np\nimport sys\nimport os\n\nimport arg_config\nimport nuclear_gen\n\nuse_multiprocessing = True\nn_processes = 4\n\n### DEBUG FLAGS FOR augment(id_)\n# set to true for lots of plots to see what's going on\ndo_show = False\n# print lots of debugging data\ndo_data = False\n# set this to False if you are experimenting and don't want to\n# overwrite your previous images \ndo_save = True\n \n\n# copies a numpy image (src)\n# to an x,y position of another (dst)\ndef np_blit(dst,x,y,src):\n \n (dh,dw,_) = dst.shape\n (sh,sw,_) = src.shape\n\n dx = x\n sx = 0\n w = sw\n \n # clip left\n if dx < 0:\n w += dx\n sx -= dx\n dx = 0\n \n # clip right\n if x + w > dx + dw:\n w = dx + dw - x\n \n \n dy = y\n sy = 0\n h = sh\n\n # clip right\n if dy < 0:\n h += dy\n sy -= dy\n dy = 0\n \n # clip top\n if y + h > dy + dh:\n h = dy + dh - y\n\n if w <= 0 or h <= 0: return -1\n \n dst[dy:dy+h,dx:dx+w,:] = src[sy:sy+h,sx:sx+w,:]\n \n return 0\n\n# takes an input image (src)\n# returns a larger image mage of 9 sub-images\n# central image is the src\n# the other eight mirror the central image\ndef wrapify(src):\n\n (h,w,c) = src.shape\n\n # will return this\n img_wrap = np.empty((h*3,w*3,c),dtype=src.dtype)\n\n # center\n np_blit(img_wrap, w, h, src)\n # center top and bottom\n img_ud = np.flipud(src)\n np_blit(img_wrap, w, 0, img_ud)\n np_blit(img_wrap, w, 2*h, img_ud)\n # center left and right\n img_lr = np.fliplr(src)\n np_blit(img_wrap, 0, h, img_lr)\n np_blit(img_wrap, 2*w, h, img_lr)\n # corners\n im_lrud = np.fliplr(img_ud)\n np_blit(img_wrap, 0, 0, im_lrud)\n np_blit(img_wrap, 2*w, 0, im_lrud)\n np_blit(img_wrap, 0, 2*h, im_lrud)\n np_blit(img_wrap, 2*w, 2*h, im_lrud)\n\n return img_wrap\n\ndef imsave(fname,img):\n\n pil_img = Image.fromarray(img)\n pil_img.save(fname)\n\n# creates aumented files for a given idstr\ndef augment(multi_arg):\n \n idstr,train_path,n = multi_arg\n\n if (use_multiprocessing):\n print(\"doing {} : {}\".format(n,idstr))\n \n # load png image from 'image' folder\n path = train_path + idstr\n img = imread(path + '/images/' + idstr + '.png')[:,:,:3]\n (h,w,_) = img.shape\n\n # U-Net will need images whose dimentions are divisible by 16\n # otherwise skip-connections don't line up\n # also we want a 32 pixel border around the original image position\n # save these crop sizes for later\n w_crop = int(round(w/16))*16 + 64\n h_crop = int(round(h/16))*16 + 64\n \n # create the wrapped image\n img_wrap = wrapify(img)\n \n if do_data:\n print('img',np.min(img),np.max(img))\n print('img_wrap',np.min(img_wrap),np.max(img_wrap))\n \n if do_show:\n imshow(img)\n plt.show()\n imshow(img_wrap)\n plt.show()\n \n if do_save:\n # try each 15 degrees of rotation\n for rotation_angle in range(0,360,15):\n # this is SLOW!!!\n # preserve_range stops it from expanding the rand of the colors\n # order=3 is something like bi-cubic color fitting\n img_rot = rotate(img_wrap, rotation_angle, order=3,preserve_range=True)\n if do_data:\n print('img_rot',np.min(img_rot),np.max(img_rot))\n \n # shear the original rotated image\n img_shear,_ = nuclear_gen.do_shear(img_rot, [], -0.2, True)\n if do_data:\n print('img_shear',np.min(img_shear),np.max(img_shear))\n\n # crop the middle with a 32 pixel border\n img_crop,_ = nuclear_gen.do_random_crop(img_shear, [], w_crop, h_crop, 
0)\n if do_data:\n print('img_crop',np.min(img_crop),np.max(img_crop))\n\n # make sure it's a 24-bit image\n img_crop = np.array(img_crop,dtype=np.uint8)\n if do_data:\n print('img_crop',np.min(img_crop),np.max(img_crop))\n\n # save it\n imsave(path + '/images/aug_' + str(rotation_angle) + \"_shu_\" + idstr + '.png',img_crop)\n\n # repeat shear-crop-save with a different shear\n img_shear,_ = nuclear_gen.do_shear(img_rot, [], 0.2, True)\n img_crop,_ = nuclear_gen.do_random_crop(img_shear, [], w_crop, h_crop, 0)\n img_crop = np.array(img_crop,dtype=np.uint8)\n imsave(path + '/images/aug_' + str(rotation_angle) + \"_shd_\" + idstr + '.png',img_crop)\n\n # repeat shear-crop-save with a different shear\n img_shear,_ = nuclear_gen.do_shear(img_rot, [], 0.2, False)\n img_crop,_ = nuclear_gen.do_random_crop(img_shear, [], w_crop, h_crop, 0)\n img_crop = np.array(img_crop,dtype=np.uint8)\n imsave(path + '/images/aug_' + str(rotation_angle) + \"_shl_\" + idstr + '.png',img_crop)\n\n # repeat shear-crop-save with a different shear\n img_shear,_ = nuclear_gen.do_shear(img_rot, [], -0.2, False)\n img_crop,_ = nuclear_gen.do_random_crop(img_shear, [], w_crop, h_crop, 0)\n img_crop = np.array(img_crop,dtype=np.uint8)\n imsave(path + '/images/aug_' + str(rotation_angle) + \"_shr_\" + idstr + '.png',img_crop)\n\n # repeat with NO shear\n img_crop,_ = nuclear_gen.do_random_crop(img_rot, [], w_crop, h_crop, 0)\n img_crop = np.array(img_crop,dtype=np.uint8)\n imsave(path + '/images/aug_' + str(rotation_angle) + \"_sh_\" + idstr + '.png',img_crop)\n\n # create single mask from all mask files\n img = np.zeros((h, w, 1), dtype=np.bool)\n for mask_file in next(os.walk(path + '/masks/'))[2]:\n # ignore file name of already augmented masks\n # a bit UGLY - sorry\n if mask_file[:5] == 'wrap_': continue \n if mask_file[:4] == 'all_': continue\n if mask_file[:4] == 'aug_': continue\n mask_ = imread(path + '/masks/' + mask_file)\n if do_data:\n print('Ymask_',np.min(mask_),np.max(mask_))\n mask_ = np.expand_dims(mask_, axis=-1)\n img = np.maximum(img, mask_)\n\n # create the wrapped image\n img_wrap = wrapify(img)\n if do_data:\n print('Yimg_wrap',np.min(img_wrap),np.max(img_wrap))\n\n # generally we like our greyscale images to have 0 channels\n # instead of 1 channel\n img_wrap = np.squeeze(img_wrap)\n img = np.squeeze(img)\n \n if do_show:\n imshow(img)\n plt.show()\n imshow(img_wrap)\n plt.show()\n\n # save the np.maximum(all masks)\n if do_save:\n imsave(path + '/masks/all_' + idstr + '.png',img)\n \n # this block of code is same as for color image input\n # but for B&W mask\n for rotation_angle in range(0,360,15):\n # order=0 is for nearest neighbour\n # we want B&W images not greyscale\n img_rot = rotate(img_wrap, rotation_angle, order=0,preserve_range=True)\n if do_data:\n print('Yimg_rot',np.min(img_rot),np.max(img_rot))\n _,img_shear = nuclear_gen.do_shear([], img_rot, -0.2, True)\n if do_data:\n print('Yimg_shear',np.min(img_shear),np.max(img_shear))\n _,img_crop = nuclear_gen.do_random_crop([], img_shear, w_crop, h_crop, 0)\n if do_data:\n print('Yimg_crop',np.min(img_crop),np.max(img_crop))\n img_crop = np.array(img_crop,dtype=np.uint8)\n if do_data:\n print('Yimg_crop',np.min(img_crop),np.max(img_crop))\n imsave(path + '/masks/aug_' + str(rotation_angle) + \"_shu_\" + idstr + '.png',img_crop)\n\n _,img_shear = nuclear_gen.do_shear([], img_rot, 0.2, True)\n _,img_crop = nuclear_gen.do_random_crop([], img_shear, w_crop, h_crop, 0)\n img_crop = np.array(img_crop,dtype=np.uint8)\n imsave(path + '/masks/aug_' 
+ str(rotation_angle) + \"_shd_\" + idstr + '.png',img_crop)\n\n _,img_shear = nuclear_gen.do_shear([], img_rot, 0.2, False)\n _,img_crop = nuclear_gen.do_random_crop([], img_shear, w_crop, h_crop, 0)\n img_crop = np.array(img_crop,dtype=np.uint8)\n imsave(path + '/masks/aug_' + str(rotation_angle) + \"_shl_\" + idstr + '.png',img_crop)\n\n _,img_shear = nuclear_gen.do_shear([], img_rot, -0.2, False)\n _,img_crop = nuclear_gen.do_random_crop([], img_shear, w_crop, h_crop, 0)\n img_crop = np.array(img_crop,dtype=np.uint8)\n imsave(path + '/masks/aug_' + str(rotation_angle) + \"_shr_\" + idstr + '.png',img_crop)\n\n _,img_crop = nuclear_gen.do_random_crop([], img_rot, w_crop, h_crop, 0)\n img_crop = np.array(img_crop,dtype=np.uint8)\n imsave(path + '/masks/aug_' + str(rotation_angle) + \"_sh_\" + idstr + '.png',img_crop)\n\nif __name__ == \"__main__\":\n\n # allways call this first\n arg_config.arg_config(do_print=True)\n\n train_path = arg_config.cfg['train_path']\n test_path = arg_config.cfg['test_path']\n\n # Get train and test IDs from directory names\n train_ids = next(os.walk(train_path))[1]\n test_ids = next(os.walk(test_path))[1]\n\n # radically shrinks the dataset to make the program quicker\n do_short = True \n if do_short:\n train_ids = train_ids[:10]\n test_ids = test_ids[:10]\n\n # useful stats\n n_train_ids = len(train_ids)\n n_test_ids = len(test_ids)\n print(\"n_train_ids\",n_train_ids)\n print(\"n_test_ids\",n_test_ids)\n\n print(\"first few ids will be:\")\n for train_id in train_ids[:5]:\n print(train_id) \n\n ### DEAL WITH TRAINING SET\n if use_multiprocessing:\n train_paths = [train_path] * len(train_ids)\n counters = range(len(train_ids))\n p = multiprocessing.Pool(8)\n multi_args = zip(train_ids,train_paths,counters)\n p.map(augment, multi_args)\n else:\n # loop through the training images with tqdm\n sys.stdout.flush()\n for n, idstr in tqdm(enumerate(train_ids), total=n_train_ids):\n augment((idstr,train_path,n))\n \n ### DEAL WITH TEST SET\n # we're just going to save the cropped mirrored original\n # no augmentation\n print('Wrapify test images ... ')\n sys.stdout.flush()\n for n, idstr in tqdm(enumerate(test_ids), total=len(test_ids)):\n path = test_path + idstr\n img = imread(path + '/images/' + idstr + '.png')[:,:,:3]\n (h,w,_) = img.shape\n w_crop = int(round(w/16))*16 + 64\n h_crop = int(round(h/16))*16 + 64\n \n img_wrap = wrapify(img)\n \n if do_show:\n imshow(img)\n plt.show()\n imshow(img_wrap)\n plt.show()\n \n if do_save:\n img_crop,_ = nuclear_gen.do_random_crop(img_wrap, [], w_crop, h_crop, 0)\n img_crop = np.array(img_crop,dtype=np.uint8)\n imsave(path + '/images/wrap_' + idstr + '.png',img_crop)\n"
] | [
[
"numpy.expand_dims",
"numpy.maximum",
"numpy.min",
"numpy.fliplr",
"numpy.squeeze",
"numpy.flipud",
"numpy.max",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Shengyuan-Z/AGRL.pytorch | [
"6107fe0e4df5c8048a65f811bab46d2fb4624783",
"6107fe0e4df5c8048a65f811bab46d2fb4624783",
"6107fe0e4df5c8048a65f811bab46d2fb4624783"
] | [
"torchreid/models/resnet.py",
"torchreid/utils/torchtools.py",
"torchreid/data_manager/prid2011.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport torchvision\n\nfrom ..utils.torchtools import weights_init_kaiming\n\n__all__ = ['ResNet50', 'ResNet101', 'ResNet50M', 'ResNet50B']\n\n\nclass ResNet50(nn.Module):\n def __init__(self, num_classes, loss={'xent'}, **kwargs):\n super(ResNet50, self).__init__()\n self.loss = loss\n resnet50 = torchvision.models.resnet50(pretrained=True)\n self.base = nn.Sequential(*list(resnet50.children())[:-2])\n self.classifier = nn.Linear(2048, num_classes)\n self.feat_dim = 2048\n\n def forward(self, x):\n x = self.base(x)\n x = F.avg_pool2d(x, x.size()[2:])\n f = x.view(x.size(0), -1)\n if not self.training:\n return f\n y = self.classifier(f)\n\n if self.loss == {'xent'}:\n return y\n elif self.loss == {'xent', 'htri'}:\n return y, f\n else:\n raise KeyError(\"Unsupported loss: {}\".format(self.loss))\n\n\nclass ResNet101(nn.Module):\n def __init__(self, num_classes, loss={'xent'}, **kwargs):\n super(ResNet101, self).__init__()\n self.loss = loss\n resnet101 = torchvision.models.resnet101(pretrained=True)\n self.base = nn.Sequential(*list(resnet101.children())[:-2])\n self.classifier = nn.Linear(2048, num_classes)\n self.feat_dim = 2048 # feature dimension\n\n def forward(self, x):\n x = self.base(x)\n x = F.avg_pool2d(x, x.size()[2:])\n f = x.view(x.size(0), -1)\n if not self.training:\n return f\n y = self.classifier(f)\n\n if self.loss == {'xent'}:\n return y\n elif self.loss == {'xent', 'htri'}:\n return y, f\n else:\n raise KeyError(\"Unsupported loss: {}\".format(self.loss))\n\n\nclass ResNet50M(nn.Module):\n \"\"\"ResNet50 + mid-level features.\n\n Reference:\n Yu et al. The Devil is in the Middle: Exploiting Mid-level Representations for\n Cross-Domain Instance Matching. 
arXiv:1711.08106.\n \"\"\"\n def __init__(self, num_classes=0, loss={'xent'}, **kwargs):\n super(ResNet50M, self).__init__()\n self.loss = loss\n resnet50 = torchvision.models.resnet50(pretrained=True)\n base = nn.Sequential(*list(resnet50.children())[:-2])\n self.layers1 = nn.Sequential(base[0], base[1], base[2])\n self.layers2 = nn.Sequential(base[3], base[4])\n self.layers3 = base[5]\n self.layers4 = base[6]\n self.layers5a = base[7][0]\n self.layers5b = base[7][1]\n self.layers5c = base[7][2]\n self.fc_fuse = nn.Sequential(nn.Linear(4096, 1024), nn.BatchNorm1d(1024), nn.ReLU())\n self.classifier = nn.Linear(3072, num_classes)\n self.feat_dim = 3072 # feature dimension\n\n def forward(self, x):\n x1 = self.layers1(x)\n x2 = self.layers2(x1)\n x3 = self.layers3(x2)\n x4 = self.layers4(x3)\n x5a = self.layers5a(x4)\n x5b = self.layers5b(x5a)\n x5c = self.layers5c(x5b)\n\n x5a_feat = F.avg_pool2d(x5a, x5a.size()[2:]).view(x5a.size(0), x5a.size(1))\n x5b_feat = F.avg_pool2d(x5b, x5b.size()[2:]).view(x5b.size(0), x5b.size(1))\n x5c_feat = F.avg_pool2d(x5c, x5c.size()[2:]).view(x5c.size(0), x5c.size(1))\n\n midfeat = torch.cat((x5a_feat, x5b_feat), dim=1)\n midfeat = self.fc_fuse(midfeat)\n\n combofeat = torch.cat((x5c_feat, midfeat), dim=1)\n \n if not self.training:\n return combofeat\n \n prelogits = self.classifier(combofeat)\n \n if self.loss == {'xent'}:\n return prelogits\n elif self.loss == {'xent', 'htri'}:\n return prelogits, combofeat\n else:\n raise KeyError(\"Unsupported loss: {}\".format(self.loss))\n\n\nclass ResNet50B(nn.Module):\n \"\"\"Resnet50+bottleneck\n\n Reference:\n https://github.com/L1aoXingyu/reid_baseline\n \"\"\"\n def __init__(self, num_classes=0, loss={'xent'}, **kwargs):\n super(ResNet50B, self).__init__()\n self.loss = loss\n resnet50 = torchvision.models.resnet50(pretrained=True)\n resnet50.layer4[0].conv2.stride = (1, 1)\n resnet50.layer4[0].downsample[0].stride = (1, 1)\n self.base = nn.Sequential(*list(resnet50.children())[:-2])\n\n self.in_planes = 2048\n self.bottleneck = nn.Sequential(\n nn.Linear(self.in_planes, 512),\n nn.BatchNorm1d(512),\n nn.LeakyReLU(0.1),\n nn.Dropout(p=0.5))\n self.bottleneck.apply(weights_init_kaiming)\n\n self.classifier = nn.Linear(512, num_classes)\n self.classifier.apply(weights_init_kaiming)\n\n def forward(self, x):\n global_feat = self.base(x)\n global_feat = F.avg_pool2d(global_feat, global_feat.size()[-2:])\n global_feat = global_feat.view(global_feat.size(0), -1)\n if not self.training:\n return global_feat\n else:\n feat = self.bottleneck(global_feat)\n y = self.classifier(feat)\n\n if self.loss == {'xent'}:\n return y\n elif self.loss == {'xent', 'htri'}:\n return y, global_feat\n else:\n raise KeyError(\"Unsupported loss: {}\".format(self.loss))",
"from __future__ import absolute_import\nfrom __future__ import division\n\nimport torch\nimport torch.nn as nn\nimport gc\nimport time\n\n\ndef cur_time():\n return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())\n\n\ndef adjust_learning_rate(optimizer, base_lr, epoch, stepsize, gamma=0.1):\n # decay learning rate by 'gamma' for every 'stepsize'\n lr = base_lr * (gamma ** (epoch // stepsize))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef set_bn_to_eval(m):\n # 1. no update for running mean and var\n # 2. scale and shift parameters are still trainable\n classname = m.__class__.__name__\n if classname.find('BatchNorm') != -1:\n m.eval()\n\n\ndef set_wd(optim, num):\n assert isinstance(num, (int, float)), '{} is not int or float'.format(num)\n for group in optim.param_groups:\n if group['weight_decay'] != num:\n group['weight_decay'] = num\n\n\ndef count_num_param(model):\n num_param = sum(p.numel() for p in model.parameters()) / 1e+06\n if hasattr(model, 'classifier') and isinstance(model.classifier, nn.Module):\n # we ignore the classifier because it is unused at test time\n num_param -= sum(p.numel() for p in model.classifier.parameters()) / 1e+06\n return num_param\n\n\ndef flip_tensor(x, dim):\n indices = [slice(None)] * x.dim()\n indices[dim] = torch.arange(x.size(dim) - 1, -1, -1,\n dtype=torch.long, device=x.device)\n return x[tuple(indices)]\n\n\ndef weights_init_kaiming(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif classname.find('Conv') != -1:\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif classname.find('BatchNorm') != -1:\n if m.affine:\n nn.init.normal_(m.weight, 1.0, 0.001)\n nn.init.constant_(m.bias, 0.0)\n\n\ndef weights_init_xavier(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n nn.init.xavier_normal_(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif classname.find('Conv') != -1:\n nn.init.xavier_normal_(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif classname.find('BatchNorm') != -1:\n if m.affine:\n nn.init.normal_(m.weight, 1.0, 0.001)\n nn.init.constant_(m.bias, 0.0)\n\n\ndef weights_init_classifier(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n nn.init.normal_(m.weight.data, std=0.001)\n if m.bias is not None:\n nn.init.constant_(m.bias.data, 0.0)\n\n\ndef mem_report():\n \"\"\"Report the memory usage of the tensor.storage in pytorch\n Both on CPUs and GPUs are reported\"\"\"\n\n def _mem_report(tensors, mem_type):\n '''Print the selected tensors of type\n There are two major storage types in our major concern:\n - GPU: tensors transferred to CUDA devices\n - CPU: tensors remaining on the system memory (usually unimportant)\n Args:\n - tensors: the tensors of specified type\n - mem_type: 'CPU' or 'GPU' in current implementation '''\n print('Storage on %s' %(mem_type))\n print('-'*LEN)\n total_numel = 0\n total_mem = 0\n visited_data = []\n for tensor in tensors:\n if tensor.is_sparse:\n continue\n # a data_ptr indicates a memory block allocated\n data_ptr = tensor.storage().data_ptr()\n if data_ptr in visited_data:\n continue\n visited_data.append(data_ptr)\n\n numel = tensor.storage().size()\n total_numel += numel\n element_size = tensor.storage().element_size()\n mem = numel*element_size /1024/1024 # 
32bit=4Byte, MByte\n total_mem += mem\n element_type = type(tensor).__name__\n size = tuple(tensor.size())\n\n print('%s\\t\\t%s\\t\\t%.2f' % (\n element_type,\n size,\n mem) )\n print('-'*LEN)\n print('Total Tensors: %d \\tUsed Memory Space: %.2f MBytes' % (total_numel, total_mem) )\n print('-'*LEN)\n\n LEN = 65\n print('='*LEN)\n objects = gc.get_objects()\n print('%s\\t%s\\t\\t\\t%s' %('Element type', 'Size', 'Used MEM(MBytes)') )\n tensors = [obj for obj in objects if torch.is_tensor(obj)]\n cuda_tensors = [t for t in tensors if t.is_cuda]\n host_tensors = [t for t in tensors if not t.is_cuda]\n _mem_report(cuda_tensors, 'GPU')\n _mem_report(host_tensors, 'CPU')\n print('='*LEN)",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport glob\nimport re\nimport sys\nimport urllib\nimport tarfile\nimport zipfile\nimport os.path as osp\nfrom scipy.io import loadmat\nimport numpy as np\nimport h5py\nfrom scipy.misc import imsave\nimport json\n\nfrom torchreid.utils.iotools import mkdir_if_missing, write_json, read_json\n\n\nclass PRID2011(object):\n \"\"\"\n PRID2011\n\n Reference:\n Hirzer et al. Person Re-Identification by Descriptive and Discriminative Classification. SCIA 2011.\n\n URL: https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/PRID11/\n \n Dataset statistics:\n # identities: 200\n # tracklets: 400\n # cameras: 2\n \"\"\"\n dataset_dir = 'prid2011'\n\n def __init__(self, root='data', split_id=0, min_seq_len=0, verbose=True, **kwargs):\n self.dataset_dir = osp.join(root, self.dataset_dir)\n self.split_path = osp.join(self.dataset_dir, 'splits_prid2011.json')\n self.cam_a_path = osp.join(self.dataset_dir, 'prid_2011', 'multi_shot', 'cam_a')\n self.cam_b_path = osp.join(self.dataset_dir, 'prid_2011', 'multi_shot', 'cam_b')\n self.pose_file = osp.join(self.dataset_dir, 'pose.json')\n\n self._check_before_run()\n with open(self.pose_file, 'r') as f:\n self.poses = json.load(f)\n # process the pose information\n self.process_poses = dict()\n for key in self.poses:\n # save only one body\n maxidx = -1\n maxarea = -1\n maxscore = -1\n assert len(self.poses[key]['bodies']) >= 1, 'pose of {} is empty'.format(key)\n if len(self.poses[key]['bodies']) == 1:\n self.process_poses[key] = np.array(self.poses[key]['bodies'][0]['joints']).reshape((-1, 3))\n else:\n for idx, body in enumerate(self.poses[key]['bodies']):\n tmp_kps = np.array(body['joints']).reshape((-1, 3))\n tmp_area = (max(tmp_kps[:, 0]) - min(tmp_kps[:, 0])) * (max(tmp_kps[:, 1]) - min(tmp_kps[:, 1]))\n tmp_score = body['score']\n if tmp_score > maxscore:\n if tmp_area > maxarea and tmp_score > 1.1 * maxscore:\n maxscore = tmp_score\n maxidx = idx\n self.process_poses[key] = np.array(self.poses[key]['bodies'][maxidx]['joints']).reshape((-1, 3))\n splits = read_json(self.split_path)\n if split_id >= len(splits):\n raise ValueError(\"split_id exceeds range, received {}, but expected between 0 and {}\".format(split_id, len(splits)-1))\n split = splits[split_id]\n train_dirs, test_dirs = split['train'], split['test']\n print(\"# train identites: {}, # test identites {}\".format(len(train_dirs), len(test_dirs)))\n\n train, num_train_tracklets, num_train_pids, num_imgs_train = \\\n self._process_data(train_dirs, cam1=True, cam2=True)\n query, num_query_tracklets, num_query_pids, num_imgs_query = \\\n self._process_data(test_dirs, cam1=True, cam2=False)\n gallery, num_gallery_tracklets, num_gallery_pids, num_imgs_gallery = \\\n self._process_data(test_dirs, cam1=False, cam2=True)\n\n num_imgs_per_tracklet = num_imgs_train + num_imgs_query + num_imgs_gallery\n min_num = np.min(num_imgs_per_tracklet)\n max_num = np.max(num_imgs_per_tracklet)\n avg_num = np.mean(num_imgs_per_tracklet)\n\n num_total_pids = num_train_pids + num_query_pids\n num_total_tracklets = num_train_tracklets + num_query_tracklets + num_gallery_tracklets\n\n if verbose:\n print(\"=> PRID2011 loaded\")\n print(\"Dataset statistics:\")\n print(\" ------------------------------\")\n print(\" subset | # ids | # tracklets\")\n print(\" ------------------------------\")\n print(\" train | {:5d} | {:8d}\".format(num_train_pids, num_train_tracklets))\n 
print(\" query | {:5d} | {:8d}\".format(num_query_pids, num_query_tracklets))\n print(\" gallery | {:5d} | {:8d}\".format(num_gallery_pids, num_gallery_tracklets))\n print(\" ------------------------------\")\n print(\" total | {:5d} | {:8d}\".format(num_total_pids, num_total_tracklets))\n print(\" number of images per tracklet: {} ~ {}, average {:.1f}\".format(min_num, max_num, avg_num))\n print(\" ------------------------------\")\n\n self.train = train\n self.query = query\n self.gallery = gallery\n\n self.num_train_pids = num_train_pids\n self.num_query_pids = num_query_pids\n self.num_gallery_pids = num_gallery_pids\n\n def _check_before_run(self):\n \"\"\"Check if all files are available before going deeper\"\"\"\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n\n def _process_data(self, dirnames, cam1=True, cam2=True):\n tracklets = []\n num_imgs_per_tracklet = []\n dirname2pid = {dirname:i for i, dirname in enumerate(dirnames)}\n \n for dirname in dirnames:\n if cam1:\n person_dir = osp.join(self.cam_a_path, dirname)\n img_names = sorted(glob.glob(osp.join(person_dir, '*.png')))\n assert len(img_names) > 0\n img_names = tuple(img_names)\n pid = dirname2pid[dirname]\n tracklets.append((img_names, pid, 0))\n num_imgs_per_tracklet.append(len(img_names))\n\n if cam2:\n person_dir = osp.join(self.cam_b_path, dirname)\n img_names = sorted(glob.glob(osp.join(person_dir, '*.png')))\n assert len(img_names) > 0\n img_names = tuple(img_names)\n pid = dirname2pid[dirname]\n tracklets.append((img_names, pid, 1))\n num_imgs_per_tracklet.append(len(img_names))\n\n num_tracklets = len(tracklets)\n num_pids = len(dirnames)\n\n return tracklets, num_tracklets, num_pids, num_imgs_per_tracklet\n"
] | [
[
"torch.nn.BatchNorm1d",
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"torch.nn.ReLU"
],
[
"torch.nn.init.constant_",
"torch.nn.init.xavier_normal_",
"torch.is_tensor",
"torch.nn.init.normal_",
"torch.nn.init.kaiming_normal_"
],
[
"numpy.max",
"numpy.array",
"numpy.mean",
"numpy.min"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Takishima/mindquantum | [
"e90dfe474b759023d7ae18281b9a87cb8d223d04",
"e90dfe474b759023d7ae18281b9a87cb8d223d04"
] | [
"tutorials/source/quantum_approximate_optimization_algorithm.py",
"mindquantum/third_party/unitary_cc.py"
] | [
"# -*- coding: utf-8 -*-\n# Copyright 2022 <Huawei Technologies Co., Ltd>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Example of the quantum approximate optimization algorithm.\"\"\"\n\nimport matplotlib.pyplot as plt\nimport mindspore as ms\nimport mindspore.nn as nn\nimport networkx as nx\nimport numpy as np\n\nfrom mindquantum.core import RX, UN, ZZ, Circuit, H, Hamiltonian, QubitOperator\nfrom mindquantum.framework import MQAnsatzOnlyLayer\nfrom mindquantum.simulator import Simulator\n\ng = nx.Graph()\nnx.add_path(g, [0, 1])\nnx.add_path(g, [1, 2])\nnx.add_path(g, [2, 3])\nnx.add_path(g, [3, 4])\nnx.add_path(g, [0, 4])\nnx.add_path(g, [0, 2])\nnx.draw(g, with_labels=True, font_weight='bold')\n\n\ndef build_hc(g, para):\n \"\"\"Build an HC circuit.\"\"\"\n hc = Circuit()\n for i in g.edges:\n hc += ZZ(para).on(i)\n return hc\n\n\ndef build_hb(g, para):\n \"\"\"Build an HB circuit.\"\"\"\n hc = Circuit()\n for i in g.nodes:\n hc += RX(para).on(i)\n return hc\n\n\ndef build_ansatz(g, p):\n \"\"\"Build an ansatz circuit.\"\"\"\n c = Circuit()\n for i in range(p):\n c += build_hc(g, f'g{i}')\n c += build_hb(g, f'b{i}')\n return c\n\n\ndef build_ham(g):\n \"\"\"Build a circuit for the hamiltonian.\"\"\"\n hc = QubitOperator()\n for i in g.edges:\n hc += QubitOperator(f'Z{i[0]} Z{i[1]}')\n return hc\n\n\np = 4\nham = Hamiltonian(build_ham(g))\nansatz = build_ansatz(g, p)\ninit_state_circ = UN(H, g.nodes)\n\n\nms.context.set_context(mode=ms.context.PYNATIVE_MODE, device_target=\"CPU\")\n\ncirc = init_state_circ + ansatz\nsim = Simulator('projectq', circ.n_qubits)\ngrad_ops = sim.get_expectation_with_grad(ham, circ)\nnet = MQAnsatzOnlyLayer(grad_ops)\nopti = nn.Adam(net.trainable_params(), learning_rate=0.05)\ntrain_net = nn.TrainOneStepCell(net, opti)\n\nfor i in range(600):\n if i % 10 == 0:\n print(\"train step:\", i, \", cut:\", (len(g.edges) - train_net()) / 2)\n\npr = dict(zip(ansatz.params_name, net.weight.asnumpy()))\nprint(circ.get_qs(pr=pr, ket=True))\n\n\ndef show_amp(state):\n \"\"\"Show the amplitude of a quantum state.\"\"\"\n amp = np.abs(state) ** 2\n n_qubits = int(np.log2(len(amp)))\n labels = [bin(i)[2:].zfill(n_qubits) for i in range(len(amp))]\n plt.bar(labels, amp)\n plt.xticks(rotation=45)\n plt.show()\n\n\nstate = circ.get_qs(pr=pr)\nshow_amp(state)\n",
"# -*- coding: utf-8 -*-\n# Copyright 2017 The OpenFermion Developers.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Module to create and manipulate unitary coupled cluster operators.\"\"\"\n\nimport itertools\n\nimport numpy\nfrom openfermion.utils.indexing import down_index, up_index\n\nfrom mindquantum.core.parameterresolver import ParameterResolver\n\n\ndef uccsd_singlet_get_packed_amplitudes(single_amplitudes, double_amplitudes, n_qubits, n_electrons):\n r\"\"\"\n Convert amplitudes for use with singlet UCCSD.\n\n The output list contains only those amplitudes that are relevant to\n singlet UCCSD, in an order suitable for use with the function\n `uccsd_singlet_generator`.\n\n Args:\n single_amplitudes(numpy.ndarray): :math:`N\\times N` array storing single excitation\n amplitudes corresponding to :math:`t_{i,j} * (a_i^\\dagger a_j - \\text{H.C.})`\n double_amplitudes(numpy.ndarray): :math:`N\\times N\\times N\\times N` array storing double\n excitation amplitudes corresponding to\n :math:`t_{i,j,k,l} * (a_i^\\dagger a_j a_k^\\dagger a_l - \\text{H.C.})`\n n_qubits(int): Number of spin-orbitals used to represent the system,\n which also corresponds to number of qubits in a non-compact map.\n n_electrons(int): Number of electrons in the physical system.\n\n Returns:\n ParameterResolver, List storing the unique single\n and double excitation amplitudes for a singlet UCCSD operator.\n The ordering lists unique single excitations before double\n excitations.\n\n Examples:\n >>> import numpy as np\n >>> from mindquantum.algorithm.nisq.chem import uccsd_singlet_get_packed_amplitudes\n >>> n_qubits, n_electrons = 4, 2\n >>> np.random.seed(42)\n >>> ccsd_single_amps = np.random.random((4, 4))\n >>> ccsd_double_amps = np.random.random((4, 4, 4, 4))\n >>> uccsd_singlet_get_packed_amplitudes(ccsd_single_amps, ccsd_double_amps,\n ... 
n_qubits, n_electrons)\n {'s_0': 0.6011150117432088, 'd1_0': 0.7616196153287176}\n \"\"\"\n n_spatial_orbitals = n_qubits // 2\n n_occupied = int(numpy.ceil(n_electrons / 2))\n n_virtual = n_spatial_orbitals - n_occupied\n\n singles = ParameterResolver()\n doubles_1 = ParameterResolver()\n doubles_2 = ParameterResolver()\n\n # Get singles and doubles amplitudes associated with one\n # spatial occupied-virtual pair\n for p, q in itertools.product(range(n_virtual), range(n_occupied)):\n # Get indices of spatial orbitals\n virtual_spatial = n_occupied + p\n occupied_spatial = q\n # Get indices of spin orbitals\n virtual_up = up_index(virtual_spatial)\n virtual_down = down_index(virtual_spatial)\n occupied_up = up_index(occupied_spatial)\n occupied_down = down_index(occupied_spatial)\n\n # Get singles amplitude\n # Just get up amplitude, since down should be the same\n singles[f's_{len(singles)}'] = single_amplitudes[virtual_up, occupied_up]\n\n # Get doubles amplitude\n doubles_1[f'd1_{len(doubles_1)}'] = double_amplitudes[virtual_up, occupied_up, virtual_down, occupied_down]\n\n # Get doubles amplitudes associated with two spatial occupied-virtual pairs\n for (p, q), (r, s) in itertools.combinations(itertools.product(range(n_virtual), range(n_occupied)), 2):\n # Get indices of spatial orbitals\n virtual_spatial_1 = n_occupied + p\n occupied_spatial_1 = q\n virtual_spatial_2 = n_occupied + r\n occupied_spatial_2 = s\n\n # Get indices of spin orbitals\n # Just get up amplitudes, since down and cross terms should be the same\n virtual_1_up = up_index(virtual_spatial_1)\n occupied_1_up = up_index(occupied_spatial_1)\n virtual_2_up = up_index(virtual_spatial_2)\n occupied_2_up = up_index(occupied_spatial_2)\n\n # Get amplitude\n doubles_2[f'd2_{len(doubles_2)}'] = double_amplitudes[virtual_1_up, occupied_1_up, virtual_2_up, occupied_2_up]\n\n return singles + doubles_1 + doubles_2\n\n\ndef uccsd_singlet_generator(n_qubits, n_electrons, anti_hermitian=True):\n \"\"\"\n Create a singlet UCCSD generator for a system with n_electrons.\n\n This function generates a FermionOperator for a UCCSD generator designed\n to act on a single reference state consisting of n_qubits spin orbitals\n and n_electrons electrons, that is a spin singlet operator, meaning it\n conserves spin.\n\n Args:\n n_qubits(int): Number of spin-orbitals used to represent the system,\n which also corresponds to number of qubits in a non-compact map.\n n_electrons(int): Number of electrons in the physical system.\n anti_hermitian(bool): Flag to generate only normal CCSD operator\n rather than unitary variant, primarily for testing\n\n Returns:\n FermionOperator, Generator of the UCCSD operator that\n builds the UCCSD wavefunction.\n\n Examples:\n >>> from mindquantum.algorithm.nisq.chem import uccsd_singlet_generator\n >>> uccsd_singlet_generator(4, 2)\n -s_0 [0^ 2] +\n -d1_0 [0^ 2 1^ 3] +\n -s_0 [1^ 3] +\n -d1_0 [1^ 3 0^ 2] +\n s_0 [2^ 0] +\n d1_0 [2^ 0 3^ 1] +\n s_0 [3^ 1] +\n d1_0 [3^ 1 2^ 0]\n \"\"\"\n from mindquantum.core.operators import FermionOperator\n\n if n_qubits % 2 != 0:\n raise ValueError('The total number of spin-orbitals should be even.')\n\n n_spatial_orbitals = n_qubits // 2\n n_occupied = int(numpy.ceil(n_electrons / 2))\n n_virtual = n_spatial_orbitals - n_occupied\n\n # Initialize operator\n generator = FermionOperator()\n\n # Generate excitations\n spin_index_functions = [up_index, down_index]\n # Generate all spin-conserving single and double excitations derived\n # from one spatial occupied-virtual pair\n 
for i, (p, q) in enumerate(itertools.product(range(n_virtual), range(n_occupied))):\n\n # Get indices of spatial orbitals\n virtual_spatial = n_occupied + p\n occupied_spatial = q\n\n for spin in range(2):\n # Get the functions which map a spatial orbital index to a\n # spin orbital index\n this_index = spin_index_functions[spin]\n other_index = spin_index_functions[1 - spin]\n\n # Get indices of spin orbitals\n virtual_this = this_index(virtual_spatial)\n virtual_other = other_index(virtual_spatial)\n occupied_this = this_index(occupied_spatial)\n occupied_other = other_index(occupied_spatial)\n\n # Generate single excitations\n coeff = ParameterResolver({f's_{i}': 1})\n generator += FermionOperator(((virtual_this, 1), (occupied_this, 0)), coeff)\n if anti_hermitian:\n generator += FermionOperator(((occupied_this, 1), (virtual_this, 0)), -1 * coeff)\n\n # Generate double excitation\n coeff = ParameterResolver({f'd1_{i}': 1})\n generator += FermionOperator(\n ((virtual_this, 1), (occupied_this, 0), (virtual_other, 1), (occupied_other, 0)), coeff\n )\n if anti_hermitian:\n generator += FermionOperator(\n ((occupied_other, 1), (virtual_other, 0), (occupied_this, 1), (virtual_this, 0)), -1 * coeff\n )\n\n # Generate all spin-conserving double excitations derived\n # from two spatial occupied-virtual pairs\n for i, ((p, q), (r, s)) in enumerate(\n itertools.combinations(itertools.product(range(n_virtual), range(n_occupied)), 2)\n ):\n\n # Get indices of spatial orbitals\n virtual_spatial_1 = n_occupied + p\n occupied_spatial_1 = q\n virtual_spatial_2 = n_occupied + r\n occupied_spatial_2 = s\n\n # Generate double excitations\n coeff = ParameterResolver({f'd2_{i}': 1})\n for (spin_a, spin_b) in itertools.product(range(2), repeat=2):\n # Get the functions which map a spatial orbital index to a\n # spin orbital index\n index_a = spin_index_functions[spin_a]\n index_b = spin_index_functions[spin_b]\n\n # Get indices of spin orbitals\n virtual_1_a = index_a(virtual_spatial_1)\n occupied_1_a = index_a(occupied_spatial_1)\n virtual_2_b = index_b(virtual_spatial_2)\n occupied_2_b = index_b(occupied_spatial_2)\n\n if virtual_1_a == virtual_2_b:\n continue\n if occupied_1_a == occupied_2_b:\n continue\n else:\n\n generator += FermionOperator(\n ((virtual_1_a, 1), (occupied_1_a, 0), (virtual_2_b, 1), (occupied_2_b, 0)), coeff\n )\n if anti_hermitian:\n generator += FermionOperator(\n ((occupied_2_b, 1), (virtual_2_b, 0), (occupied_1_a, 1), (virtual_1_a, 0)), -1 * coeff\n )\n\n return generator\n"
] | [
[
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"numpy.abs",
"matplotlib.pyplot.bar"
],
[
"numpy.ceil"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
UYousafzai/pytabulate | [
"418300a04c8a730d2808c77f5630561e690d28d5"
] | [
"pytabulate/generate_analytics.py"
] | [
"\"\"\"\n\nCORE ANALYTICS MODULE: Generates the analytical meta-data from tables database, this distribution is consumed in data generation and augmentation\nmethods.\n\nMax Row Support: 17\n\nMax Column Support: 40\n\n\"\"\"\n\nimport os\nimport cv2\nimport glob\nimport pickle\nimport numpy as np\nimport utility as ut\nfrom tqdm import tqdm\nfrom xml.etree import ElementTree\n\nmax_rows = 40\nmax_cols = 17\n\n\ndef merge_check(cell_data):\n \"\"\"\n ARGUMENTS:\n\n cell_data: this contains the cell information from a specific table.\n\n RETURNS:\n\n merge_metadata: this contains the merging information about different cells.\n \"\"\"\n merge_metadata = []\n merged_columns = []\n merged_rows = []\n for cell in cell_data:\n if cell.attrib[\"startCol\"] != cell.attrib[\"endCol\"]:\n temp = []\n temp.append(cell.attrib[\"startCol\"])\n temp.append(cell.attrib[\"endCol\"])\n temp.append(cell.attrib[\"startRow\"])\n merged_columns.append(temp)\n if cell.attrib[\"startRow\"] != cell.attrib[\"endRow\"]:\n temp = []\n temp.append(cell.attrib[\"startRow\"])\n temp.append(cell.attrib[\"endRow\"])\n temp.append(cell.attrib[\"startCol\"])\n merged_rows.append(temp)\n merge_metadata.append(merged_rows)\n merge_metadata.append(merged_columns)\n return merge_metadata\n\n\ndef fetch_table_ocr(ocr_data, x0, x1, y0, y1):\n \"\"\"\n #TODO\n ARGUMENTS:\n\n ocr_data: This contains the ocr data of the entire file.\n\n x0: is the x coordinate of the top left of the table.\n\n x1: is the y coordinate of the top left of the table.\n\n y0: is the x coordinate of the bottom right of the table.\n\n y1: is the y coordinate of the bottom right of the table.\n\n RETURNS:\n\n table_ocr_data: This return contains the ocr data that is inside\n the tables bounding box.\n \"\"\"\n table_ocr_data = []\n for data_item in ocr_data:\n if (\n (int(data_item[2]) >= int(x0))\n and (int(data_item[4]) <= int(x1))\n and (int(data_item[3]) >= int(y0))\n and (int(data_item[5]) <= int(y1))\n ):\n table_ocr_data.append(data_item)\n return table_ocr_data\n\n\ndef cell_distributed_ocr(raw_ocr_data, cell_data):\n \"\"\"\n ARGUMENTS:\n\n raw_ocr_data:this is the table distributed ocr data that.\n\n cell_data: this contains the cell information of a table.\n\n RETURNS:\n\n final_ocr_data: this returns a list of ocr data relavent according to each cell\n \"\"\"\n final_ocr_data = []\n for cell in cell_data:\n temp = []\n temp.append(cell)\n for data_item in raw_ocr_data:\n if (\n (int(data_item[2]) >= int(cell.attrib[\"x0\"]))\n and (int(data_item[4]) <= int(cell.attrib[\"x1\"]))\n and (int(data_item[3]) >= int(cell.attrib[\"y0\"]))\n and (int(data_item[5]) <= int(cell.attrib[\"y1\"]))\n ):\n temp.append(data_item)\n final_ocr_data.append(temp)\n return final_ocr_data\n\n\ndef localize_data(xml_file, ocr_file, img_file):\n \"\"\"\n ARGUMENTS:\n\n xml_file: a path to an xml file inside a folder.\n\n ocr_file: a path to an ocr file inside a folder.\n\n img_file: a path to an image file inside a folder.\n\n RETURNS:\n\n analytics_data: a data item containing all the relavent analytics for\n the given xml and ocr file.\n \"\"\"\n analytics_data = []\n ocr_data = []\n with open(ocr_file, \"rb\") as f:\n ocr_data = pickle.load(f)\n if os.path.exists(xml_file) and os.path.exists(ocr_file) and os.path.exists(img_file):\n tree = ElementTree.parse(xml_file)\n root = tree.getroot()\n for obj in root.findall(\".//Table\"):\n data_list = []\n if len(obj.findall(\"Row\")) + 1 >= max_rows:\n continue\n if len(obj.findall(\"Column\")) + 1 >= max_cols:\n 
continue\n data_list.append(len(obj.findall(\"Row\")) + 1)\n data_list.append(len(obj.findall(\"Column\")) + 1)\n cell_data = obj.findall(\"Cell\")\n data_list.append(len(cell_data))\n merged_data = merge_check(cell_data)\n data_list.append(merged_data)\n data_list.append(obj.attrib[\"orientation\"])\n raw_ocr_data = fetch_table_ocr(\n ocr_data,\n obj.attrib[\"x0\"],\n obj.attrib[\"x1\"],\n obj.attrib[\"y0\"],\n obj.attrib[\"y1\"],\n )\n final_ocr_data = cell_distributed_ocr(raw_ocr_data, cell_data)\n data_list.append(final_ocr_data)\n data_list.append(xml_file)\n\n rows = [int(obj.attrib[\"y0\"])]\n for row in obj.findall(\"Row\"):\n rows.append(int(row.attrib[\"y0\"]))\n rows.append(int(obj.attrib[\"y0\"]))\n\n cols = [int(obj.attrib[\"x0\"])]\n for col in obj.findall(\"Column\"):\n cols.append(int(col.attrib[\"x0\"]))\n cols.append(int(obj.attrib[\"x0\"]))\n\n data_list.append(rows)\n data_list.append(cols)\n data_list.append(img_file)\n analytics_data.append(data_list)\n return analytics_data\n\n\ndef get_lined_table_distribution(analytics_data, x=17, y=17):\n \"\"\"\n ARGUMENTS:\n\n analytics_data: a list of iterable tables with relavent data.\n\n RETURNS:\n\n lined_table_distribution: a 1D matrix containing line distribution in data.\n\n index 0: contains frequency of fully lined tables\n\n index 1: contains frequency of partially row lined tables\n\n index 2: contains frequency of partially column lined tables\n\n index 3: contains frequency of partially column lined tables\n\n \"\"\"\n\n lined_table_db = np.zeros((4))\n for data_item in tqdm(analytics_data):\n img_path = data_item[9]\n _img = cv2.imread(img_path)\n\n tmp = cv2.cvtColor(_img, cv2.COLOR_BGR2GRAY)\n _img = cv2.adaptiveThreshold(tmp, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)\n kernel = np.ones((1, 5), np.uint8)\n horizontal_lines = np.copy(_img)\n horizontal_lines = cv2.dilate(horizontal_lines, kernel, iterations=x)\n horizontal_lines = cv2.erode(horizontal_lines, kernel, iterations=x)\n kernel = np.ones((5, 1), np.uint8)\n vertical_lines = np.copy(_img)\n vertical_lines = cv2.dilate(vertical_lines, kernel, iterations=y)\n vertical_lines = cv2.erode(vertical_lines, kernel, iterations=y)\n ver_pos = np.where(vertical_lines == 0)\n hor_pos = np.where(horizontal_lines == 0)\n _w, _h = _img.shape\n vert_img = np.zeros((_w, _h), dtype=np.uint8)\n hor_img = np.zeros((_w, _h), dtype=np.uint8)\n vert_img[ver_pos] = 255\n hor_img[hor_pos] = 255\n vert_lab, _ = cv2.connectedComponents(vert_img)\n hor_lab, _ = cv2.connectedComponents(hor_img)\n vert_lab = vert_lab - 1\n hor_lab = hor_lab - 1\n\n if vert_lab > 2 and hor_lab > 2:\n lined_table_db[0] += 1\n elif vert_lab <= 2 and hor_lab > 2:\n lined_table_db[1] += 1\n elif vert_lab > 2 and hor_lab <= 2:\n lined_table_db[2] += 1\n elif vert_lab < 2 and hor_lab < 2:\n lined_table_db[3] += 1\n return lined_table_db\n\n\ndef get_row_column_distribution(analytics_data):\n \"\"\"\n Gets us the row_column Distribution of tables occurring throughout the dataset\n this gives us an idea on what type of tables are the most commonly occuring ones\n\n ARGUMENTS:\n\n analytics_data: a list of iterable tables with relavent data.\n\n RETURNS:\n\n row_column_db: a 2D matrix containing the distribution\n \"\"\"\n total_tables = len(analytics_data)\n row_column_db = np.zeros((50, 30))\n for data_item in analytics_data:\n row_column_db[int(data_item[0]), int(data_item[1])] += 1\n row_column_db = np.true_divide(row_column_db, total_tables)\n return row_column_db\n\n\ndef 
get_RowToColumn_merge_db(analytics_data):\n \"\"\"\n This method gives us the distribution between a row and column merging in\n all types of tables\n\n ARGUMENTS:\n\n analytics_data: a list of iterable tables with relavent data\n\n RETURNS:\n\n a 3D numpy matrix consisting of the above mentioned distribution\n \"\"\"\n RtC_merge_db = np.zeros((50, 30, 2))\n for data_item in analytics_data:\n if (int(len(data_item[3][0])) != 0) or (int(len(data_item[3][1])) != 0):\n RtC_merge_db[int(data_item[0]), int(data_item[1]), int(0)] += len(data_item[3][0])\n RtC_merge_db[int(data_item[0]), int(data_item[1]), int(1)] += len(data_item[3][1])\n RtC_merge_db[:, :, 0], RtC_merge_db[:, :, 1] = (\n (RtC_merge_db[:, :, 0]) / (RtC_merge_db[:, :, 0] + RtC_merge_db[:, :, 1]),\n (RtC_merge_db[:, :, 1]) / (RtC_merge_db[:, :, 0] + RtC_merge_db[:, :, 1]),\n )\n RtC_merge_db = np.nan_to_num(RtC_merge_db)\n return RtC_merge_db\n\n\ndef get_cell_width_db(analytics_data):\n \"\"\"\n This method gives us the min, average and max of the width of cells distributed\n throughout the data distinguishable by row and columns in the table.\n\n ARGUMENTS:\n\n analytics_data: a list of iterable tables with relavent data\n\n RETURN:\n\n cell_width_db: a 5D numpy array having distributions of widths over cells in\n different tables. it keeps track of min, avg, max, count in particular table\n types.\n \"\"\"\n\n cell_width_db = np.zeros((max_rows, max_cols, max_rows, max_cols, 3), dtype=\"float\")\n for data_item in analytics_data:\n for cell_list in data_item[5]:\n cell_obj = cell_list[0]\n _row = int(data_item[0])\n _col = int(data_item[1])\n _curr_r = int(cell_obj.attrib[\"startRow\"])\n _curr_c = int(cell_obj.attrib[\"startCol\"])\n _width = data_item[8][_curr_c + 1] - data_item[8][_curr_c]\n\n # changing average\n cell_width_db[_row, _col, _curr_r, _curr_c, 0] += _width\n cell_width_db[_row, _col, _curr_r, _curr_c, 2] += 1\n cell_width_db[:, :, :, :, 0] /= cell_width_db[:, :, :, :, 2]\n for data_item in analytics_data:\n for cell_list in data_item[5]:\n cell_obj = cell_list[0]\n _row = int(data_item[0])\n _col = int(data_item[1])\n _curr_r = int(cell_obj.attrib[\"startRow\"])\n _curr_c = int(cell_obj.attrib[\"startCol\"])\n _width = data_item[8][_curr_c + 1] - data_item[8][_curr_c]\n\n # changing average\n mean = cell_width_db[_row, _col, _curr_r, _curr_c, 0]\n cell_width_db[_row, _col, _curr_r, _curr_c, 1] += (_width - mean) ** 2\n cell_width_db[:, :, :, :, 1] /= cell_width_db[:, :, :, :, 2]\n cell_width_db[:, :, :, :, 1] = np.sqrt(cell_width_db[:, :, :, :, 1])\n\n return cell_width_db\n\n\ndef get_cell_height_db(analytics_data):\n \"\"\"\n This method gives us the min, average and max of the height of cells distributed\n throughout the data distinguishable by row and columns in the table.\n\n ARGUMENTS:\n\n analytics_data: a list of iterable tables with relavent data\n\n RETURN:\n\n cell_height_db: a 5D numpy array having distributions of heights over cells in\n different tables.\n \"\"\"\n cell_height_db = np.zeros((max_rows, max_cols, max_rows, max_cols, 4), dtype=\"float\")\n for data_item in analytics_data:\n for cell_list in data_item[5]:\n cell_obj = cell_list[0]\n _row = int(data_item[0])\n _col = int(data_item[1])\n _curr_r = int(cell_obj.attrib[\"startRow\"])\n _curr_c = int(cell_obj.attrib[\"startCol\"])\n _height = float(cell_obj.attrib[\"y1\"]) - float(cell_obj.attrib[\"y0\"])\n\n # checking minimum\n _current_min = cell_height_db[_row, _col, _curr_r, _curr_c, 0]\n if _height < _current_min:\n 
cell_height_db[_row, _col, _curr_r, _curr_c, 0] = _height\n\n # checking maximum\n _current_max = cell_height_db[_row, _col, _curr_r, _curr_c, 2]\n if _height > _current_max:\n cell_height_db[_row, _col, _curr_r, _curr_c, 2] = _height\n\n # changing average\n _old_average = cell_height_db[_row, _col, _curr_r, _curr_c, 1]\n n1 = cell_height_db[_row, _col, _curr_r, _curr_c, 3]\n cell_height_db[_row, _col, _curr_r, _curr_c, 3] += 1\n n2 = cell_height_db[_row, _col, _curr_r, _curr_c, 3]\n _new_average = ((_old_average * n1) + _height) / n2\n cell_height_db[_row, _col, _curr_r, _curr_c, 1] = float(_new_average)\n return cell_height_db\n\n\ndef get_type_merge_db(analytics_data):\n \"\"\"\n This method gives us the merge distribution by type, meaning the ditribution\n is based on what probability a certain degree table contains a merging\n\n ARGUMENTS:\n\n analytics_data: a list of iterable tables with relavent dat\n\n RETURNS:\n\n row_column_merge_db: a 2D matrix containing the distribution\n \"\"\"\n typeMerge_db = np.zeros((max_rows, max_cols))\n total_merging_tables = 0\n for data_item in analytics_data:\n if int(len(data_item[3][0])) != 0 or int(len(data_item[3][1])) != 0:\n total_merging_tables += 1\n typeMerge_db[int(data_item[0]), int(data_item[1])] += 1\n if total_merging_tables != 0:\n typeMerge_db = np.true_divide(typeMerge_db, total_merging_tables)\n return typeMerge_db\n\n\ndef get_probability_of_merging(analytics_data):\n \"\"\"\n ARGUMENTS:\n\n analytics_data: a list of iterable tables with relavent data\n\n RETURNS:\n\n a simple probability of a merge happening in our given data set\n \"\"\"\n total_tables = len(analytics_data)\n total_merging_tables = 0\n for data_item in analytics_data:\n if int(len(data_item[3][0])) != 0 or int(len(data_item[3][1])) != 0:\n total_merging_tables += 1\n return total_merging_tables / total_tables\n\n\ndef get_mergingRows_db(analytics_data):\n \"\"\"\n This method gives us a distribution of merging rows in a 5D matrix\n\n 1. index identifies the rows of the table type\n\n 2. index identifies the columns of the table type\n\n 3. index identifies the merging start row\n\n 4. index identifies the merging end row\n\n 5. index identifies the column they are merging on\n\n ARGUMENTS:\n\n analytics_data: a list of iterable tables with relavent data\n\n RETURNS:\n\n row_merge_db: a 5D Matrix containing all the row merging data\n \"\"\"\n row_merge_db = np.zeros((max_rows, max_cols, max_rows, max_rows, max_cols))\n for data_item in analytics_data:\n for merge_item in data_item[3][0]:\n row_merge_db[\n int(data_item[0]),\n int(data_item[1]),\n int(merge_item[0]),\n int(merge_item[1]),\n int(merge_item[2]),\n ] += 1\n return row_merge_db\n\n\ndef get_mergingColumns_db(analytics_data):\n \"\"\"\n This method gives us a distribution of merging columns in a 3d matrix\n\n 1. index identifies the rows of the table type\n\n 2. index identifies the columns of the table type\n\n 3. index identifies the merging start column\n\n 4. index identifies the merging end column\n\n 5. 
index identifies the row they are merging on\n\n ARGUMENTS:\n\n analytics_data: a list of iterable tables with relavent data\n\n RETURNS:\n\n col_merge_db: a 5D Matrix containing all the column merging data\n\n \"\"\"\n col_merge_db = np.zeros((max_rows, max_cols, max_cols, max_cols, max_rows))\n for data_item in analytics_data:\n for merge_item in data_item[3][1]:\n col_merge_db[\n int(data_item[0]),\n int(data_item[1]),\n int(merge_item[0]),\n int(merge_item[1]),\n int(merge_item[2]),\n ] += 1\n return col_merge_db\n\n\ndef get_cell_to_word_distribution(analytics_data):\n \"\"\"\n ARGUMENTS:\n\n analytics_data: a list of iterable tables with relavent data\n\n RETURNS:\n\n cell_word_db: A 5D matrix containing probability of words per cell differentiated\n by types of tables that occur throughout the dataset\n \"\"\"\n cell_word_db = np.zeros((max_rows, max_cols, max_rows, max_cols, 70), dtype=\"float\")\n for data_item in analytics_data:\n for cell_item in data_item[5]:\n num_words = int(len(cell_item) - 1)\n word_bin = num_words // 3\n cell_word_db[\n int(data_item[0]),\n int(data_item[1]),\n int(cell_item[0].attrib[\"startRow\"]),\n int(cell_item[0].attrib[\"startCol\"]),\n min(word_bin, 69),\n ] += 1\n return cell_word_db\n\n\ndef read_probability_distribution(distribution_file_path):\n \"\"\"\n ARGUMENTS:\n\n distribution_file_path: path to the pickle file containing the distribution.\n\n RETURNS:\n\n db_tuple: distribution tuple containing the distribution data\n\n \"\"\"\n db_tuple = ut.read_distribution(distribution_file_path)\n return db_tuple\n\n\ndef get_probability_distribution(\n xml_dir,\n ocr_dir,\n img_dir,\n log_dir=os.path.join(os.getcwd(), \"error_logs\"),\n metadata_dir=os.path.join(os.getcwd(), \"metadata\"),\n dataset_name=\"default-icdar\",\n):\n \"\"\"\n This method gets us all the distribution matrices of the data\n\n ARGUMENTS:\n data_path_list: a list of iterable paths where the dataset is stored.\n\n RETURNS:\n tuple of all the numpy matrices containing all the distributions, in following order:\n\n row_column_db\n\n typeMerge_db\n\n prob_merge\n\n rowTocolumn_merge_db\n\n mergingRows_db\n\n mergingColumns_db\n\n cell_word_db\n\n cell_width_db\n\n cell_height_db\n\n lined_table_db\n \"\"\"\n # xml is used as the final file list\n file_list = glob.glob(os.path.join(xml_dir, \"*.xml\"))\n\n files_notfound = []\n analytics_data = []\n for _file in tqdm(file_list):\n ocr_file_name = _file.split(\"/\")[-1][:-3] + \"pkl\"\n img_file_name = _file.split(\"/\")[-1][:-3] + \"png\"\n ocr_file_path = os.path.join(ocr_dir, ocr_file_name)\n img_file_path = os.path.join(img_dir, img_file_name)\n if os.path.exists(ocr_file_path) and os.path.exists(img_file_path):\n analytics_data += localize_data(_file, ocr_file_path, img_file_path)\n else:\n if not os.path.exists(ocr_file_path) and not os.path.exists(img_file_path):\n files_notfound.append([ocr_file_path, img_file_path])\n elif not os.path.exists(ocr_file_path):\n files_notfound.append([ocr_file_path])\n else:\n files_notfound.append([img_file_path])\n ut.log_errors(files_notfound, log_dir)\n\n row_column_db = get_row_column_distribution(analytics_data)\n typeMerge_db = get_type_merge_db(analytics_data)\n prob_merge = get_probability_of_merging(analytics_data)\n rowTocolumn_merge_db = get_RowToColumn_merge_db(analytics_data)\n mergingRows_db = get_mergingRows_db(analytics_data)\n mergingColumns_db = get_mergingColumns_db(analytics_data)\n cell_word_db = get_cell_to_word_distribution(analytics_data)\n cell_width_db = 
get_cell_width_db(analytics_data)\n cell_height_db = get_cell_height_db(analytics_data)\n lined_table_db = get_lined_table_distribution(analytics_data)\n\n distribution_tuple = (\n row_column_db,\n typeMerge_db,\n prob_merge,\n rowTocolumn_merge_db,\n mergingRows_db,\n mergingColumns_db,\n cell_word_db,\n cell_width_db,\n cell_height_db,\n lined_table_db,\n )\n\n ut.write_distributions(distribution_tuple, metadata_dir, dataset_name)\n\n return distribution_tuple\n"
] | [
[
"numpy.true_divide",
"numpy.sqrt",
"numpy.nan_to_num",
"numpy.ones",
"numpy.copy",
"numpy.zeros",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
steelONIONknight/bolt | [
"9bd3d08f2abb14435ca3ad0179889e48fa7e9b47",
"9bd3d08f2abb14435ca3ad0179889e48fa7e9b47",
"9bd3d08f2abb14435ca3ad0179889e48fa7e9b47",
"9bd3d08f2abb14435ca3ad0179889e48fa7e9b47",
"9bd3d08f2abb14435ca3ad0179889e48fa7e9b47"
] | [
"training/src/tests/tests/python/depthwise_conv1d.py",
"training/src/tests/tests/python/RMSprop.py",
"training/src/tests/tests/python/linear.py",
"training/src/tests/tests/python/gru_cell.py",
"training/src/tests/tests/python/cnnDepthwise.py"
] | [
"# Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved.\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\nimport torch\n\n# 1D Depthwise with dilation\ntorch.manual_seed(1)\ntorch.set_printoptions(precision=10)\nm = torch.nn.Conv1d(4, 4, 3, stride=2, padding=1, dilation=2, bias=False, groups=4)\nprint(\"Weights:\", m.weight)\ninput = torch.randn(2, 4, 3, requires_grad=True)\n\nprint(\"Input: \", input)\nresult = m(input)\nprint(\"Result: \", result)\nresult.sum().backward()\nprint(\"Gradient for input: \", input.grad)\nprint(\"Gradient for weights: \", m.weight.grad)\n",
"# Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport torch\nfrom torch.optim import rmsprop\n\n# Test 1\ntorch.manual_seed(0)\ntorch.set_printoptions(precision=6)\nparam = torch.rand(1, 2, 3, 4)\nparam.grad = torch.rand(1, 2, 3, 4)\nprint(\"Parameter: \", param)\nprint(\"Gradeient: \", param.grad)\n# First step\nopt = rmsprop.RMSprop(\n [param], lr=0.1, alpha=0.9, eps=0.1, weight_decay=0.1, momentum=0.1, centered=True\n)\nopt.step()\nprint(\"Parameter (after first step): \", param)\n# Second step\nopt.step()\nprint(\"Parameter (after second step): \", param)\n\n# Test 2\nparam = torch.rand(1, 2, 3, 4)\nparam.grad = torch.rand(1, 2, 3, 4)\nprint(\"Parameter: \", param)\nprint(\"Gradeient: \", param.grad)\n\n# First step\nopt = rmsprop.RMSprop(\n [param], lr=0.1, alpha=0.9, eps=0.1, weight_decay=0.1, momentum=0.1, centered=False\n)\nopt.step()\nprint(\"Parameter (after first step): \", param)\n# Second step\nopt.step()\nprint(\"Parameter (after second step): \", param)\n\n# Test 3\nparam = torch.rand(1, 2, 3, 4)\nparam.grad = torch.rand(1, 2, 3, 4)\nprint(\"Parameter: \", param)\nprint(\"Gradeient: \", param.grad)\n\n# First step\nopt = rmsprop.RMSprop(\n [param], lr=0.1, alpha=0.9, eps=0.1, weight_decay=0.1, momentum=0.0, centered=False\n)\nopt.step()\nprint(\"Parameter (after first step): \", param)\n# Second step\nopt.step()\nprint(\"Parameter (after second step): \", param)\n",
"# Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved.\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport torchvision\nimport math\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nx = torch.tensor(\n [\n [[[1.0, 1.0, 2.0, 0.0, 5.0], [-1.0, 2.0, 2.0, 0.0, 5.0]]],\n [[[-1.0, 4.0, 1.0, 2.0, 1], [-3.0, 4.0, 5.0, 2.0, 1]]],\n ],\n requires_grad=True,\n)\n\nl = nn.Linear(5, 3)\nl.weight.data = torch.tensor(\n [\n [-0.2381, 0.1714, -0.0612, -0.1329, -0.3701],\n [0.0283, -0.2147, -0.0502, 0.2090, 0.4333],\n [-0.1200, 0.1664, -0.3021, -0.2250, 0.3329],\n ],\n requires_grad=True,\n)\nl.bias.data = torch.tensor([0.3548, 0.2879, 0.0343], requires_grad=True)\nprint(l.weight)\nprint(l.bias)\n\nres = l(x)\nres.backward(\n torch.tensor([[[[1.0, 2.0, -1], [0.5, 1, 1.0]]], [[[0.5, 6.0, 1], [2, -1.0, 1.5]]]])\n)\n\nprint(\"res\", res)\nprint(\"x.grad\", x.grad)\nprint(\"weight.grad\", l.weight.grad)\nprint(\"bias.grad\", l.bias.grad)\n",
"# Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved.\n\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\nimport torch\n\ntorch.manual_seed(0)\ntorch.set_printoptions(precision=8)\n\n# Ones hidden unit\nrnn = torch.nn.GRU(9, 5, 1, batch_first=True)\ninput = torch.randn(3, 1, 9, requires_grad=True)\nh0 = torch.zeros([1, 3, 5], requires_grad=True)\nrnn.weight_ih_l0 = torch.nn.Parameter(torch.ones([15, 9]))\nrnn.weight_hh_l0 = torch.nn.Parameter(torch.ones([15, 5]))\nrnn.bias_ih_l0 = torch.nn.Parameter(torch.ones([15]))\nrnn.bias_hh_l0 = torch.nn.Parameter(torch.ones([15]))\nprint(\"Input: \", input)\nprint(\"Hidden: \", h0)\n\n## Forward\noutput, hn = rnn(input, h0)\nprint(\"New hidden: \", hn)\n\n## Backward\nhn.sum().backward()\nprint(\"Input gradient: \", input.grad)\nprint(\"Hidden gradient: \", h0.grad)\n\n# Ones hidden unit\nrnn = torch.nn.GRU(4, 3, 1, batch_first=True)\ninput = torch.randn(2, 1, 4, requires_grad=True)\nh0 = torch.randn(1, 2, 3, requires_grad=True)\nrnn.weight_ih_l0 = torch.nn.Parameter(torch.ones([9, 4]))\nrnn.weight_hh_l0 = torch.nn.Parameter(torch.ones([9, 3]))\nrnn.bias_ih_l0 = torch.nn.Parameter(torch.ones([9]))\nrnn.bias_hh_l0 = torch.nn.Parameter(torch.ones([9]))\nprint(\"Input: \", input)\nprint(\"Hidden: \", h0)\n\n## Forward\noutput, hn = rnn(input, h0)\nprint(\"New hidden: \", hn)\n\n## Backward\nhn.sum().backward()\nprint(\"Input gradient: \", input.grad)\nprint(\"Hidden gradient: \", h0.grad)\n\n# Random weights unit\nrnn = torch.nn.GRU(7, 4, 1, batch_first=True)\ninput = torch.randn(3, 1, 7, requires_grad=True)\nh0 = torch.randn(1, 3, 4, requires_grad=True)\nprint(\"IH weights: \", rnn.weight_ih_l0)\nprint(\"HH weights: \", rnn.weight_hh_l0)\nprint(\"IH bias: \", rnn.bias_ih_l0)\nprint(\"HH bias: \", rnn.bias_hh_l0)\nprint(\"Input: \", input)\nprint(\"Hidden: \", h0)\n\n## Forward\noutput, hn = rnn(input, h0)\nprint(\"New hidden: \", hn)\n\n## Backward\nhn.sum().backward()\nprint(\"Input gradient: \", input.grad)\nprint(\"Hidden gradient: \", h0.grad)\nprint(\"IH weights gradient: \", rnn.weight_ih_l0.grad)\nprint(\"HH weights gradient: \", rnn.weight_hh_l0.grad)\nprint(\"IH biases gradient: \", rnn.bias_ih_l0.grad)\nprint(\"HH biases gradient: \", rnn.bias_hh_l0.grad)\n",
"# Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved.\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nimport os\nimport time\n\nnum_classes = 10\nbatch_size = 50\nlearning_rate = 0.1\n\ncurdir = \"./weights/\"\n\n\nclass Softmax(nn.Module):\n def forward(self, input):\n exp_x = torch.exp(input)\n y = exp_x / exp_x.sum(1).unsqueeze(1).expand_as(exp_x)\n return y\n\n\nclass NeuralNet(nn.Module):\n def __init__(self, num_classes):\n super(NeuralNet, self).__init__()\n\n torch.manual_seed(0)\n\n self.conv1_depthwise = nn.Conv2d(1, 1, kernel_size=3, stride=2, padding=1)\n self.conv1_pointwise = nn.Conv2d(1, 4, kernel_size=1)\n self.conv2_depthwise = nn.Conv2d(\n 4, 4, kernel_size=3, stride=2, padding=1, groups=4\n )\n self.conv2_pointwise = nn.Conv2d(4, 8, kernel_size=1)\n self.conv3_depthwise = nn.Conv2d(\n 8, 8, kernel_size=3, stride=2, padding=1, groups=8\n )\n self.conv3_pointwise = nn.Conv2d(8, 16, kernel_size=1)\n self.conv4_depthwise = nn.Conv2d(\n 16, 16, kernel_size=3, stride=4, padding=0, groups=16\n )\n self.conv4_pointwise = nn.Conv2d(16, 10, kernel_size=1)\n self.softmax = Softmax()\n\n nn.init.xavier_uniform_(self.conv1_depthwise.weight)\n nn.init.xavier_uniform_(self.conv1_pointwise.weight)\n nn.init.xavier_uniform_(self.conv2_depthwise.weight)\n nn.init.xavier_uniform_(self.conv2_pointwise.weight)\n nn.init.xavier_uniform_(self.conv3_depthwise.weight)\n nn.init.xavier_uniform_(self.conv3_pointwise.weight)\n nn.init.xavier_uniform_(self.conv4_depthwise.weight)\n nn.init.xavier_uniform_(self.conv4_pointwise.weight)\n\n nn.init.zeros_(self.conv1_depthwise.bias)\n nn.init.zeros_(self.conv1_pointwise.bias)\n nn.init.zeros_(self.conv2_depthwise.bias)\n nn.init.zeros_(self.conv2_pointwise.bias)\n nn.init.zeros_(self.conv3_depthwise.bias)\n nn.init.zeros_(self.conv3_pointwise.bias)\n nn.init.zeros_(self.conv4_depthwise.bias)\n nn.init.zeros_(self.conv4_pointwise.bias)\n\n def forward(self, x):\n out = self.conv1_depthwise(x)\n out = self.conv1_pointwise(out)\n out = self.conv2_depthwise(out)\n out = self.conv2_pointwise(out)\n out = self.conv3_depthwise(out)\n out = self.conv3_pointwise(out)\n out = self.conv4_depthwise(out)\n out = self.conv4_pointwise(out)\n out = out.reshape(-1, num_classes)\n out = self.softmax(out)\n return out\n\n\ndef predict(test_loader, model):\n correct = 0\n total = 0\n # ~ with 
torch.no_grad():\n for images, labels in test_loader:\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n print(\n \"Accuracy of the network on the 10000 test images: {:.2f} %\".format(\n 100 * correct / total\n )\n )\n\n\ndef saveWeights(index, model):\n\n if not os.path.exists(curdir):\n os.mkdir(curdir)\n\n for name, param in model.named_parameters():\n if param.requires_grad:\n if param.data.dim() == 4:\n for i in range(0, param.data.shape[0]):\n with open(\n curdir + str(index) + \"_\" + name + \"_\" + str(i) + \".txt\", \"w\"\n ) as outfile:\n for j in range(0, param.data.shape[1]):\n np.savetxt(outfile, param.data[i, j])\n else:\n with open(curdir + str(index) + \"_\" + name + \".txt\", \"w\") as outfile:\n np.savetxt(outfile, np.transpose(param.data))\n\n\ndef CrossEntropy(y, target):\n ones = torch.sparse.torch.eye(num_classes)\n t = ones.index_select(0, target).type(y.data.type())\n t = Variable(t)\n loss = (-t * torch.log(y)).sum() / y.size(0)\n return loss, y\n\n\ndef main():\n\n train_dataset = torchvision.datasets.MNIST(\n root=\"./data/mnist\", train=True, transform=transforms.ToTensor(), download=True\n )\n\n test_dataset = torchvision.datasets.MNIST(\n root=\"./data/mnist\", train=False, transform=transforms.ToTensor()\n )\n\n train_loader = torch.utils.data.DataLoader(\n dataset=train_dataset, batch_size=batch_size, shuffle=False\n )\n\n test_loader = torch.utils.data.DataLoader(\n dataset=test_dataset, batch_size=batch_size, shuffle=False\n )\n\n model = NeuralNet(num_classes)\n\n predict(test_loader, model)\n\n optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)\n\n total_step = len(train_loader)\n\n if os.path.exists(curdir + \"loss.txt\"):\n os.remove(curdir + \"loss.txt\")\n\n timeTaken = 0\n\n for i, (images, labels) in enumerate(train_loader):\n\n start = time.time()\n\n outputs = model(images)\n # if i < 1:\n # saveWeights(i, model)\n loss, lossInput = CrossEntropy(outputs, labels)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n timeTaken += time.time() - start\n\n # if i % 100 == 0:\n # with open(curdir + 'loss.txt', 'a') as outfile:\n # print(loss.item(), file = outfile)\n\n if i % 100 == 0:\n print(\"Step [{:4d}/{}], Loss: {:.6f}\".format(i, total_step, loss.item()))\n\n predict(test_loader, model)\n\n print(\"Time taken = {:.4f}\".format(timeTaken))\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.set_printoptions",
"torch.manual_seed",
"torch.randn",
"torch.nn.Conv1d"
],
[
"torch.set_printoptions",
"torch.manual_seed",
"torch.optim.rmsprop.RMSprop",
"torch.rand"
],
[
"torch.nn.Linear",
"torch.tensor"
],
[
"torch.ones",
"torch.zeros",
"torch.set_printoptions",
"torch.manual_seed",
"torch.nn.GRU",
"torch.randn"
],
[
"torch.max",
"torch.manual_seed",
"torch.nn.Conv2d",
"torch.utils.data.DataLoader",
"numpy.transpose",
"torch.exp",
"torch.sparse.torch.eye",
"torch.log",
"torch.nn.init.xavier_uniform_",
"numpy.savetxt",
"torch.nn.init.zeros_",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yma042/verif | [
"506ca3ca9713fdc01dee306edee6a5e21e6d4deb"
] | [
"verif/tests/integration_test.py"
] | [
"import unittest\nimport verif.driver\nimport os\nimport numpy as np\nimport tempfile\nnp.seterr('raise')\n\n\nall_axes = [\"time\", \"leadtime\", \"timeofday\", \"dayofyear\", \"monthofyear\", \"day\", \"week\", \"month\", \"year\", \"leadtimeday\", \"location\", \"lat\", \"lon\", \"elev\"]\n\n\nclass IntegrationTest(unittest.TestCase):\n \"\"\"\n These tests run verif on the command-line, but do not test the validity of the\n output graphics, only that they do or do not create errors.\n \"\"\"\n\n @staticmethod\n def run_command(command):\n \"\"\" Runs a verif command line \"\"\"\n argv = command.split()\n verif.driver.run(argv)\n\n @staticmethod\n def remove(file):\n \"\"\" Removes a file \"\"\"\n os.remove(file)\n\n @staticmethod\n def file_size(filename):\n \"\"\" Returns the number of bytes of a file \"\"\"\n statinfo = os.stat(filename)\n return statinfo.st_size\n\n @staticmethod\n def is_valid_file(filename, min_size=3000):\n \"\"\" Checks if a file is larger in size than min_size bytes \"\"\"\n return IntegrationTest.file_size(filename) > min_size\n\n def run_with_image(self, command):\n \"\"\"\n Runs the verif command and appends -f <somefile>.png so that it will write output\n to a temporary png file. Removes the file afterwards.\n \"\"\"\n fd, imageFile = tempfile.mkstemp(suffix=\".png\")\n command = command + \" -f \" + imageFile\n self.run_command(command)\n self.assertTrue(self.is_valid_file(imageFile), 3000)\n os.close(fd)\n self.remove(imageFile)\n\n def run_with_text(self, command):\n \"\"\"\n Runs the verif command and appends -f <somefile>.txt so that it will write output\n to a temporary txt file. Removes the file afterwards.\n \"\"\"\n fd, textFile = tempfile.mkstemp(suffix=\".txt\")\n command = command + \" -f \" + textFile\n self.run_command(command)\n self.assertTrue(self.is_valid_file(textFile, 10))\n os.close(fd)\n self.remove(textFile)\n\n def test_valid(self):\n self.run_command(\"verif\")\n self.run_command(\"verif --version\")\n self.run_command(\"verif examples/raw.txt examples/kf.txt --list-thresholds\")\n self.run_command(\"verif examples/raw.txt examples/kf.txt --list-quantiles\")\n self.run_command(\"verif examples/raw.txt examples/kf.txt --list-times\")\n self.run_command(\"verif examples/raw.txt examples/kf.txt --list-dates\")\n self.run_command(\"verif examples/raw.txt examples/kf.txt --list-thresholds --list-quantiles --list-times\")\n\n def test_invalid(self):\n with self.assertRaises(SystemExit):\n self.run_with_image(\"verif --list-thresholds\")\n with self.assertRaises(SystemExit):\n self.run_with_image(\"verif --list-quantiles\")\n with self.assertRaises(SystemExit):\n self.run_with_image(\"verif --list-times\")\n\n def test_README(self):\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m ets\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m taylor\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m error\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m reliability -r 0\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m pithist\")\n\n def test_option_b(self):\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m ets -b below\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m ets -b within\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m ets -b above\")\n\n def test_option_c(self):\n self.run_with_image(\"verif examples/raw.txt -c 
examples/kf.txt -m ets\")\n\n def test_option_leg(self):\n self.run_with_image(\"verif -leg 1,2 examples/raw.txt examples/kf.txt -m ets\")\n self.run_with_image(\"verif -leg 1,2 examples/raw.txt examples/kf.txt -m ets -x no\")\n self.run_with_image(\"verif -leg 1dqwoijdioqwjdoiqjwdoijiqow,2dqwoijdioqwjdoiqjwdoijiqow examples/raw.txt examples/kf.txt -m ets\")\n with self.assertRaises(SystemExit):\n self.run_with_image(\"verif -leg 1 examples/raw.txt examples/kf.txt -m ets\")\n\n def test_option_ct(self):\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -agg min\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -agg mean\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -agg median\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -agg max\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -agg std\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -agg range\")\n\n def test_standard_option_x(self):\n for axis in all_axes:\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -x %s\" % axis)\n\n def test_obsfcst_option_x(self):\n for axis in all_axes:\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m obsfcst -x %s\" % axis)\n\n def test_pithist(self):\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m pithistdev\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m pithistslope\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m pithistshape\")\n\n def test_obs_subset(self):\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -r 10\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -x threshold\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -r 0,2,10 -x threshold -b within\")\n\n def test_annotate(self):\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -a\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -a -x location\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -a -type map\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m obsfcst -a\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m obsfcst -a -x location\")\n\n def test_plotting_options(self):\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -aspect 0.1\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -aspect 2.1\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -bottom 0.1\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -bottom 0.5\")\n # -clim and -cmap are tested with -type map\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -dpi 50\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -dpi 300\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -fs 10,2\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -labfs 0\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -labfs 11\")\n # -lc tests are in separate functions\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -left 0.8\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -legfs 0\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -legfs 10\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -legloc right\")\n 
self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -legloc lower_left\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -ls - -ma ,o\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -ls None -ma *\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -ls -,-, -ma ,s,:\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -lw 0\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -lw 1\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -lw 1.3\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -lw 2\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -ms 0\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -ms 1\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -ms 1.3\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -ms 2\")\n # For some reason this fails without -left 0.1, although it works fine when verif is\n # invoked on the command line:\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -left 0.1 -right 0.8\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -tickfs 0\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -tickfs 10\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -title title\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -titlefs 0\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -titlefs 10\")\n # Same as for -right above\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -bottom 0.1 -top 0.4\")\n # -type is tested separately\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -xlabel test\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -xlim 0,1\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -xrot 90\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -xticks 0:4\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -xticks 0:4 -xticklabels 0,test,1\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -xticklabels 0,test,1\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -xticklabels ''\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -ylabel test\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -ylim 0,1\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -yrot 90\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -yticks 0:4\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -yticks 0:4 -yticklabels 0,test,1\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -yticklabels 0,test,1\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -yticklabels ''\")\n\n def test_against(self):\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m against\")\n # Ensure at least 3 files to test the subplots\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt examples/raw.txt -m against\")\n\n def test_impact(self):\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -type impact\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m corr -type impact -ms 6 -r 0:0.1:1\")\n\n def test_mapimpact(self):\n 
self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -type mapimpact -legfs 0\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m ets -type mapimpact -r 1 -legfs 0\")\n\n def test_fss(self):\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m fss -r 5\")\n\n def test_taylor(self):\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m taylor -xlim 0,2\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m taylor -xlim 0,0.2\")\n\n def test_roc(self):\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m droc -r 0\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m droc -r 0 -simple\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m droc -r 0 -xlog\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m droc -r 0 -ylog\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m droc -r 0 -xlog -ylog\")\n\n def test_obsleg(self):\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m obsfcst -obsleg Test -leg 1,2\")\n\n def test_discrimination(self):\n self.run_with_image(\"verif examples/raw.txt -m discrimination -r 0\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m discrimination -r 0\")\n\n def test_auto(self):\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m autocorr\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m autocov\")\n\n def test_autocorr(self):\n self.run_with_image(\"verif examples/raw.txt -m autocorr\")\n self.run_with_image(\"verif examples/raw.txt -m autocorr -x location\")\n self.run_with_image(\"verif examples/raw.txt -m autocorr -x lat\")\n self.run_with_image(\"verif examples/raw.txt -m autocorr -x lon\")\n self.run_with_image(\"verif examples/raw.txt -m autocorr -x elev\")\n self.run_with_image(\"verif examples/raw.txt -m autocorr -x time\")\n self.run_with_image(\"verif examples/raw.txt -m autocorr -x leadtime\")\n self.run_with_image(\"verif examples/raw.txt -m autocorr -r 0:100:1000\")\n self.run_with_image(\"verif examples/raw.txt -m autocorr -r 0:100:1000 -xlim 0,100\")\n\n def test_config(self):\n self.run_with_image(\"verif examples/raw.txt --config verif/tests/files/config1.txt\")\n self.run_with_image(\"verif examples/raw.txt -m mae --config verif/tests/files/configEmpty.txt\")\n\n def test_other_fields(self):\n self.run_with_image(\"verif verif/tests/files/file1_crps.txt -m crps\")\n self.run_with_image(\"verif verif/tests/files/file1_crps.txt -m crps -x time\")\n self.run_with_image(\"verif verif/tests/files/file1_crps.txt -m crps -agg median\")\n\n def test_map_type(self):\n pass\n\n def test_type(self):\n self.run_with_text(\"verif examples/raw.txt examples/kf.txt -m mae -type text\")\n self.run_with_text(\"verif examples/raw.txt examples/kf.txt -m mae -type csv\")\n # These cause a FutureWarning in mpl, but not much we can do about that\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -type map -clim 0,11\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -type map -cmap RdBu\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -type map\")\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -type maprank\")\n\n def test_freq(self):\n self.run_with_image(\"verif verif/tests/files/file1.txt -m freq\")\n # Check that no error occurs, even though fcst or obs is not available\n self.run_with_image(\"verif verif/tests/files/file1_no_obs.txt -m freq\")\n 
self.run_with_image(\"verif verif/tests/files/file1_no_fcst.txt -m freq\")\n\n def test_option_lc(self):\n for lc in (\"g,r\", \"g\", \"g,r,b\", \"0,0.5,0.9\", \"[0,0,1],0.5,g\"):\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m mae -lc %s\" % lc)\n\n def test_boolean_options(self):\n for opt in (\"acc\", \"nogrid\", \"nomargin\", \"hist\", \"sort\", \"sp\", \"simple\", \"xlog\", \"ylog\"):\n self.run_with_image(\"verif examples/raw.txt examples/kf.txt -m obs -%s\" % opt)\n\n def test_invalidMetric(self):\n with self.assertRaises(SystemExit):\n self.run_with_image(\"verif examples/T_raw_0.nc -m maeq\")\n\n def test_invalidFile(self):\n with self.assertRaises(SystemExit):\n self.run_with_image(\"verif examples/T_raw_1.nc -m mae\")\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.seterr"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dpwe/calc_sbpca | [
"6a0a3631e7c7c647fc826596cfd96eeafd643a19"
] | [
"python/test_SAcC.py"
] | [
"#!/usr/bin/env python\n\"\"\"\ntest_SAcC.py\ntest harness for SAcC.py\n\"\"\"\n\nimport SAcC\nimport numpy as np\nconfig = {}\n\n# Input/Output files\nfilename = 'input8k.wav'\n#filename = '60sec_37497_20120228_020100_24537_appen-cts-pus_F.wav'\noutfilename = 'sacc8k.txt'\n#outfilename = '60sec_37497_20120228_020100_24537_appen-cts-pus_F.txt'\n#outsphname = 'sacc8k.sph'\n#outmatname = 'sacc8k.mat'\n\n# Add config parameters to dictionary\n# diff file for py\nconfig['pca_file'] = 'aux/mapping-pca_sr8k_bpo6_sb24_k10.mat' \n#config['wgt_file'] = 'aux/rats_sr8k_bpo6_sb24_k10_aCH_h100.wgt'\n#config['norms_file'] = 'aux/tr_rats_sr8k_bpo6_sb24_k10.norms'\nconfig['wgt_file'] = 'aux/sub_qtr_rats_keele_sr8k_bpo6_sb24_k10_ep5_h100.wgt'\nconfig['norms_file'] = 'aux/tr_keele_rbf_pinknoise_sr8000_bpo6_nchs24_k10.norms'\nconfig['pcf_file'] = 'aux/pitch_candidates_freqz.txt'\n#config['kdim'] = 10 # not used - inferred from pca file\nconfig['nchs'] = 24\nconfig['n_s'] = 1.0 # secs per process block, controls blockframes\nconfig['SBF_sr'] = 8000\nconfig['SBF_fmin'] = 100\nconfig['SBF_bpo'] = 6\nconfig['SBF_q'] = 8 # not actually used for SlanPat ERB filters\nconfig['SBF_order'] = 2 # not actually used for SlanPat ERB filters\n#config['SBF_ftype'] = 2 # not used in Python - only supported type\n#config['force_mono'] = 1 # stereo downmixing not supported\nconfig['twin'] = 0.025 # New option in Python - autoco window len\nconfig['thop'] = 0.010 # New option in Python - autoco hop\n#config['ac_maxlags'] = 200 # not used - inferred from mapping file\n#config['hid'] = 100 # not used - inferred from wgt file\n#config['npcf'] = 67 # not used - inferred from wgt file\n#config['nmlp'] = 68 # not used - inferred from wgt file\nconfig['hmm_vp'] = 0.9 # interpretation changed c/w Matlab\nconfig['write_rownum'] = 0 # prepend row number\nconfig['write_time'] = 1 # prepend time in seconds to output\nconfig['write_sbac'] = 0 # output raw autocorrelations (big - 24 x 200)\nconfig['write_sbpca'] = 0 # output subband pcas (24 x 10)\nconfig['write_posteriors'] = 0 # output raw pitch posteriors (68)\nconfig['write_pitch'] = 1 # output the actual pitch value in Hz (1)\nconfig['write_pvx'] = 1 # output just 1-posterior(unvoiced) (1)\n# Tricks with segmenting utterances not implemented in Python\n#config['start_utt'] = 0 # what utterance number to start at\n#config['incr_utt'] = 0 # increment the utterance each seg (?)\n#config['segs_per_utt'] = 1 # break each utterance into this many segs\nconfig['verbose'] = 0\n\n\nimport array as ARRAY\nimport ctypes\n\ndef write_features_in_sph(filename, data, feature_names=\"SAcC\"):\n num_frames, num_elements = np.shape(data)\n shead = \"NIST_1A\\n 1024\\nfile_type -s11 featurefile\\n\"\n shead += \"feature_names -s\" + str(len(feature_names)) \n shead += \" \" + feature_names + \"\\n\"\n shead += \"num_elements -i \" + str(int(num_elements)) + \"\\n\"\n shead += \"num_frames -i \" + str(int(num_frames)) + \"\\n\"\n shead += \"sample_count -i \" + str(int(num_elements * num_frames)) + \"\\n\"\n shead += \"end_head\\n\"\n shead += (1024 - len(shead)) * \" \"\n f = open(filename, 'wb')\n buf = ctypes.create_string_buffer(shead, 1024)\n f.write(buf)\n v = ARRAY.array('f')\n v.fromlist(list(data.ravel()))\n v.byteswap()\n v.tofile(f)\n f.close()\n\n# Construct the extractor then extract the features for [filename]\nextractor = SAcC.SAcC(config)\nfeatures = extractor(filename)\n\n# Save features in ascii format\nnp.savetxt(outfilename, features, fmt='%f', delimiter=' ', 
newline='\\n')\n#write_features_in_sph(outsphname, features)\n#scipy.io.savemat(outmatname, {'features': features})\n"
] | [
[
"numpy.savetxt",
"numpy.shape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
GiulioCMSanto/HDSIdent | [
"88d3b5a7d1d99b5c90f2114a939b83294c003d25",
"88d3b5a7d1d99b5c90f2114a939b83294c003d25"
] | [
"HDSIdent/initial_intervals/exponentially_weighted.py",
"HDSIdent/model_structures/model_structures.py"
] | [
"import math\nimport pandas as pd\nimport numpy as np\nfrom collections import defaultdict\nfrom joblib import Parallel, delayed\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.set_style(\"darkgrid\")\n\n\nclass ExponentiallyWeighted(object):\n \"\"\"\n Exponentially Weighted Moving Average Control Chart.\n Implements a recursive moving average filter, detecting\n change-points and its corresponding intervals.\n\n Arguments:\n forgetting_fact_v: exponential forgetting factor for the variance;\n forgetting_fact_u: exponential forgetting factor for the average;\n sigma: data (population) standard deviation. Estimated if not provided;\n H_u: change-point threshold for the mean;\n H_v: change-point threshold for the variance;\n min_input_coupling: the min number of inputs that must satisfy the method criteria (thresholds);\n min_output_coupling: the min number of outputs that must satisfy the method criteria (thresholds);\n num_previous_indexes: number of indexes to anticipate the beginning of an interval;\n split_size: the maximum interval length (lengthier intervals are split);\n min_interval_length: the minimum length an interval must have;\n n_jobs: the number of threads as in joblib library;\n verbose: the degree of verbosity (going from 0 to 10).\n\n -------------------------------- REFERENCES --------------------------------------\n This method here implemented is proposed in the following works:\n PERETZKI, D. et al. Data mining of historic data for process identification.\n In: Proceedings of the 2011 AIChE Annual Meeting, p. 1027–1033, 2011.\n\n BITTENCOURT, A. C. et al. An algorithm for finding process identification\n intervals from normal operating data. Processes, v. 3, p. 357–383, 2015.\n\n Ideas of the following work were also considered:\n WANG, J. et al. Searching historical data segments for process\n identification in feedback control loops. Computers and Chemical\n Engineering, v. 112, n. 6, p. 
6–16, 2018.\n \"\"\"\n\n def __init__(\n self,\n forgetting_fact_v,\n forgetting_fact_u,\n sigma=None,\n H_u=None,\n H_v=None,\n min_input_coupling=1,\n min_output_coupling=1,\n num_previous_indexes=0,\n min_interval_length=None,\n split_size=None,\n n_jobs=-1,\n verbose=0,\n ):\n\n self.forgetting_fact_v = forgetting_fact_v\n self.forgetting_fact_u = forgetting_fact_u\n self.sigma = sigma\n self.H_u = H_u\n self.H_v = H_v\n self.min_input_coupling = min_input_coupling\n self.min_output_coupling = min_output_coupling\n self.num_previous_indexes = num_previous_indexes\n self.min_interval_length = min_interval_length\n self.split_size = split_size\n self.n_jobs = n_jobs\n self.verbose = verbose\n\n def _verify_data(self, X, y):\n \"\"\"\n Verifies the data type and save data columns\n in case they are provided.\n\n Arguments:\n X: the input data in pandas dataframe format or numpy array\n y: the output data in pandas dataframe format or numpy array\n\n Output:\n X: the input data in numpy array format\n y: the input data in numpy array format\n X_cols: the input data columns in case they are provided\n y_cols: the output data columns in case they are provided\n \"\"\"\n if type(X) == pd.core.frame.DataFrame:\n X_cols = X.columns\n X = X.values\n if X.ndim == 1:\n X = X.reshape(-1, 1)\n elif type(X) == np.ndarray:\n X_cols = None\n if X.ndim == 1:\n X = X.reshape(-1, 1)\n else:\n raise Exception(\"Input data must be a pandas dataframe or a numpy array\")\n\n if type(y) == pd.core.frame.DataFrame:\n y_cols = y.columns\n y = y.values\n if y.ndim == 1:\n y = y.reshape(-1, 1)\n elif type(y) == np.ndarray:\n y_cols = None\n if y.ndim == 1:\n y = y.reshape(-1, 1)\n else:\n raise Exception(\"Input data must be a pandas dataframe or a numpy array\")\n\n return X, y, X_cols, y_cols\n\n def _initialize_internal_variables(self, X):\n \"\"\"\n This function initializes the interval variables.\n \"\"\"\n self.unified_intervals = defaultdict(list)\n self.intervals = defaultdict(list)\n self._mu_k_arr = None\n self._v_k_arr = None\n self._mu_k = np.array([])\n self._v_k = np.array([])\n self._is_interval = [False] * X.shape[1]\n self._init_idx = [0] * X.shape[1]\n self._final_idx = [0] * X.shape[1]\n self._criteria = None\n\n self._mu_k_1 = np.mean(X[:100, :], axis=0)\n self._v_k_1 = np.var(X[:100, :], axis=0)\n\n if self.sigma is None:\n self.sigma = np.std(X, axis=0)\n\n if self.H_u is None:\n self.H_u = 5 * self.sigma\n\n if self.H_v is None:\n self.H_v = 5 * self.sigma\n\n if type(self.H_u) == list:\n self.H_u = np.array(self.H_u)\n if type(self.H_v) == list:\n self.H_u = np.array(self.H_u)\n\n def _exponential_moving_average_and_variance(self, X, idx):\n \"\"\"\n Performs a recursive exponential moving average/variance\n algorithm from past samples.\n\n Arguments:\n X: the input discrete-time data\n idx: the input data sample index\n\n Output:\n self._mu_k: the sample average filtered data for the given index\n self._v_k: the sample variance filtered data for the given index\n \"\"\"\n\n self._mu_k = (\n self.forgetting_fact_u * X[idx, :]\n + (1 - self.forgetting_fact_u) * self._mu_k_1\n )\n self._v_k = ((2 - self.forgetting_fact_u) / 2) * (\n self.forgetting_fact_v * (X[idx, :] - self._mu_k) ** 2\n + (1 - self.forgetting_fact_v) * self._v_k_1\n )\n\n self._mu_k_1 = self._mu_k\n self._v_k_1 = self._v_k\n\n return (self._mu_k, self._v_k)\n\n def _search_for_change_points(self, X, idx, col, criteria):\n \"\"\"\n Searchs for change points in the filtered data.\n\n Arguments:\n idx: the filtered data 
sample index\n col: the data column (execution signal)\n criteria: the filter to be considered when looking for\n a change-point (average, variance or both)\n\n Output:\n self._intervals: a list with the initial and final\n indexes of an interval (if found).\n \"\"\"\n\n # Change-point conditions\n if criteria == \"average\":\n condition = abs(self._mu_k_arr[idx, col]) >= self.H_u[col]\n elif criteria == \"variance\":\n condition = abs(self._v_k_arr[idx, col]) >= self.H_v[col]\n else:\n condition = (abs(self._mu_k_arr[idx, col]) >= self.H_u[col]) and (\n abs(self._v_k_arr[idx, col]) >= self.H_v[col]\n )\n\n if condition:\n if not self._is_interval[col]:\n self._init_idx[col] = idx\n self._is_interval[col] = True\n elif idx == len(X) - 1 and self._is_interval[col]:\n self._is_interval[col] = False\n self._final_idx[col] = idx\n self.intervals[col].append([self._init_idx[col], self._final_idx[col]])\n elif self._is_interval[col]:\n self._is_interval[col] = False\n self._final_idx[col] = idx\n self.intervals[col].append([self._init_idx[col], self._final_idx[col]])\n\n def recursive_exponential_moving_average_and_variance(self, X):\n \"\"\"\n Performs a recursive moving average/variance algorithm from past samples\n using a multithread approach.\n\n Output:\n self._mu_k_arr: the average filtered data for the given index\n self._v_k_arr: the variance filtered data for the given index\n \"\"\"\n results = list(\n Parallel(n_jobs=self.n_jobs, require=\"sharedmem\", verbose=self.verbose)(\n delayed(self._exponential_moving_average_and_variance)(X, idx)\n for idx in range(len(X))\n )\n )\n\n self._mu_k_arr, self._v_k_arr = list(zip(*results))\n self._mu_k_arr = np.stack(self._mu_k_arr, axis=0)\n self._v_k_arr = np.stack(self._v_k_arr, axis=0)\n\n return self._mu_k_arr, self._v_k_arr\n\n def change_points(self, X, criteria=\"variance\"):\n \"\"\"\n Searchs for change points in the filtered data and its\n corresponding intervals using a multithread approach.\n\n Arguments:\n criteria: the filter to be considered when looking for\n a change-point (average, variance or both)\n \"\"\"\n # Reset Intervals\n self.intervals = defaultdict(list)\n\n # Update Criteria\n self._criteria = criteria\n\n if (self._mu_k_arr is None) or (self._v_k_arr is None):\n self.recursive_exponential_moving_average_and_variance()\n\n Parallel(n_jobs=self.n_jobs, require=\"sharedmem\", verbose=self.verbose)(\n delayed(self._search_for_change_points)(X, idx, col, criteria)\n for idx in range(len(X))\n for col in range(X.shape[1])\n )\n\n self._is_interval = [False] * X.shape[1]\n self._init_idx = [0] * X.shape[1]\n self._final_idx = [0] * X.shape[1]\n\n return self.intervals\n\n def _extend_previous_indexes(self):\n \"\"\"\n This function allows an extension of each interval\n with previous index values. 
The number of indexes\n extended are provided in num_previous_indexes.\n \"\"\"\n for key, interval_arr in self.intervals.items():\n for idx, interval in enumerate(interval_arr):\n\n min_val = np.min(interval)\n\n if (idx == 0) and (np.min(interval) - self.num_previous_indexes < 0):\n min_val = 0\n elif (idx > 0) and (\n (np.min(interval) - self.num_previous_indexes)\n <= np.max(interval_arr[idx - 1])\n ):\n min_val = np.max(interval_arr[idx - 1]) + 1\n else:\n min_val = np.min(interval) - self.num_previous_indexes\n\n self.intervals[key][idx] = [min_val, np.max(interval)]\n\n def _create_indicating_sequence(self, X):\n \"\"\"\n This function creates an indicating sequence, i.e., an array containing 1's\n in the intervals of interest and 0's otherwise, based on each interval obtained\n by the exponential weighted filter approach.\n\n Output:\n indicating_sequence: the indicating sequence\n \"\"\"\n indicating_sequence = np.zeros(X.shape[0])\n for _, interval_arr in self.intervals.items():\n for interval in interval_arr:\n indicating_sequence[interval[0] : interval[1] + 1] = 1\n\n return indicating_sequence\n\n def _create_sequential_indicating_sequences(self, indicating_sequence):\n \"\"\"\n This function gets the indicating sequence for a given data\n and creates the corresponding segments where the sequence\n contains consecutive values of 1. For example, the sequence\n [0,0,1,1,1,1,0,0,0,1,1,0,0,0] would result in two sequential\n sequences:\n\n 1) Sequence formed by indexes [2,3,4,5]\n 2) Sequence forme by indexes [9,10]\n\n Arguments:\n indicating_sequence: the data indicating sequence.\n\n Output:\n sequential_indicating_sequences: the sequential indicating sequence.\n \"\"\"\n\n is_interval = False\n sequential_indicating_sequences = []\n aux_arr = []\n\n for idx in range(len(indicating_sequence)):\n\n if not is_interval and indicating_sequence[idx] == 1:\n is_interval = True\n\n if is_interval and indicating_sequence[idx] == 1:\n aux_arr.append(idx)\n\n if idx < len(indicating_sequence) - 1:\n if (\n is_interval\n and indicating_sequence[idx] == 1\n and indicating_sequence[idx + 1] == 0\n ):\n\n is_interval = False\n sequential_indicating_sequences.append(aux_arr)\n aux_arr = []\n else:\n if aux_arr != []:\n sequential_indicating_sequences.append(aux_arr)\n\n return sequential_indicating_sequences\n\n def _label_intervals_with_input_output(self, X, X_cols, y, y_cols):\n \"\"\"\n This function labels the intervals dictionary keys to discriminate\n the input and output variables. This is crucial to garantee the\n min_input_coupling and min_output_coupling conditions.\n\n Arguments:\n X: the input matrix. 
Each column corresponds to an input signal\n X_cols: the input signals column names\n y: the output matrix: Each column corresponds to an ouput signal\n y_cols: the output signals column names\n \"\"\"\n\n labeled_intervals = defaultdict(dict)\n\n for input_idx in range(0, X.shape[1]):\n\n if X_cols is not None:\n input_idx_name = X_cols[input_idx]\n else:\n input_idx_name = \"input\" + \"_\" + str(input_idx)\n\n labeled_intervals[\"input\"][input_idx_name] = self.intervals[input_idx]\n\n for output_idx in range(0, y.shape[1]):\n\n if y_cols is not None:\n output_idx_name = y_cols[output_idx]\n else:\n output_idx_name = \"output\" + \"_\" + str(output_idx)\n\n labeled_intervals[\"output\"][output_idx_name] = self.intervals[\n X.shape[1] + output_idx\n ]\n\n return labeled_intervals\n\n def _get_final_intervals(self, labeled_intervals, global_sequence):\n \"\"\"\n This function takes the global indicating sequences, i.e., the unified\n indicating sequence for all input and output signals and verfies if\n there is at least one input and one output valid indicating sequence inside\n each global indicating sequence.\n\n Arguments:\n global_sequence: the unified intervals for all input and output signals.\n labeled_intervals: the individual intervals for each input and output.\n \"\"\"\n\n final_segment_indexes = []\n\n for segment_idx_arr in global_sequence:\n\n # Check if at least one input indicating sequence is in the correspondig global sequence\n input_count = 0\n for input_name in labeled_intervals[\"input\"].keys():\n input_aux_count = 0\n for input_sequence in labeled_intervals[\"input\"][input_name]:\n if all(elem in segment_idx_arr for elem in input_sequence):\n input_aux_count += 1\n if input_aux_count > 0:\n input_count += 1\n\n # Check if at least one output indicating sequence is in the correspondig global sequence\n output_count = 0\n for output_name in labeled_intervals[\"output\"].keys():\n output_aux_count = 0\n for output_sequence in labeled_intervals[\"output\"][output_name]:\n if all(elem in segment_idx_arr for elem in output_sequence):\n output_aux_count += 1\n if output_aux_count > 0:\n output_count += 1\n\n if (\n input_count >= self.min_input_coupling\n and output_count >= self.min_output_coupling\n ):\n\n final_segment_indexes.append(segment_idx_arr)\n\n return final_segment_indexes\n\n def _length_check(self):\n \"\"\"\n This function checks the interval length\n according to the provided min_interval_length.\n Only intervals with length >= min_interval_length\n are returned.\n \"\"\"\n final_intervals = {}\n\n for key, value in self.unified_intervals.items():\n if len(value) >= self.min_interval_length:\n final_intervals[key] = value\n\n return final_intervals\n\n def _split_data(self):\n \"\"\"\"\"\"\n final_intervals = {}\n divided_intervals = []\n\n for key, value in self.unified_intervals.items():\n if len(value) < self.split_size:\n divided_intervals.append(value)\n else:\n divided_intervals += list(\n np.array_split(\n np.array(value), math.ceil(len(value) / self.split_size)\n )\n )\n\n for key, interval in enumerate(divided_intervals):\n final_intervals[key] = list(interval)\n\n return final_intervals\n\n def fit(self, X, y):\n \"\"\"\n This function performs the following routines:\n - Applies the recursive exponential moving average/variance\n - Compute the initial intervals (change-points)\n - Creates an indicating sequence, unifying input and output intervals\n - From the indicating sequence, creates a final unified interval\n\n Output:\n 
unified_intervals: the final unified intervals for the input and output signals\n \"\"\"\n\n # Verify data format\n X, y, X_cols, y_cols = self._verify_data(X, y)\n\n # Create Matrix\n data = np.concatenate([X, y], axis=1)\n\n # Initialize Internal Variables\n self._initialize_internal_variables(X=data)\n\n # Apply Recursive Exponential Moving Average/Variance\n self.recursive_exponential_moving_average_and_variance(X=data)\n\n # Find change-points\n self.change_points(X=data)\n\n # Extend Intervals\n if self.num_previous_indexes > 0:\n self._extend_previous_indexes()\n\n # Make labeled intervals\n self.labeled_intervals = self._label_intervals_with_input_output(\n X=X, X_cols=X_cols, y=y, y_cols=y_cols\n )\n\n # Create Indicating Sequence\n indicating_sequence = self._create_indicating_sequence(X=data)\n\n # Create Global Sequence\n global_sequence = self._create_sequential_indicating_sequences(\n indicating_sequence=indicating_sequence\n )\n\n # Find intervals that respect min_input_coupling and min_output_coupling\n final_segment_indexes = self._get_final_intervals(\n labeled_intervals=self.labeled_intervals, global_sequence=global_sequence\n )\n\n self.unified_intervals = dict(\n zip(range(0, len(final_segment_indexes)), final_segment_indexes)\n )\n\n # Length Check\n if (self.min_interval_length is not None) and (self.min_interval_length > 1):\n self.unified_intervals = self._length_check()\n\n # Split Long Data\n if self.split_size:\n self.unified_intervals = self._split_data()\n\n return self.unified_intervals\n\n def plot_change_points(self, X, y):\n \"\"\"\n Plots all found change points and its corresponding\n intervals.\n \"\"\"\n # Verify data format\n X, y, X_cols, y_cols = self._verify_data(X, y)\n\n # Create Matrix\n data = np.concatenate([X, y], axis=1)\n df_cols = None\n\n if X_cols is not None and y_cols is not None:\n df_cols = list(X_cols) + list(y_cols)\n\n # Check if fit is needed\n try:\n self.intervals\n except:\n self.fit(X=X, y=y)\n\n for col in list(self.intervals.keys()):\n intervals_arr = self.intervals[col]\n\n sns.set_style(\"darkgrid\")\n plt.figure(figsize=(15, 4))\n if self._criteria == \"variance\":\n plt.plot(self._v_k_arr[:, col], zorder=1, color=\"coral\")\n elif self._criteria == \"average\":\n plt.plot(self._mu_k_arr[:, col], zorder=1, color=\"coral\")\n else:\n plt.plot(\n self._v_k_arr[:, col],\n label=\"Variance Plot\",\n zorder=1,\n color=\"coral\",\n )\n plt.plot(\n self._mu_k_arr[:, col],\n label=\"Average Plot\",\n zorder=1,\n color=\"coral\",\n )\n plt.legend(fontsize=14)\n\n if df_cols is None:\n col_name = f\"Signal {col}\"\n else:\n col_name = f\"Signal {df_cols[col]}\"\n\n plt.title(\n f\"Moving Average Change Points and Intervals for {col_name}\",\n fontsize=18,\n fontweight=\"bold\",\n )\n plt.ylabel(\"Signal Amplitude\", fontsize=18)\n plt.xlabel(\"Discrete Samples\", fontsize=18)\n plt.xticks(fontsize=18, color=\"black\")\n plt.yticks(fontsize=18, color=\"black\")\n\n color_rule = True\n color_arr = [\"darkred\", \"darkmagenta\"]\n for interval in intervals_arr:\n color_rule = not color_rule\n for idx in interval:\n if self._criteria == \"variance\":\n plt.scatter(\n idx,\n self._v_k_arr[:, col][idx],\n marker=\"x\",\n s=50,\n color=color_arr[color_rule],\n zorder=2,\n )\n plt.axvline(x=idx, linestyle=\"--\", color=color_arr[color_rule])\n else:\n plt.scatter(\n idx,\n self._mu_k_arr[:, col][idx],\n marker=\"x\",\n s=50,\n color=color_arr[color_rule],\n zorder=2,\n )\n plt.axvline(x=idx, linestyle=\"--\", 
color=color_arr[color_rule])\n plt.show()\n\n\n# See below the used libraries Licenses\n# -------------------------------------\n\n# Joblib license\n# --------------\n\n# Copyright (c) 2008-2016, The joblib developers.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n\n# Pandas license\n# --------------\n\n# Copyright (c) 2008-2011, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team\n# All rights reserved.\n#\n# Copyright (c) 2011-2020, Open source contributors.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n\n# Numpy license\n# -------------\n\n# Copyright (c) 2005-2020, NumPy Developers.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n#\n# * Neither the name of the NumPy Developers nor the names of any\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n\n# Matplotlib licence\n# ------------------\n\n# License agreement for matplotlib versions 1.3.0 and later\n# =========================================================\n#\n# 1. This LICENSE AGREEMENT is between the Matplotlib Development Team\n# (\"MDT\"), and the Individual or Organization (\"Licensee\") accessing and\n# otherwise using matplotlib software in source or binary form and its\n# associated documentation.\n#\n# 2. 
Subject to the terms and conditions of this License Agreement, MDT\n# hereby grants Licensee a nonexclusive, royalty-free, world-wide license\n# to reproduce, analyze, test, perform and/or display publicly, prepare\n# derivative works, distribute, and otherwise use matplotlib\n# alone or in any derivative version, provided, however, that MDT's\n# License Agreement and MDT's notice of copyright, i.e., \"Copyright (c)\n# 2012- Matplotlib Development Team; All Rights Reserved\" are retained in\n# matplotlib alone or in any derivative version prepared by\n# Licensee.\n#\n# 3. In the event Licensee prepares a derivative work that is based on or\n# incorporates matplotlib or any part thereof, and wants to\n# make the derivative work available to others as provided herein, then\n# Licensee hereby agrees to include in any such work a brief summary of\n# the changes made to matplotlib .\n#\n# 4. MDT is making matplotlib available to Licensee on an \"AS\n# IS\" basis. MDT MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\n# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, MDT MAKES NO AND\n# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\n# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB\n# WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.\n#\n# 5. MDT SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB\n# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR\n# LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING\n# MATPLOTLIB , OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF\n# THE POSSIBILITY THEREOF.\n#\n# 6. This License Agreement will automatically terminate upon a material\n# breach of its terms and conditions.\n#\n# 7. Nothing in this License Agreement shall be deemed to create any\n# relationship of agency, partnership, or joint venture between MDT and\n# Licensee. This License Agreement does not grant permission to use MDT\n# trademarks or trade name in a trademark sense to endorse or promote\n# products or services of Licensee, or any third party.\n#\n# 8. By copying, installing or otherwise using matplotlib ,\n# Licensee agrees to be bound by the terms and conditions of this License\n# Agreement.\n#\n# License agreement for matplotlib versions prior to 1.3.0\n# ========================================================\n#\n# 1. This LICENSE AGREEMENT is between John D. Hunter (\"JDH\"), and the\n# Individual or Organization (\"Licensee\") accessing and otherwise using\n# matplotlib software in source or binary form and its associated\n# documentation.\n#\n# 2. Subject to the terms and conditions of this License Agreement, JDH\n# hereby grants Licensee a nonexclusive, royalty-free, world-wide license\n# to reproduce, analyze, test, perform and/or display publicly, prepare\n# derivative works, distribute, and otherwise use matplotlib\n# alone or in any derivative version, provided, however, that JDH's\n# License Agreement and JDH's notice of copyright, i.e., \"Copyright (c)\n# 2002-2011 John D. Hunter; All Rights Reserved\" are retained in\n# matplotlib alone or in any derivative version prepared by\n# Licensee.\n#\n# 3. In the event Licensee prepares a derivative work that is based on or\n# incorporates matplotlib or any part thereof, and wants to\n# make the derivative work available to others as provided herein, then\n# Licensee hereby agrees to include in any such work a brief summary of\n# the changes made to matplotlib.\n#\n# 4. JDH is making matplotlib available to Licensee on an \"AS\n# IS\" basis. 
JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\n# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND\n# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\n# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB\n# WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.\n#\n# 5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB\n# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR\n# LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING\n# MATPLOTLIB , OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF\n# THE POSSIBILITY THEREOF.\n#\n# 6. This License Agreement will automatically terminate upon a material\n# breach of its terms and conditions.\n#\n# 7. Nothing in this License Agreement shall be deemed to create any\n# relationship of agency, partnership, or joint venture between JDH and\n# Licensee. This License Agreement does not grant permission to use JDH\n# trademarks or trade name in a trademark sense to endorse or promote\n# products or services of Licensee, or any third party.\n#\n# 8. By copying, installing or otherwise using matplotlib,\n# Licensee agrees to be bound by the terms and conditions of this License\n# Agreement.\n\n# Seaborn license\n# ---------------\n\n# Copyright (c) 2012-2020, Michael L. Waskom\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the project nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n",
"from scipy.stats import norm as sci_norm\nfrom collections import defaultdict\nfrom numpy import linalg as LA\nimport pandas as pd\nimport numpy as np\n\n\nclass ModelStructure(object):\n \"\"\"\n This class contains general functions that can be\n used for a variety of model structures used in\n System Identification. This class is used as a\n father class for model structure classes, where\n several are available, such as:\n\n - QR decomposition\n - Condition Number\n - Effective Rank\n - Scalar Cross-correlation\n - Granger causality test (chi-squared test)\n\n -------------------------------------------- REFERENCES --------------------------------------------\n The theory behind this implementation was based in the following reference:\n\n PERETZKI, D. et al. Data mining of historic data for process identification.\n In: Proceedings of the 2011 AIChE Annual Meeting, p. 1027–1033, 2011.\n\n SHARDT, Y. A. W.; SHAH, S. L. Segmentation Methods for Model Identification from\n Historical Process Data. In: Proceedings of the 19th World Congress.\n Cape Town, South Africa: IFAC, 2014. p. 2836–2841.\n\n AGUIRRE, L. A. Introdução à Identificação de Sistemas:\n técnicas lineares e não lineares: teoria e aplicação. 4. ed.\n Belo Horizonte, Brasil: Editora UFMG, 2015.\n\n BITTENCOURT, A. C. et al. An algorithm for finding process identification\n intervals from normal operating data. Processes, v. 3, p. 357–383, 2015.\n\n RIBEIRO, A. H.; AGUIRRE, L. A. Selecting transients automatically\n for the identification of models for an oil well. IFAC-PapersOnLine,\n v. 48, n. 6, p. 154–158, 2015.\n\n PATEL, A. Data Mining of Process Data in Mutlivariable Systems.\n Degree project in electrical engineering — Royal Institute of Technology,\n Stockholm, Sweden, 2016.\n\n ARENGAS, D.; KROLL, A. A Search Method for Selecting Informative Data in Predominantly\n Stationary Historical Records for Multivariable System Identification.\n In: Proceedings of the 21st International Conference on System Theory,\n Control and Computing (ICSTCC). Sinaia, Romenia: IEEE, 2017a. p. 100–105.\n\n ARENGAS, D.; KROLL, A. Searching for informative intervals in predominantly stationary\n data records to support system identification. In: Proceedings of the XXVI International\n Conference on Information, Communication and Automation Technologies (ICAT). 
Sarajevo,\n Bosnia-Herzegovina: IEEE, 2017b.\n \"\"\"\n\n def __init__(self):\n pass\n\n def _verify_data(self, X, y):\n \"\"\"\n Verifies the data type and save data columns\n in case they are provided.\n\n Arguments:\n X: the input data in pandas dataframe format or numpy array\n y: the output data in pandas dataframe format or numpy array\n\n Output:\n X: the input data in numpy array format\n y: the input data in numpy array format\n X_cols: the input data columns in case they are provided\n y_cols: the output data columns in case they are provided\n \"\"\"\n if type(X) == pd.core.frame.DataFrame:\n X_cols = X.columns\n X = X.values\n if X.ndim == 1:\n X = X.reshape(-1, 1)\n elif type(X) == np.ndarray:\n X_cols = None\n if X.ndim == 1:\n X = X.reshape(-1, 1)\n else:\n raise Exception(\"Input data must be a pandas dataframe or a numpy array\")\n\n if type(y) == pd.core.frame.DataFrame:\n y_cols = y.columns\n y = y.values\n if y.ndim == 1:\n y = y.reshape(-1, 1)\n elif type(y) == np.ndarray:\n y_cols = None\n if y.ndim == 1:\n y = y.reshape(-1, 1)\n else:\n raise Exception(\"Input data must be a pandas dataframe or a numpy array\")\n\n return X, y, X_cols, y_cols\n\n def _initialize_metrics(self, X, y, X_cols, y_cols):\n \"\"\"\n This function initializes the following metrics:\n - Phi_dict: a dictionary of regressor matrices for each input/output and for each signal;\n - I_dict: a dictionary of information matrices of the form [Phi]^T[Phi];\n - cond_num_dict: a dictionary of condition numbers for each information matrix;\n - theta_dict: a dictionary of estimated parameter vectors phi = [ph1 ph2 ... phiNb];\n - chi_squared_dict: a dictionary of chi-squared test for validating the estimated parameters;\n - cross_corr_dict: a dictionary of cross-correlations for each input/output;\n - eff_rank_1_dict: a dictionary of type 1 effective ranks;\n - eff_rank_2_dict: a dictionary of type 2 effective ranks;\n - miso_ranks: a dictionary of effective ranks;\n - miso_correlations: a dictionary of cross-corellations for each input/output and for each signal;\n - Phi_aug_dict: a dictionary of augmented matrices of the form [Phi y].\n \"\"\"\n # Create Internal Variables\n self.Phi_dict = defaultdict(lambda: defaultdict(dict))\n self.I_dict = defaultdict(lambda: defaultdict(dict))\n self.cond_num_dict = defaultdict(lambda: defaultdict(dict))\n self.theta_dict = defaultdict(lambda: defaultdict(dict))\n self.chi_squared_dict = defaultdict(lambda: defaultdict(dict))\n self.cross_corr_dict = defaultdict(lambda: defaultdict(dict))\n self.eff_rank_1_dict = defaultdict(lambda: defaultdict(dict))\n self.eff_rank_2_dict = defaultdict(lambda: defaultdict(dict))\n self.miso_ranks = defaultdict(lambda: defaultdict(dict))\n self.miso_correlations = defaultdict(lambda: defaultdict(dict))\n self.Phi_aug_dict = defaultdict(lambda: defaultdict(dict))\n\n def _update_index_name(self, input_idx, X_cols, output_idx, y_cols):\n \"\"\"\n This function verifies if the provided data contains\n column names. 
In the case it does, the column name is\n used as index, otherwise the index number is concatenated\n with the word input or output, depending on the signal type.\n \"\"\"\n if X_cols is not None:\n input_idx_name = X_cols[input_idx]\n else:\n input_idx_name = \"input\" + \"_\" + str(input_idx)\n\n if y_cols is not None:\n output_idx_name = y_cols[output_idx]\n else:\n output_idx_name = \"output\" + \"_\" + str(output_idx)\n\n return input_idx_name, output_idx_name\n\n def _qr_factorization(\n self, y, input_idx, X_cols, output_idx, y_cols, segment, operation\n ):\n \"\"\"\n Performs a QR-Factorization (Decomposition) using numpy linear\n algebra library and uses the R matrix to solve the Ordinary Least\n Square (OLS) problem.\n\n Arguments:\n y: the ouput signals\n input_idx: the sequential number of the execution input;\n X_cols: the input data columns in case they are provided;\n output_idx: the sequential number of the execution output;\n y_cols: the output data columns in case they are provided;\n segment: the sequential number of the execution segment (interval).\n operation: which operation to perform (all, condition_number or chi_squared_test)\n \"\"\"\n\n # Take Column Names\n input_idx_name, output_idx_name = self._update_index_name(\n input_idx, X_cols, output_idx, y_cols\n )\n\n # Take Segment\n segment_idx = self.initial_intervals[segment]\n\n # Take Regressor Matrix\n Phi = self.Phi_dict[\"segment\" + \"_\" + str(segment)][output_idx_name][\n input_idx_name\n ]\n\n # Define the y shift according to the model structure\n # If a model structure is of order 3, for example, the\n # output used for fitting the model must start 3 samples\n # ahead. In that case, y_shift=3. For Laguerre models, the\n # y_shift is always 1, regardless of the model order.\n y_length = len(y[segment_idx, output_idx])\n regressor_length = Phi.shape[0]\n y_shift = y_length - regressor_length\n\n # Create the Augmented Regressor Matrix [Phi y]\n self.Phi_aug_dict[\"segment\" + \"_\" + str(segment)][output_idx_name][\n input_idx_name\n ] = np.zeros((len(segment_idx[y_shift:]), self.Nb + 1))\n\n self.Phi_aug_dict[\"segment\" + \"_\" + str(segment)][output_idx_name][\n input_idx_name\n ][: Phi.shape[0], : self.Nb] = Phi\n\n self.Phi_aug_dict[\"segment\" + \"_\" + str(segment)][output_idx_name][\n input_idx_name\n ][:, -1] = np.squeeze(y[segment_idx, output_idx][y_shift:])\n\n # QR-Factorization\n Q, R = LA.qr(\n self.Phi_aug_dict[\"segment\" + \"_\" + str(segment)][output_idx_name][\n input_idx_name\n ]\n )\n R1 = R[: self.Nb, : self.Nb]\n R2 = R[: self.Nb, self.Nb]\n R3 = R[self.Nb, self.Nb]\n\n # Comput Theta, Information Matrix and its Condition Number and the chi-squared Test\n if operation in (\"all\", \"condition_number\"):\n self.I_dict[\"segment\" + \"_\" + str(segment)][output_idx_name][\n input_idx_name\n ] = (1 / len(np.squeeze(y[segment_idx, output_idx][y_shift:]))) * np.matmul(\n R1.T, R1\n )\n\n self.cond_num_dict[\"segment\" + \"_\" + str(segment)][output_idx_name][\n input_idx_name\n ] = LA.cond(\n self.I_dict[\"segment\" + \"_\" + str(segment)][output_idx_name][\n input_idx_name\n ]\n )\n\n if operation in (\"all\", \"chi_squared_test\"):\n try:\n self.theta_dict[\"segment\" + \"_\" + str(segment)][output_idx_name][\n input_idx_name\n ] = np.matmul(LA.inv(R1), R2)\n except:\n pass\n\n self.chi_squared_dict[\"segment\" + \"_\" + str(segment)][output_idx_name][\n input_idx_name\n ] = (\n np.sqrt(len(np.squeeze(y[segment_idx, output_idx][y_shift:])))\n / np.abs(R3)\n ) * LA.norm(\n x=R2, 
ord=2\n )\n\n def _cross_correlation_scalar_metric(self, X, y, delay, cc_alpha):\n \"\"\"\n Computes a scalar metric that represents the cross-correlation\n function for a range of lag values. The lag range goes from\n -delay to delay. The normalized cross-correlation is computed\n for signals X and y and compared to the critical value of a\n two-sided normal distribution for an alpha confidence value.\n\n This metric is proposed in the following reference:\n\n RIBEIRO, A. H.; AGUIRRE, L. A. Selecting transients automatically\n for the identification of models for an oil well. IFAC-PapersOnLine,\n v. 48, n. 6, p. 154–158, 2015.\n\n Arguments:\n X: the input signal;\n y: the output signal;\n delay: the maximum/minimum cross-correlation lag value between the input and the output signals;\n cc_alpha: the confidence value for a normal distribution.\n\n Output:\n ccsm: the cross-correlation scalar metric.\n \"\"\"\n # Compute p-value\n p = sci_norm.ppf(1 - (cc_alpha) / 2) / np.sqrt(len(X))\n\n s_arr = []\n for d in range(-delay, delay + 1):\n # Compute Normalized Cross Corellation for current delay\n ncc = self._normalized_cross_correlation(X=X, y=y, delay=d)\n\n if np.abs(ncc) <= p:\n s_arr.append(0)\n elif np.abs(ncc) > p and d != 0:\n s_arr.append((np.abs(ncc) - p) / np.abs(d))\n else:\n s_arr.append(np.abs(ncc) - p)\n\n ccsm = np.sum(s_arr)\n\n return ccsm\n\n def _normalized_cross_correlation(self, X, y, delay):\n \"\"\"\n Computes the normalized cross-correlation function\n of signals X and y for a given delay value.\n\n Arguments:\n X: the input signal;\n y: the output signal;\n delay: the delay between both signals.\n\n Output:\n ncc: the normalized cross-correlation value.\n \"\"\"\n X_mean = np.mean(X)\n y_mean = np.mean(y)\n\n if delay < 0:\n num = np.sum(\n [\n (X[idx] - X_mean) * (y[idx + delay] - y_mean)\n for idx in range(np.abs(delay), len(X))\n ]\n )\n den_1 = np.sum(\n [(X[idx] - X_mean) ** 2 for idx in range(np.abs(delay), len(X))]\n )\n den_2 = np.sum(\n [(y[idx + delay] - y_mean) ** 2 for idx in range(np.abs(delay), len(X))]\n )\n den = np.sqrt(den_1 * den_2)\n else:\n num = np.sum(\n [\n (X[idx] - X_mean) * (y[idx + delay] - y_mean)\n for idx in range(0, len(X) - delay)\n ]\n )\n den_1 = np.sum([(X[idx] - X_mean) ** 2 for idx in range(0, len(X) - delay)])\n den_2 = np.sum(\n [(y[idx + delay] - y_mean) ** 2 for idx in range(0, len(X) - delay)]\n )\n den = np.sqrt(den_1 * den_2)\n\n if den == 0:\n ncc = 0\n else:\n ncc = num / den\n\n return ncc\n\n def _effective_rank_type_2(self, singular_values, threshold):\n \"\"\"\n Compute the effective rank as a function of the difference\n of two consecutive singular values.\n\n This implementation was based on the following reference:\n\n RIBEIRO, A. H.; AGUIRRE, L. A. Selecting transients automatically\n for the identification of models for an oil well. IFAC-PapersOnLine,\n v. 48, n. 6, p. 
154–158, 2015.\n\n Arguments:\n singular_values: matrix singular values;\n threshold: effective rank threshold.\n\n Output:\n efr: the computed effective rank.\n \"\"\"\n\n efr_arr = []\n for idx in range(1, len(singular_values)):\n # Compute Consecutives Singular Values\n s_i_1 = singular_values[idx - 1]\n s_i = singular_values[idx]\n\n # Compute the difference of the consecutive singular values\n s_diff = s_i_1 - s_i\n\n # Compute effective rank for index idx\n if s_diff > threshold:\n efr_arr.append(1)\n else:\n efr_arr.append(0)\n\n efr = np.sum(efr_arr)\n\n return efr\n\n def _effective_rank_type_1(self, singular_values, threshold):\n \"\"\"\n Compute the effective rank as a function of the normalized\n singular values.\n\n This implementation was based on the following reference:\n\n RIBEIRO, A. H.; AGUIRRE, L. A. Selecting transients automatically\n for the identification of models for an oil well. IFAC-PapersOnLine,\n v. 48, n. 6, p. 154–158, 2015.\n\n Arguments:\n singular_values: matrix singular values;\n threshold: effective rank threshold.\n\n Output:\n efr: the computed effective rank.\n \"\"\"\n\n # Compute L1-norm\n l1_norm = np.sum([np.abs(s) for s in singular_values])\n\n # Compute Normalized Singular Values\n p_arr = [s / l1_norm for s in singular_values]\n\n # Compute Effective Rank for given Threshold\n efr = np.sum([1 if p > threshold else 0 for p in p_arr])\n\n return efr\n\n def _effective_rank(self, A, threshold, efr_type):\n \"\"\"\n Compute the effective rank of matrix A for\n a given threshold. Two types of effective\n rank are available and implemented based on the\n following reference:\n\n RIBEIRO, A. H.; AGUIRRE, L. A. Selecting transients automatically\n for the identification of models for an oil well. IFAC-PapersOnLine,\n v. 48, n. 6, p. 
154–158, 2015.\n\n Arguments:\n A: the input matrix;\n threshold: the threshold for computing the effective rank;\n efr_type: type_1 or type_2.\n\n Output:\n efr: the effective rank\n \"\"\"\n\n # Compute Singular Values of Matrix A\n _, singular_values, _ = LA.svd(A)\n\n # Compute Effective Rank\n if efr_type == \"type_1\":\n return self._effective_rank_type_1(\n singular_values=singular_values, threshold=threshold\n )\n elif efr_type == \"type_2\":\n return self._effective_rank_type_2(\n singular_values=singular_values, threshold=threshold\n )\n\n def _compute_miso_ranks(\n self, X, y, regressor_mtrx, input_idx, X_cols, output_idx, y_cols, segment\n ):\n \"\"\"\n For each MISO System, i.e., for each output, compute the effective rank\n of the AR Information matrix for the corresponding output.\n\n Arguments:\n y: the output signal\n output_idx: the sequential number of the execution output\n segment: the sequential number of the execution segment (interval)\n \"\"\"\n # Take Column Names\n input_idx_name, output_idx_name = self._update_index_name(\n input_idx, X_cols, output_idx, y_cols\n )\n\n # Compute the Effective Rank of the Information Matrix\n efr = self._effective_rank(\n A=regressor_mtrx, threshold=self.sv_thr, efr_type=self.efr_type\n )\n\n self.miso_ranks[\"segment\" + \"_\" + str(segment)][output_idx_name][\n input_idx_name\n ] = efr\n\n def _compute_miso_correlations(\n self, X, y, input_idx, X_cols, output_idx, y_cols, segment\n ):\n \"\"\"\n For each MISO System, i.e., for each output, compute the cross-correlation\n metric between each input and the corresponding output.\n\n Arguments:\n X: the input signal\n y: the output signal\n input_idx: the sequential number of the execution input\n output_idx: the sequential number of the execution output\n segment: the sequential number of the execution segment (interval)\n \"\"\"\n # Take Column Names\n input_idx_name, output_idx_name = self._update_index_name(\n input_idx, X_cols, output_idx, y_cols\n )\n\n ncc = self._cross_correlation_scalar_metric(\n X=X[:, input_idx][self.initial_intervals[segment]],\n y=y[:, output_idx][self.initial_intervals[segment]],\n delay=self.delay,\n cc_alpha=self.cc_alpha,\n )\n\n self.miso_correlations[\"segment\" + \"_\" + str(segment)][output_idx_name][\n input_idx_name\n ] = ncc\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.mean",
"numpy.var",
"numpy.stack",
"numpy.std",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.min",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks"
],
[
"scipy.stats.norm.ppf",
"numpy.linalg.svd",
"numpy.sqrt",
"numpy.abs",
"numpy.linalg.inv",
"numpy.squeeze",
"numpy.matmul",
"numpy.linalg.norm",
"numpy.mean",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
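The first code entry of the row above implements a recursive exponentially weighted moving average/variance filter for change-point detection. As a minimal, self-contained sketch of that per-sample recursion (function and variable names here are illustrative, not taken from the file, but the update formulas mirror `_exponential_moving_average_and_variance`):

```python
import numpy as np

def ewma_filter(x, lambda_u, lambda_v, mu0, v0):
    """Recursive exponentially weighted mean/variance, one sample at a time.

    Mirrors the update used in the ExponentiallyWeighted class:
        mu_k = lambda_u * x_k + (1 - lambda_u) * mu_{k-1}
        v_k  = ((2 - lambda_u) / 2) * (lambda_v * (x_k - mu_k)**2
                                       + (1 - lambda_v) * v_{k-1})
    """
    mu_prev, v_prev = mu0, v0
    mu_hist, v_hist = [], []
    for x_k in x:
        mu_k = lambda_u * x_k + (1 - lambda_u) * mu_prev
        v_k = ((2 - lambda_u) / 2) * (
            lambda_v * (x_k - mu_k) ** 2 + (1 - lambda_v) * v_prev
        )
        mu_hist.append(mu_k)
        v_hist.append(v_k)
        mu_prev, v_prev = mu_k, v_k
    return np.array(mu_hist), np.array(v_hist)

# Example: a step change around sample 200 pushes the filtered statistics
# past a threshold such as H_u = 5 * sigma, flagging a change-point region.
rng = np.random.default_rng(0)
signal = np.concatenate([rng.normal(0, 1, 200), rng.normal(5, 1, 200)])
mu, v = ewma_filter(signal, lambda_u=0.05, lambda_v=0.05,
                    mu0=signal[:100].mean(), v0=signal[:100].var())
change_points = np.where(np.abs(mu) >= 5 * signal.std())[0]
```

In the original class, an interval opens at the first index where the chosen statistic (mean, variance, or both) crosses its threshold and closes when the condition stops holding, which is what `_search_for_change_points` records into `self.intervals`.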
tjiagoM/quantised-bayesian-nets | [
"c6ff1db376c366633afa2845b7527cc144ffd3b2",
"c6ff1db376c366633afa2845b7527cc144ffd3b2"
] | [
"experiments/scripts/stochastic/bbb/float/bbb_mnist.py",
"tests/plot_distortions.py"
] | [
"import sys\nimport torch\nimport argparse\nfrom datetime import timedelta\nimport logging\n\nsys.path.append(\"../\")\nsys.path.append(\"../../\")\nsys.path.append(\"../../../\")\nsys.path.append(\"../../../../\")\nsys.path.append(\"../../../../../\")\n\nfrom experiments.utils import evaluate_mnist_uncertainty\nfrom src.data import *\nfrom src.trainer import Trainer\nfrom src.models import ModelFactory\nfrom src.losses import LOSS_FACTORY\nimport src.utils as utils\n\nparser = argparse.ArgumentParser(\"mnist_classifier\")\n\nparser.add_argument('--task', type=str, default='classification', help='the main task; defines loss')\nparser.add_argument('--model', type=str, default='conv_lenet_bbb', help='the model that we want to train')\n\nparser.add_argument('--learning_rate', type=float,\n default=0.001, help='init learning rate')\nparser.add_argument('--loss_scaling', type=str,\n default='batch', help='smoothing factor')\nparser.add_argument('--weight_decay', type=float,\n default=0.0, help='weight decay')\n\nparser.add_argument('--data', type=str, default='./../../../data/',\n help='location of the data corpus')\nparser.add_argument('--dataset', type=str, default='mnist',\n help='dataset')\nparser.add_argument('--batch_size', type=int, default=256, help='batch size')\n\nparser.add_argument('--valid_portion', type=float,\n default=0.1, help='portion of training data')\n\nparser.add_argument('--gamma', type=float,\n default=.1, help='portion of training data')\nparser.add_argument('--sigma_prior', type=float,\n default=.1, help='portion of training data')\n \nparser.add_argument('--epochs', type=int, default=100,\n help='num of training epochs')\n\nparser.add_argument('--input_size', nargs='+',\n default=[1, 1, 28, 28], help='input size')\nparser.add_argument('--output_size', type=int,\n default=10, help='output size')\nparser.add_argument('--samples', type=int,\n default=20, help='output size')\n\nparser.add_argument('--save', type=str, default='EXP', help='experiment name')\nparser.add_argument('--save_last', action='store_true', default=True,\n help='whether to just save the last model') \n\nparser.add_argument('--num_workers', type=int,\n default=16, help='number of workers')\nparser.add_argument('--seed', type=int, default=1, help='random seed')\nparser.add_argument('--debug', action='store_true', help='whether we are currently debugging')\n\nparser.add_argument('--report_freq', type=float,\n default=50, help='report frequency')\nparser.add_argument('--gpu', type=int, default = 0, help='gpu device ids')\n\nparser.add_argument('--q', action='store_true',\n help='whether to do post training quantisation')\nparser.add_argument('--at', action='store_true',\n help='whether to do training aware quantisation')\n\n\ndef main():\n args = parser.parse_args()\n load = False\n if args.save!='EXP':\n load=True\n\n args, writer = utils.parse_args(args)\n \n logging.info('# Start Re-training #')\n \n criterion = LOSS_FACTORY[args.task](args, args.loss_scaling)\n\n model_temp = ModelFactory.get_model\n\n logging.info('## Downloading and preparing data ##')\n train_loader, valid_loader= get_train_loaders(args)\n\n if not load:\n model= model_temp(args.model, args.input_size, args.output_size, args.at, args)\n\n logging.info('## Model created: ##')\n logging.info(model.__repr__())\n\n \n logging.info('### Loading model to parallel GPUs ###')\n\n model = utils.model_to_gpus(model, args)\n\n logging.info('### Preparing schedulers and optimizers ###')\n optimizer = torch.optim.Adam(\n model.parameters(),\n 
args.learning_rate,\n weight_decay = args.weight_decay)\n\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer, args.epochs)\n logging.info('## Downloading and preparing data ##')\n train_loader, valid_loader= get_train_loaders(args)\n\n logging.info('## Beginning Training ##')\n\n train = Trainer(model, criterion, optimizer, scheduler, args)\n\n best_error, train_time, val_time = train.train_loop(\n train_loader, valid_loader, writer)\n\n logging.info('## Finished training, the best observed validation error: {}, total training time: {}, total validation time: {} ##'.format(\n best_error, timedelta(seconds=train_time), timedelta(seconds=val_time)))\n\n logging.info('## Beginning Plotting ##')\n del model \n\n with torch.no_grad():\n model = model_temp(args.model, args.input_size, args.output_size, args.q, args)\n\n utils.load_model(model, args.save+\"/weights.pt\")\n\n logging.info('## Model re-created: ##')\n logging.info(model.__repr__())\n\n model = utils.model_to_gpus(model, args)\n model.eval()\n evaluate_mnist_uncertainty(model, args)\n\n logging.info('# Finished #')\n \n\nif __name__ == '__main__':\n main()\n",
"import sys\nimport argparse\nimport numpy as np\nimport logging \n\nsys.path.append(\"../\")\nsys.path.append(\"../../\")\nsys.path.append(\"../../../\")\nsys.path.append(\"../../../../\")\nsys.path.append(\"../../../../../\")\n\n\nfrom experiments.utils import DISTORTIONS, LEVELS\nimport src.utils as utils\nfrom src.data import get_test_loader\nfrom experiments.presentation.plot_settings import PLT as plt\nimport matplotlib.gridspec as gridspec\nfrom src.data import CIFAR_MEAN, CIFAR_STD\n\nparser = argparse.ArgumentParser(\"test_distortions\")\n\nparser.add_argument('--save', type=str, default='EXP', help='experiment name')\nparser.add_argument('--data', type=str, default='./../experiments/data', help='experiment name')\n\nparser.add_argument('--label', type=str, default='test_distortions', help='default experiment category ')\nparser.add_argument('--dataset', type=str, default='mnist', help='default dataset ')\nparser.add_argument('--batch_size', type=int, default=64, help='default batch size')\nparser.add_argument('--num_workers', type=int, default=1, help='default batch size')\n\n\nparser.add_argument('--seed', type=int, default=1, help='random seed')\nparser.add_argument('--debug', action='store_true', help='whether we are currently debugging')\n\nparser.add_argument('--gpu', type=int, default = 0, help='gpu device ids')\n\n\n\ndef main():\n args = parser.parse_args()\n args, _ = utils.parse_args(args, args.label)\n logging.info('## Testing distortions ##')\n\n for distortion in DISTORTIONS:\n plt.figure(figsize=(3, 1))\n gs = gridspec.GridSpec(1, 5)\n gs.update(wspace=0, hspace=0)\n for level in range(LEVELS):\n test_loader = get_test_loader(args, distortion=distortion, level=level)\n input, _ = next(iter(test_loader))\n plt.subplot(gs[level])\n if args.dataset == \"mnist\":\n image = input[0]\n plt.imshow(image.squeeze().numpy(), cmap='gray')\n elif args.dataset == \"cifar\":\n image = input[2]\n means = np.array(CIFAR_MEAN).reshape((3,1,1))\n stds = np.array(CIFAR_STD).reshape((3,1,1))\n image = (image.numpy()*stds)+means\n plt.imshow(np.transpose(image,(1,2,0)))\n\n plt.axis('off')\n plt.tight_layout()\n path = utils.check_path(args.save+'/{}.png'.format(distortion))\n plt.savefig(path)\n\nif __name__ == '__main__':\n main()"
] | [
[
"torch.no_grad",
"torch.optim.lr_scheduler.CosineAnnealingLR"
],
[
"numpy.array",
"matplotlib.gridspec.GridSpec",
"numpy.transpose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
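In the second code entry of the row above (`tests/plot_distortions.py`), CIFAR images are un-normalized before plotting by reshaping the per-channel means and standard deviations to shape (3, 1, 1) and transposing CHW to HWC for `imshow`. A small stand-alone sketch of that step, assuming placeholder statistics rather than the repository's `CIFAR_MEAN` / `CIFAR_STD` constants:

```python
import numpy as np
import matplotlib.pyplot as plt

# Placeholder per-channel statistics; the script imports CIFAR_MEAN / CIFAR_STD
# from src.data instead of hard-coding them.
cifar_mean = np.array([0.49, 0.48, 0.45]).reshape((3, 1, 1))
cifar_std = np.array([0.25, 0.24, 0.26]).reshape((3, 1, 1))

def to_displayable(image_chw):
    """Undo per-channel normalization and convert CHW -> HWC for imshow."""
    image = image_chw * cifar_std + cifar_mean   # invert (x - mean) / std
    return np.transpose(image, (1, 2, 0))        # channels last

# Usage with a dummy normalized image of shape (3, 32, 32):
dummy = np.random.randn(3, 32, 32)
plt.imshow(np.clip(to_displayable(dummy), 0, 1))
plt.axis("off")
```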
synapticarbors/DEODR | [
"e67f1792de90669b9adbf1a8103a9ca3b2c2c3dc"
] | [
"deodr/pytorch/mesh_fitter_pytorch.py"
] | [
"\"\"\"Modules containing pytorch classes to fit 3D meshes to images using differentiable rendering.\"\"\"\n\n\nimport copy\n\nimport numpy as np\n\nimport scipy.sparse.linalg\nimport scipy.spatial.transform.rotation\n\nimport torch\n\nfrom . import CameraPytorch, LaplacianRigidEnergyPytorch, Scene3DPytorch\nfrom .triangulated_mesh_pytorch import ColoredTriMeshPytorch as ColoredTriMesh\nfrom .triangulated_mesh_pytorch import TriMeshPytorch as TriMesh\nfrom .. import LaplacianRigidEnergy\n\n\ndef print_grad(name):\n # to visualize the gradient of a variable use\n # variable_name.register_hook(print_grad('variable_name'))\n def hook(grad):\n print(f\"grad {name} = {grad}\")\n\n return hook\n\n\ndef qrot(q, v):\n qr = q[None, :].repeat(v.shape[0], 1)\n qvec = qr[:, :-1]\n uv = torch.cross(qvec, v, dim=1)\n uuv = torch.cross(qvec, uv, dim=1)\n return v + 2 * (qr[:, [3]] * uv + uuv)\n\n\nclass MeshDepthFitterEnergy(torch.nn.Module):\n \"\"\"Pytorch module to fit a deformable mesh to a depth image.\"\"\"\n\n def __init__(self, vertices, faces, euler_init, translation_init, cregu=2000):\n super(MeshDepthFitterEnergy, self).__init__()\n self.mesh = TriMesh(\n faces[:, ::-1].copy(), vertices\n ) # we do a copy to avoid negative stride not supported by pytorch\n object_center = vertices.mean(axis=0)\n object_radius = np.max(np.std(vertices, axis=0))\n self.camera_center = object_center + np.array([-0.5, 0, 5]) * object_radius\n self.scene = Scene3DPytorch()\n self.scene.set_mesh(self.mesh)\n self.rigid_energy = LaplacianRigidEnergyPytorch(self.mesh, vertices, cregu)\n self.Vinit = copy.copy(self.mesh.vertices)\n self.Hfactorized = None\n self.Hpreconditioner = None\n self.transform_quaternion_init = scipy.spatial.transform.Rotation.from_euler(\n \"zyx\", euler_init\n ).as_quat()\n self.transform_translation_init = translation_init\n self._vertices = torch.nn.Parameter(\n torch.tensor(self.Vinit, dtype=torch.float64)\n )\n self.quaternion = torch.nn.Parameter(\n torch.tensor(self.transform_quaternion_init, dtype=torch.float64)\n )\n self.translation = torch.nn.Parameter(\n torch.tensor(self.transform_translation_init, dtype=torch.float64)\n )\n\n def set_max_depth(self, max_depth):\n self.scene.max_depth = max_depth\n self.scene.set_background(\n np.full((self.height, self.width, 1), max_depth, dtype=np.float)\n )\n\n def set_depth_scale(self, depth_scale):\n self.depthScale = depth_scale\n\n def set_image(self, hand_image, focal=None, distortion=None):\n self.width = hand_image.shape[1]\n self.height = hand_image.shape[0]\n assert hand_image.ndim == 2\n self.hand_image = hand_image\n if focal is None:\n focal = 2 * self.width\n\n rot = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])\n t = -rot.T.dot(self.camera_center)\n intrinsic = np.array(\n [[focal, 0, self.width / 2], [0, focal, self.height / 2], [0, 0, 1]]\n )\n extrinsic = np.column_stack((rot, t))\n self.camera = CameraPytorch(\n extrinsic=extrinsic, intrinsic=intrinsic, distortion=distortion\n )\n self.iter = 0\n\n def forward(self):\n q_normalized = self.quaternion / self.quaternion.norm()\n print(self.quaternion.norm())\n vertices_centered = self._vertices - torch.mean(self._vertices, dim=0)[None, :]\n v_transformed = qrot(q_normalized, vertices_centered) + self.translation\n self.mesh.set_vertices(v_transformed)\n depth_scale = 1 * self.depthScale\n depth = self.scene.render_depth(\n self.CameraMatrix,\n width=self.width,\n height=self.height,\n depth_scale=depth_scale,\n )\n depth = torch.clamp(depth, 0, self.scene.max_depth)\n diff_image 
= torch.sum(\n (depth - torch.tensor(self.hand_image[:, :, None])) ** 2, dim=2\n )\n self.depth = depth\n self.diff_image = diff_image\n energy_data = torch.sum(diff_image)\n energy_rigid = self.rigid_energy.evaluate(\n self._vertices, return_grad=False, return_hessian=False\n )\n energy = energy_data + energy_rigid\n self.loss = energy_data + energy_rigid\n print(\"Energy=%f : EData=%f E_rigid=%f\" % (energy, energy_data, energy_rigid))\n return self.loss\n\n\nclass MeshDepthFitterPytorchOptim:\n \"\"\"Pytorch optimizer to fit a deformable mesh to an image.\"\"\"\n\n def __init__(\n self, vertices, faces, euler_init, translation_init, cregu=2000, lr=0.8\n ):\n self.energy = MeshDepthFitterEnergy(\n vertices, faces, euler_init, translation_init, cregu\n )\n params = self.energy.parameters()\n self.optimizer = torch.optim.LBFGS(params, lr=0.8, max_iter=1)\n # self.optimizer = torch.optim.SGD(params, lr=0.000005, momentum=0.1,\n # dampening=0.1 )\n # self.optimizer =torch.optim.RMSprop(params, lr=1e-3, alpha=0.99, eps=1e-8,\n # weight_decay=0, momentum=0.001)\n # self.optimizer = torch.optim.Adadelta(params, lr=0.1, rho=0.95,\n # eps=1e-6, weight_decay=0)\n # self.optimizer = torch.optim.Adagrad(self.energy.parameters(), lr=0.02)\n\n def set_image(self, depth_image, focal):\n self.energy.set_image(depth_image, focal=focal)\n\n def set_max_depth(self, max_depth):\n self.energy.set_max_depth(max_depth)\n\n def set_depth_scale(self, depth_scale):\n self.energy.set_depth_scale(depth_scale)\n\n def step(self):\n def closure():\n self.optimizer.zero_grad()\n loss = self.energy()\n loss.backward()\n return loss\n\n self.optimizer.step(closure)\n # self.iter += 1\n return (\n self.energy.loss,\n self.energy.Depth[:, :, 0].detach().numpy(),\n self.energy.diffImage.detach().numpy(),\n )\n\n\nclass MeshDepthFitter:\n \"\"\"Class to fit a deformable mesh to a depth image.\"\"\"\n\n def __init__(\n self,\n vertices,\n faces,\n euler_init,\n translation_init,\n cregu=2000,\n inertia=0.96,\n damping=0.05,\n ):\n self.cregu = cregu\n self.inertia = inertia\n self.damping = damping\n self.step_factor_vertices = 0.0005\n self.step_max_vertices = 0.5\n self.step_factor_quaternion = 0.00006\n self.step_max_quaternion = 0.1\n self.step_factor_translation = 0.00005\n self.step_max_translation = 0.1\n\n self.mesh = TriMesh(\n faces.copy()\n ) # we do a copy to avoid negative stride not support by pytorch\n object_center = vertices.mean(axis=0) + translation_init\n object_radius = np.max(np.std(vertices, axis=0))\n self.camera_center = object_center + np.array([-0.5, 0, 5]) * object_radius\n\n self.scene = Scene3DPytorch()\n self.scene.set_mesh(self.mesh)\n self.rigid_energy = LaplacianRigidEnergy(self.mesh, vertices, cregu)\n self.vertices_init = torch.tensor(copy.copy(vertices))\n self.Hfactorized = None\n self.Hpreconditioner = None\n self.set_mesh_transform_init(euler=euler_init, translation=translation_init)\n self.reset()\n\n def set_mesh_transform_init(self, euler, translation):\n self.transform_quaternion_init = scipy.spatial.transform.Rotation.from_euler(\n \"zyx\", euler\n ).as_quat()\n self.transform_translation_init = translation\n\n def reset(self):\n self.vertices = copy.copy(self.vertices_init)\n self.speed_vertices = np.zeros(self.vertices_init.shape)\n self.transform_quaternion = copy.copy(self.transform_quaternion_init)\n self.transform_translation = copy.copy(self.transform_translation_init)\n self.speed_translation = np.zeros(3)\n self.speed_quaternion = np.zeros(4)\n\n def set_max_depth(self, 
max_depth):\n self.scene.max_depth = max_depth\n self.scene.set_background(\n np.full((self.height, self.width, 1), max_depth, dtype=np.float)\n )\n\n def set_depth_scale(self, depth_scale):\n self.depthScale = depth_scale\n\n def set_image(self, hand_image, focal=None, distortion=None):\n self.width = hand_image.shape[1]\n self.height = hand_image.shape[0]\n assert hand_image.ndim == 2\n self.hand_image = hand_image\n if focal is None:\n focal = 2 * self.width\n\n rot = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])\n trans = -rot.T.dot(self.camera_center)\n intrinsic = np.array(\n [[focal, 0, self.width / 2], [0, focal, self.height / 2], [0, 0, 1]]\n )\n extrinsic = np.column_stack((rot, trans))\n self.camera = CameraPytorch(\n extrinsic=extrinsic,\n intrinsic=intrinsic,\n width=self.width,\n height=self.height,\n distortion=distortion,\n )\n self.iter = 0\n\n def step(self):\n self.vertices = self.vertices - torch.mean(self.vertices, dim=0)[None, :]\n # vertices_with_grad = self.vertices.clone().requires_grad(True)\n vertices_with_grad = self.vertices.clone().detach().requires_grad_(True)\n vertices_with_grad_centered = (\n vertices_with_grad - torch.mean(vertices_with_grad, dim=0)[None, :]\n )\n quaternion_with_grad = torch.tensor(\n self.transform_quaternion, dtype=torch.float64, requires_grad=True\n )\n translation_with_grad = torch.tensor(\n self.transform_translation, dtype=torch.float64, requires_grad=True\n )\n\n q_normalized = (\n quaternion_with_grad / quaternion_with_grad.norm()\n ) # that will lead to a gradient that is in the tangeant space\n vertices_with_grad_transformed = (\n qrot(q_normalized, vertices_with_grad_centered) + translation_with_grad\n )\n\n self.mesh.set_vertices(vertices_with_grad_transformed)\n\n depth_scale = 1 * self.depthScale\n depth = self.scene.render_depth(\n self.camera, width=self.width, height=self.height, depth_scale=depth_scale\n )\n depth = torch.clamp(depth, 0, self.scene.max_depth)\n\n diff_image = torch.sum(\n (depth - torch.tensor(self.hand_image[:, :, None])) ** 2, dim=2\n )\n loss = torch.sum(diff_image)\n\n loss.backward()\n energy_data = loss.detach().numpy()\n\n grad_data = vertices_with_grad.grad.numpy()\n\n (\n energy_rigid,\n grad_rigidity,\n approx_hessian_rigidity,\n ) = self.rigid_energy.evaluate(self.vertices.numpy())\n energy = energy_data + energy_rigid\n print(\"Energy=%f : EData=%f E_rigid=%f\" % (energy, energy_data, energy_rigid))\n\n # update v\n grad = grad_data + grad_rigidity\n\n def mult_and_clamp(x, a, t):\n return np.minimum(np.maximum(x * a, -t), t)\n\n # update vertices\n step_vertices = mult_and_clamp(\n -grad, self.step_factor_vertices, self.step_max_vertices\n )\n self.speed_vertices = (1 - self.damping) * (\n self.speed_vertices * self.inertia + (1 - self.inertia) * step_vertices\n )\n self.vertices = self.vertices + torch.tensor(self.speed_vertices)\n # update rotation\n step_quaternion = mult_and_clamp(\n -quaternion_with_grad.grad.numpy(),\n self.step_factor_quaternion,\n self.step_max_quaternion,\n )\n self.speed_quaternion = (1 - self.damping) * (\n self.speed_quaternion * self.inertia + (1 - self.inertia) * step_quaternion\n )\n self.transform_quaternion = self.transform_quaternion + self.speed_quaternion\n self.transform_quaternion = self.transform_quaternion / np.linalg.norm(\n self.transform_quaternion\n )\n # update translation\n\n step_translation = mult_and_clamp(\n -translation_with_grad.grad.numpy(),\n self.step_factor_translation,\n self.step_max_translation,\n )\n self.speed_translation = (1 - 
self.damping) * (\n self.speed_translation * self.inertia\n + (1 - self.inertia) * step_translation\n )\n self.transform_translation = self.transform_translation + self.speed_translation\n\n self.iter += 1\n return energy, depth[:, :, 0].detach().numpy(), diff_image.detach().numpy()\n\n\nclass MeshRGBFitterWithPose:\n \"\"\"Class to fit a deformable mesh to a color image.\"\"\"\n\n def __init__(\n self,\n vertices,\n faces,\n euler_init,\n translation_init,\n default_color,\n default_light,\n cregu=2000,\n inertia=0.96,\n damping=0.05,\n update_lights=True,\n update_color=True,\n ):\n self.cregu = cregu\n\n self.inertia = inertia\n self.damping = damping\n self.step_factor_vertices = 0.0005\n self.step_max_vertices = 0.5\n self.step_factor_quaternion = 0.00006\n self.step_max_quaternion = 0.05\n self.step_factor_translation = 0.00005\n self.step_max_translation = 0.1\n\n self.default_color = default_color\n self.default_light = default_light\n self.update_lights = update_lights\n self.update_color = update_color\n self.mesh = ColoredTriMesh(\n faces.copy()\n ) # we do a copy to avoid negative stride not support by pytorch\n object_center = vertices.mean(axis=0) + translation_init\n object_radius = np.max(np.std(vertices, axis=0))\n self.camera_center = object_center + np.array([0, 0, 9]) * object_radius\n\n self.scene = Scene3DPytorch()\n self.scene.set_mesh(self.mesh)\n self.rigid_energy = LaplacianRigidEnergyPytorch(self.mesh, vertices, cregu)\n self.vertices_init = torch.tensor(copy.copy(vertices))\n self.Hfactorized = None\n self.Hpreconditioner = None\n self.set_mesh_transform_init(euler=euler_init, translation=translation_init)\n self.reset()\n\n def set_background_color(self, background_color):\n self.scene.set_background(\n np.tile(background_color[None, None, :], (self.height, self.width, 1))\n )\n\n def set_mesh_transform_init(self, euler, translation):\n self.transform_quaternion_init = scipy.spatial.transform.Rotation.from_euler(\n \"zyx\", euler\n ).as_quat()\n self.transform_translation_init = translation\n\n def reset(self):\n self.vertices = copy.copy(self.vertices_init)\n self.speed_vertices = np.zeros(self.vertices.shape)\n self.transform_quaternion = copy.copy(self.transform_quaternion_init)\n self.transform_translation = copy.copy(self.transform_translation_init)\n self.speed_translation = np.zeros(3)\n self.speed_quaternion = np.zeros(4)\n\n self.hand_color = copy.copy(self.default_color)\n self.light_directional = copy.copy(self.default_light[\"directional\"])\n self.light_ambient = copy.copy(self.default_light[\"ambient\"])\n\n self.speed_light_directional = np.zeros(self.light_directional.shape)\n self.speed_light_ambient = np.zeros(self.light_ambient.shape)\n self.speed_hand_color = np.zeros(self.hand_color.shape)\n\n def set_image(self, hand_image, focal=None, distortion=None):\n self.width = hand_image.shape[1]\n self.height = hand_image.shape[0]\n assert hand_image.ndim == 3\n self.hand_image = hand_image\n if focal is None:\n focal = 2 * self.width\n\n rot = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])\n trans = -rot.T.dot(self.camera_center)\n intrinsic = np.array(\n [[focal, 0, self.width / 2], [0, focal, self.height / 2], [0, 0, 1]]\n )\n extrinsic = np.column_stack((rot, trans))\n self.camera = CameraPytorch(\n extrinsic=extrinsic,\n intrinsic=intrinsic,\n width=self.width,\n height=self.height,\n distortion=distortion,\n )\n self.iter = 0\n\n def step(self):\n self.vertices = self.vertices - torch.mean(self.vertices, dim=0)[None, :]\n vertices_with_grad = 
self.vertices.clone().detach().requires_grad_(True)\n vertices_with_grad_centered = (\n vertices_with_grad - torch.mean(vertices_with_grad, dim=0)[None, :]\n )\n quaternion_with_grad = torch.tensor(\n self.transform_quaternion, dtype=torch.float64, requires_grad=True\n )\n translation_with_grad = torch.tensor(\n self.transform_translation, dtype=torch.float64, requires_grad=True\n )\n\n light_directional_with_grad = torch.tensor(\n self.light_directional, dtype=torch.float64, requires_grad=True\n )\n light_ambient_with_grad = torch.tensor(\n self.light_ambient, dtype=torch.float64, requires_grad=True\n )\n hand_color_with_grad = torch.tensor(\n self.hand_color, dtype=torch.float64, requires_grad=True\n )\n\n q_normalized = (\n quaternion_with_grad / quaternion_with_grad.norm()\n ) # that will lead to a gradient that is in the tangeant space\n vertices_with_grad_transformed = (\n qrot(q_normalized, vertices_with_grad_centered) + translation_with_grad\n )\n self.mesh.set_vertices(vertices_with_grad_transformed)\n\n self.scene.set_light(\n light_directional=light_directional_with_grad,\n light_ambient=light_ambient_with_grad,\n )\n self.mesh.set_vertices_colors(\n hand_color_with_grad.repeat([self.mesh.nb_vertices, 1])\n )\n\n image = self.scene.render(self.camera)\n\n diff_image = torch.sum((image - torch.tensor(self.hand_image)) ** 2, dim=2)\n loss = torch.sum(diff_image)\n\n loss.backward()\n energy_data = loss.detach().numpy()\n\n grad_data = vertices_with_grad.grad\n\n (\n energy_rigid,\n grad_rigidity,\n approx_hessian_rigidity,\n ) = self.rigid_energy.evaluate(self.vertices)\n energy = energy_data + energy_rigid.numpy()\n print(\"Energy=%f : EData=%f E_rigid=%f\" % (energy, energy_data, energy_rigid))\n\n # update v\n grad = grad_data + grad_rigidity\n\n def mult_and_clamp(x, a, t):\n return np.minimum(np.maximum(x * a, -t), t)\n\n inertia = self.inertia\n\n # update vertices\n step_vertices = mult_and_clamp(\n -grad.numpy(), self.step_factor_vertices, self.step_max_vertices\n )\n self.speed_vertices = (1 - self.damping) * (\n self.speed_vertices * inertia + (1 - inertia) * step_vertices\n )\n self.vertices = self.vertices + torch.tensor(self.speed_vertices)\n # update rotation\n step_quaternion = mult_and_clamp(\n -quaternion_with_grad.grad.numpy(),\n self.step_factor_quaternion,\n self.step_max_quaternion,\n )\n self.speed_quaternion = (1 - self.damping) * (\n self.speed_quaternion * inertia + (1 - inertia) * step_quaternion\n )\n self.transform_quaternion = self.transform_quaternion + self.speed_quaternion\n self.transform_quaternion = self.transform_quaternion / np.linalg.norm(\n self.transform_quaternion\n )\n\n # update translation\n step_translation = mult_and_clamp(\n -translation_with_grad.grad.numpy(),\n self.step_factor_translation,\n self.step_max_translation,\n )\n self.speed_translation = (1 - self.damping) * (\n self.speed_translation * inertia + (1 - inertia) * step_translation\n )\n self.transform_translation = self.transform_translation + self.speed_translation\n # update directional light\n step = -light_directional_with_grad.grad.numpy() * 0.0001\n self.speed_light_directional = (1 - self.damping) * (\n self.speed_light_directional * inertia + (1 - inertia) * step\n )\n self.light_directional = self.light_directional + self.speed_light_directional\n # update ambient light\n step = -light_ambient_with_grad.grad.numpy() * 0.0001\n self.speed_light_ambient = (1 - self.damping) * (\n self.speed_light_ambient * inertia + (1 - inertia) * step\n )\n self.light_ambient = 
self.light_ambient + self.speed_light_ambient\n # update hand color\n step = -hand_color_with_grad.grad.numpy() * 0.00001\n self.speed_hand_color = (1 - self.damping) * (\n self.speed_hand_color * inertia + (1 - inertia) * step\n )\n self.hand_color = self.hand_color + self.speed_hand_color\n\n self.iter += 1\n return energy, image.detach().numpy(), diff_image.detach().numpy()\n"
] | [
[
"torch.mean",
"numpy.maximum",
"torch.sum",
"numpy.linalg.norm",
"numpy.tile",
"torch.tensor",
"numpy.full",
"torch.optim.LBFGS",
"numpy.std",
"numpy.column_stack",
"torch.clamp",
"numpy.array",
"numpy.zeros",
"torch.cross"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JFK24/seqQscorer | [
"e08a59defbf16b7530eb6f3764f280e00c84ad1e"
] | [
"analysis_source_code/GEO_data_analysis/plot_PCA.py"
] | [
"\"\"\"Plot PCA for a given GEO dataset\n\nBased on the tpm expression values of the given GEO dataset this script\npost-processes the expression values and applies the PCA. The first two\nprincipal components are used to plot samples in two dimensions. As\ndescribed in the paper, the quality probabilities are added to 10% of\nthe samples with the highest probabilities.\n\nParameters\n----------\ngeo_id : str\n\tthe GEO dataset, either GSE77314 or GSE126848\n\ndate:\t2019-08-30\nauthor:\tSteffen Albrecht\n\t\n\"\"\"\n\nfrom sys import *\nimport os\nimport math\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn import preprocessing\nfrom sklearn.decomposition import PCA\n\n# get GEO ID and create file paths\ngeo_id = argv[1]\ntpm_data_file = './%s/featureCounts/tpm.tsv'%(geo_id)\nsra_data_file = './%s/SRA_list_comp.tsv'%(geo_id)\nquality_probs = './%s/quality_prbabilities.tsv'%(geo_id)\n\n# read in the data needed: TPM values, SRA metadata, quality probabilities\ntpm_data = pd.read_csv(tpm_data_file, sep='\\t')\nsra_data = pd.read_csv(sra_data_file, sep='\\t')\nquality_data = pd.read_csv(quality_probs, sep='\\t', names=['sra', 'probability', 'descr'])\n\n# extract column names that describe a gene ID\nsample_col = tpm_data.columns[0]\nsample_ids = list(tpm_data[sample_col])\ngene_cols = list(filter(lambda x: not x == sample_col, tpm_data.columns.values))\n\n# separate expression values, log transform, and apply scaling (standardize)\nX = tpm_data[gene_cols].values\nfor i in range(X.shape[1]):\n\tX[:,i] = list(map(lambda x: math.log(x+1, 2), X[:,i]))\nX = preprocessing.scale(X, axis=0)\n\n# apply default PCA on the expression values\npca = PCA(n_components=2)\ndim_red_results = pca.fit_transform(X)\ntpm_data['comp1'] = dim_red_results[:,0]\ntpm_data['comp2'] = dim_red_results[:,1]\n\n# add sample information from the SRA metadata\nhue_map = dict(zip(sra_data[sample_col], sra_data['disease']))\ntpm_data['Disease'] = [hue_map[sra] for sra in tpm_data[sample_col]]\nif geo_id == 'GSE126848':\n\tstyle_map = dict(zip(sra_data[sample_col], sra_data['gender']))\n\ttpm_data['Gender'] = [style_map[sra] for sra in tpm_data[sample_col]]\n\n# add quality probabilities to the dataset\nq_map = dict(zip(quality_data['sra'], quality_data['probability']))\ntpm_data['Quality'] = [q_map[sra] for sra in tpm_data[sample_col]]\n\n# define values that format the plot\nfs_rank = 14.0\nrank_diff = 4.0\nstar_x_shift = 5\nstar_y_shift = 0\nfs_legend_title = 14\nfs_ticks = 10\nfs_axis_labels = 15\n\n# plot quality probabilities of 10% with highest values\nthreshold = list(sorted(tpm_data['Quality']))[-int(0.1 * tpm_data.shape[0])]\nif True:\n\tfor index, row in tpm_data.iterrows():\n\t\tif row['Quality'] >= threshold:\n\t\t\tx = row['comp1']\n\t\t\ty = row['comp2']\n\t\t\tplt.text(x=x+5, y=y, s = '%.2f'%(row['Quality']), size = 12)\n\n# create and format the PCA plot\nax = None\nif geo_id == 'GSE126848':\n\tax = sns.scatterplot(x='comp1',y='comp2',hue='Disease', style='Gender', data=tpm_data, **{'s':75})\nelse:\n\tax = sns.scatterplot(x='comp1',y='comp2',hue='Disease', data=tpm_data, **{'s':75})\n\nplt.legend(loc='upper left', title_fontsize=16, fontsize=14,\n\t\t\t framealpha=0.5, frameon=True)\nplt.xticks(fontsize=fs_ticks)\nplt.yticks(fontsize=fs_ticks)\nplt.xlabel('Principal Component 1', fontsize=fs_axis_labels)\nplt.ylabel('Principal Component 2', fontsize=fs_axis_labels)\ntitle = 'PCA on GEO dataset'\nax.set_title(title, fontsize=16, 
fontstyle='normal')\nfig = ax.get_figure()\nfig.set_size_inches(6, 6)\nfig.savefig('./PCA.svg')\n\n\n\n\n\n\n\n\n\n\n"
] | [
[
"matplotlib.pyplot.text",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.yticks",
"pandas.read_csv",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"sklearn.preprocessing.scale",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
benblack769/ray | [
"def97a4c53772701fa3fe4b4f8512509672ecb84"
] | [
"rllib/evaluation/sampler.py"
] | [
"from abc import abstractmethod, ABCMeta\nfrom collections import defaultdict, namedtuple\nimport logging\nimport numpy as np\nimport queue\nimport threading\nimport time\nfrom typing import Any, Callable, Dict, List, Iterable, Optional, Set, Tuple,\\\n TYPE_CHECKING, Union\n\nfrom ray.util.debug import log_once\nfrom ray.rllib.evaluation.collectors.sample_collector import \\\n _SampleCollector\nfrom ray.rllib.evaluation.collectors.simple_list_collector import \\\n _SimpleListCollector\nfrom ray.rllib.evaluation.episode import MultiAgentEpisode\nfrom ray.rllib.evaluation.rollout_metrics import RolloutMetrics\nfrom ray.rllib.evaluation.sample_batch_builder import \\\n MultiAgentSampleBatchBuilder\nfrom ray.rllib.policy.policy import clip_action, Policy\nfrom ray.rllib.policy.tf_policy import TFPolicy\nfrom ray.rllib.models.preprocessors import Preprocessor\nfrom ray.rllib.utils.filter import Filter\nfrom ray.rllib.env.base_env import BaseEnv, ASYNC_RESET_RETURN\nfrom ray.rllib.env.atari_wrappers import get_wrapper_by_cls, MonitorEnv\nfrom ray.rllib.offline import InputReader\nfrom ray.rllib.utils.annotations import override, DeveloperAPI\nfrom ray.rllib.utils.debug import summarize\nfrom ray.rllib.utils.numpy import convert_to_numpy\nfrom ray.rllib.utils.spaces.space_utils import flatten_to_single_ndarray, \\\n unbatch\nfrom ray.rllib.utils.tf_run_builder import TFRunBuilder\nfrom ray.rllib.utils.typing import SampleBatchType, AgentID, PolicyID, \\\n EnvObsType, EnvInfoDict, EnvID, MultiEnvDict, EnvActionType, \\\n TensorStructType\n\nif TYPE_CHECKING:\n from ray.rllib.agents.callbacks import DefaultCallbacks\n from ray.rllib.evaluation.observation_function import ObservationFunction\n from ray.rllib.evaluation.rollout_worker import RolloutWorker\n\nlogger = logging.getLogger(__name__)\n\nPolicyEvalData = namedtuple(\"PolicyEvalData\", [\n \"env_id\", \"agent_id\", \"obs\", \"info\", \"rnn_state\", \"prev_action\",\n \"prev_reward\"\n])\n\n# A batch of RNN states with dimensions [state_index, batch, state_object].\nStateBatch = List[List[Any]]\n\n\nclass NewEpisodeDefaultDict(defaultdict):\n def __missing__(self, env_index):\n if self.default_factory is None:\n raise KeyError(env_index)\n else:\n ret = self[env_index] = self.default_factory(env_index)\n return ret\n\n\nclass _PerfStats:\n \"\"\"Sampler perf stats that will be included in rollout metrics.\"\"\"\n\n def __init__(self):\n self.iters = 0\n self.env_wait_time = 0.0\n self.raw_obs_processing_time = 0.0\n self.inference_time = 0.0\n self.action_processing_time = 0.0\n\n def get(self):\n # Mean multiplicator (1000 = ms -> sec).\n factor = 1000 / self.iters\n return {\n # Waiting for environment (during poll).\n \"mean_env_wait_ms\": self.env_wait_time * factor,\n # Raw observation preprocessing.\n \"mean_raw_obs_processing_ms\": self.raw_obs_processing_time *\n factor,\n # Computing actions through policy.\n \"mean_inference_ms\": self.inference_time * factor,\n # Processing actions (to be sent to env, e.g. 
clipping).\n \"mean_action_processing_ms\": self.action_processing_time * factor,\n }\n\n\n@DeveloperAPI\nclass SamplerInput(InputReader, metaclass=ABCMeta):\n \"\"\"Reads input experiences from an existing sampler.\"\"\"\n\n @override(InputReader)\n def next(self) -> SampleBatchType:\n batches = [self.get_data()]\n batches.extend(self.get_extra_batches())\n if len(batches) > 1:\n return batches[0].concat_samples(batches)\n else:\n return batches[0]\n\n @abstractmethod\n @DeveloperAPI\n def get_data(self) -> SampleBatchType:\n raise NotImplementedError\n\n @abstractmethod\n @DeveloperAPI\n def get_metrics(self) -> List[RolloutMetrics]:\n raise NotImplementedError\n\n @abstractmethod\n @DeveloperAPI\n def get_extra_batches(self) -> List[SampleBatchType]:\n raise NotImplementedError\n\n\n@DeveloperAPI\nclass SyncSampler(SamplerInput):\n \"\"\"Sync SamplerInput that collects experiences when `get_data()` is called.\n \"\"\"\n\n def __init__(self,\n *,\n worker: \"RolloutWorker\",\n env: BaseEnv,\n policies: Dict[PolicyID, Policy],\n policy_mapping_fn: Callable[[AgentID], PolicyID],\n preprocessors: Dict[PolicyID, Preprocessor],\n obs_filters: Dict[PolicyID, Filter],\n clip_rewards: bool,\n rollout_fragment_length: int,\n callbacks: \"DefaultCallbacks\",\n horizon: int = None,\n multiple_episodes_in_batch: bool = False,\n tf_sess=None,\n clip_actions: bool = True,\n soft_horizon: bool = False,\n no_done_at_end: bool = False,\n observation_fn: \"ObservationFunction\" = None,\n _use_trajectory_view_api: bool = False):\n \"\"\"Initializes a SyncSampler object.\n\n Args:\n worker (RolloutWorker): The RolloutWorker that will use this\n Sampler for sampling.\n env (Env): Any Env object. Will be converted into an RLlib BaseEnv.\n policies (Dict[str,Policy]): Mapping from policy ID to Policy obj.\n policy_mapping_fn (callable): Callable that takes an agent ID and\n returns a Policy object.\n preprocessors (Dict[str,Preprocessor]): Mapping from policy ID to\n Preprocessor object for the observations prior to filtering.\n obs_filters (Dict[str,Filter]): Mapping from policy ID to\n env Filter object.\n clip_rewards (Union[bool,float]): True for +/-1.0 clipping, actual\n float value for +/- value clipping. False for no clipping.\n rollout_fragment_length (int): The length of a fragment to collect\n before building a SampleBatch from the data and resetting\n the SampleBatchBuilder object.\n callbacks (Callbacks): The Callbacks object to use when episode\n events happen during rollout.\n horizon (Optional[int]): Hard-reset the Env\n multiple_episodes_in_batch (bool): Whether to pack multiple\n episodes into each batch. This guarantees batches will be\n exactly `rollout_fragment_length` in size.\n tf_sess (Optional[tf.Session]): A tf.Session object to use (only if\n framework=tf).\n clip_actions (bool): Whether to clip actions according to the\n given action_space's bounds.\n soft_horizon (bool): If True, calculate bootstrapped values as if\n episode had ended, but don't physically reset the environment\n when the horizon is hit.\n no_done_at_end (bool): Ignore the done=True at the end of the\n episode and instead record done=False.\n observation_fn (Optional[ObservationFunction]): Optional\n multi-agent observation func to use for preprocessing\n observations.\n _use_trajectory_view_api (bool): Whether to use the (experimental)\n `_use_trajectory_view_api` to make generic trajectory views\n available to Models. 
Default: False.\n \"\"\"\n\n self.base_env = BaseEnv.to_base_env(env)\n self.rollout_fragment_length = rollout_fragment_length\n self.horizon = horizon\n self.policies = policies\n self.policy_mapping_fn = policy_mapping_fn\n self.preprocessors = preprocessors\n self.obs_filters = obs_filters\n self.extra_batches = queue.Queue()\n self.perf_stats = _PerfStats()\n if _use_trajectory_view_api:\n self.sample_collector = _SimpleListCollector(\n policies, clip_rewards, callbacks, multiple_episodes_in_batch,\n rollout_fragment_length)\n else:\n self.sample_collector = None\n\n # Create the rollout generator to use for calls to `get_data()`.\n self.rollout_provider = _env_runner(\n worker, self.base_env, self.extra_batches.put, self.policies,\n self.policy_mapping_fn, self.rollout_fragment_length, self.horizon,\n self.preprocessors, self.obs_filters, clip_rewards, clip_actions,\n multiple_episodes_in_batch, callbacks, tf_sess, self.perf_stats,\n soft_horizon, no_done_at_end, observation_fn,\n _use_trajectory_view_api, self.sample_collector)\n self.metrics_queue = queue.Queue()\n\n @override(SamplerInput)\n def get_data(self) -> SampleBatchType:\n while True:\n item = next(self.rollout_provider)\n if isinstance(item, RolloutMetrics):\n self.metrics_queue.put(item)\n else:\n return item\n\n @override(SamplerInput)\n def get_metrics(self) -> List[RolloutMetrics]:\n completed = []\n while True:\n try:\n completed.append(self.metrics_queue.get_nowait()._replace(\n perf_stats=self.perf_stats.get()))\n except queue.Empty:\n break\n return completed\n\n @override(SamplerInput)\n def get_extra_batches(self) -> List[SampleBatchType]:\n extra = []\n while True:\n try:\n extra.append(self.extra_batches.get_nowait())\n except queue.Empty:\n break\n return extra\n\n\n@DeveloperAPI\nclass AsyncSampler(threading.Thread, SamplerInput):\n \"\"\"Async SamplerInput that collects experiences in thread and queues them.\n\n Once started, experiences are continuously collected and put into a Queue,\n from where they can be unqueued by the caller of `get_data()`.\n \"\"\"\n\n def __init__(self,\n *,\n worker: \"RolloutWorker\",\n env: BaseEnv,\n policies: Dict[PolicyID, Policy],\n policy_mapping_fn: Callable[[AgentID], PolicyID],\n preprocessors: Dict[PolicyID, Preprocessor],\n obs_filters: Dict[PolicyID, Filter],\n clip_rewards: bool,\n rollout_fragment_length: int,\n callbacks: \"DefaultCallbacks\",\n horizon: int = None,\n multiple_episodes_in_batch: bool = False,\n tf_sess=None,\n clip_actions: bool = True,\n blackhole_outputs: bool = False,\n soft_horizon: bool = False,\n no_done_at_end: bool = False,\n observation_fn: \"ObservationFunction\" = None,\n _use_trajectory_view_api: bool = False):\n \"\"\"Initializes a AsyncSampler object.\n\n Args:\n worker (RolloutWorker): The RolloutWorker that will use this\n Sampler for sampling.\n env (Env): Any Env object. Will be converted into an RLlib BaseEnv.\n policies (Dict[str, Policy]): Mapping from policy ID to Policy obj.\n policy_mapping_fn (callable): Callable that takes an agent ID and\n returns a Policy object.\n preprocessors (Dict[str, Preprocessor]): Mapping from policy ID to\n Preprocessor object for the observations prior to filtering.\n obs_filters (Dict[str, Filter]): Mapping from policy ID to\n env Filter object.\n clip_rewards (Union[bool, float]): True for +/-1.0 clipping, actual\n float value for +/- value clipping. 
False for no clipping.\n rollout_fragment_length (int): The length of a fragment to collect\n before building a SampleBatch from the data and resetting\n the SampleBatchBuilder object.\n callbacks (Callbacks): The Callbacks object to use when episode\n events happen during rollout.\n horizon (Optional[int]): Hard-reset the Env\n multiple_episodes_in_batch (bool): Whether to pack multiple\n episodes into each batch. This guarantees batches will be\n exactly `rollout_fragment_length` in size.\n tf_sess (Optional[tf.Session]): A tf.Session object to use (only if\n framework=tf).\n clip_actions (bool): Whether to clip actions according to the\n given action_space's bounds.\n blackhole_outputs (bool): Whether to collect samples, but then\n not further process or store them (throw away all samples).\n soft_horizon (bool): If True, calculate bootstrapped values as if\n episode had ended, but don't physically reset the environment\n when the horizon is hit.\n no_done_at_end (bool): Ignore the done=True at the end of the\n episode and instead record done=False.\n observation_fn (Optional[ObservationFunction]): Optional\n multi-agent observation func to use for preprocessing\n observations.\n _use_trajectory_view_api (bool): Whether to use the (experimental)\n `_use_trajectory_view_api` to make generic trajectory views\n available to Models. Default: False.\n \"\"\"\n for _, f in obs_filters.items():\n assert getattr(f, \"is_concurrent\", False), \\\n \"Observation Filter must support concurrent updates.\"\n self.worker = worker\n self.base_env = BaseEnv.to_base_env(env)\n threading.Thread.__init__(self)\n self.queue = queue.Queue(5)\n self.extra_batches = queue.Queue()\n self.metrics_queue = queue.Queue()\n self.rollout_fragment_length = rollout_fragment_length\n self.horizon = horizon\n self.policies = policies\n self.policy_mapping_fn = policy_mapping_fn\n self.preprocessors = preprocessors\n self.obs_filters = obs_filters\n self.clip_rewards = clip_rewards\n self.daemon = True\n self.multiple_episodes_in_batch = multiple_episodes_in_batch\n self.tf_sess = tf_sess\n self.callbacks = callbacks\n self.clip_actions = clip_actions\n self.blackhole_outputs = blackhole_outputs\n self.soft_horizon = soft_horizon\n self.no_done_at_end = no_done_at_end\n self.perf_stats = _PerfStats()\n self.shutdown = False\n self.observation_fn = observation_fn\n self._use_trajectory_view_api = _use_trajectory_view_api\n if _use_trajectory_view_api:\n self.sample_collector = _SimpleListCollector(\n policies, clip_rewards, callbacks, multiple_episodes_in_batch,\n rollout_fragment_length)\n else:\n self.sample_collector = None\n\n @override(threading.Thread)\n def run(self):\n try:\n self._run()\n except BaseException as e:\n self.queue.put(e)\n raise e\n\n def _run(self):\n if self.blackhole_outputs:\n queue_putter = (lambda x: None)\n extra_batches_putter = (lambda x: None)\n else:\n queue_putter = self.queue.put\n extra_batches_putter = (\n lambda x: self.extra_batches.put(x, timeout=600.0))\n rollout_provider = _env_runner(\n self.worker, self.base_env, extra_batches_putter, self.policies,\n self.policy_mapping_fn, self.rollout_fragment_length, self.horizon,\n self.preprocessors, self.obs_filters, self.clip_rewards,\n self.clip_actions, self.multiple_episodes_in_batch, self.callbacks,\n self.tf_sess, self.perf_stats, self.soft_horizon,\n self.no_done_at_end, self.observation_fn,\n self._use_trajectory_view_api, self.sample_collector)\n while not self.shutdown:\n # The timeout variable exists because apparently, if one 
worker\n # dies, the other workers won't die with it, unless the timeout is\n # set to some large number. This is an empirical observation.\n item = next(rollout_provider)\n if isinstance(item, RolloutMetrics):\n self.metrics_queue.put(item)\n else:\n queue_putter(item)\n\n @override(SamplerInput)\n def get_data(self) -> SampleBatchType:\n if not self.is_alive():\n raise RuntimeError(\"Sampling thread has died\")\n rollout = self.queue.get(timeout=600.0)\n\n # Propagate errors.\n if isinstance(rollout, BaseException):\n raise rollout\n\n return rollout\n\n @override(SamplerInput)\n def get_metrics(self) -> List[RolloutMetrics]:\n completed = []\n while True:\n try:\n completed.append(self.metrics_queue.get_nowait()._replace(\n perf_stats=self.perf_stats.get()))\n except queue.Empty:\n break\n return completed\n\n @override(SamplerInput)\n def get_extra_batches(self) -> List[SampleBatchType]:\n extra = []\n while True:\n try:\n extra.append(self.extra_batches.get_nowait())\n except queue.Empty:\n break\n return extra\n\n\ndef _env_runner(\n worker: \"RolloutWorker\",\n base_env: BaseEnv,\n extra_batch_callback: Callable[[SampleBatchType], None],\n policies: Dict[PolicyID, Policy],\n policy_mapping_fn: Callable[[AgentID], PolicyID],\n rollout_fragment_length: int,\n horizon: int,\n preprocessors: Dict[PolicyID, Preprocessor],\n obs_filters: Dict[PolicyID, Filter],\n clip_rewards: bool,\n clip_actions: bool,\n multiple_episodes_in_batch: bool,\n callbacks: \"DefaultCallbacks\",\n tf_sess: Optional[\"tf.Session\"],\n perf_stats: _PerfStats,\n soft_horizon: bool,\n no_done_at_end: bool,\n observation_fn: \"ObservationFunction\",\n _use_trajectory_view_api: bool = False,\n _sample_collector: Optional[_SampleCollector] = None,\n) -> Iterable[SampleBatchType]:\n \"\"\"This implements the common experience collection logic.\n\n Args:\n worker (RolloutWorker): Reference to the current rollout worker.\n base_env (BaseEnv): Env implementing BaseEnv.\n extra_batch_callback (fn): function to send extra batch data to.\n policies (Dict[PolicyID, Policy]): Map of policy ids to Policy\n instances.\n policy_mapping_fn (func): Function that maps agent ids to policy ids.\n This is called when an agent first enters the environment. The\n agent is then \"bound\" to the returned policy for the episode.\n rollout_fragment_length (int): Number of episode steps before\n `SampleBatch` is yielded. Set to infinity to yield complete\n episodes.\n horizon (int): Horizon of the episode.\n preprocessors (dict): Map of policy id to preprocessor for the\n observations prior to filtering.\n obs_filters (dict): Map of policy id to filter used to process\n observations for the policy.\n clip_rewards (bool): Whether to clip rewards before postprocessing.\n multiple_episodes_in_batch (bool): Whether to pack multiple\n episodes into each batch. 
This guarantees batches will be exactly\n `rollout_fragment_length` in size.\n clip_actions (bool): Whether to clip actions to the space range.\n callbacks (DefaultCallbacks): User callbacks to run on episode events.\n tf_sess (Session|None): Optional tensorflow session to use for batching\n TF policy evaluations.\n perf_stats (_PerfStats): Record perf stats into this object.\n soft_horizon (bool): Calculate rewards but don't reset the\n environment when the horizon is hit.\n no_done_at_end (bool): Ignore the done=True at the end of the episode\n and instead record done=False.\n observation_fn (ObservationFunction): Optional multi-agent\n observation func to use for preprocessing observations.\n _use_trajectory_view_api (bool): Whether to use the (experimental)\n `_use_trajectory_view_api` to make generic trajectory views\n available to Models. Default: False.\n _sample_collector (Optional[_SampleCollector]): An optional\n _SampleCollector object to use\n\n Yields:\n rollout (SampleBatch): Object containing state, action, reward,\n terminal condition, and other fields as dictated by `policy`.\n \"\"\"\n\n # Try to get Env's `max_episode_steps` prop. If it doesn't exist, ignore\n # error and continue with max_episode_steps=None.\n max_episode_steps = None\n try:\n max_episode_steps = base_env.get_unwrapped()[0].spec.max_episode_steps\n except Exception:\n pass\n\n # Trainer has a given `horizon` setting.\n if horizon:\n # `horizon` is larger than env's limit -> Error and explain how\n # to increase Env's own episode limit.\n if max_episode_steps and horizon > max_episode_steps:\n raise ValueError(\n \"Your `horizon` setting ({}) is larger than the Env's own \"\n \"timestep limit ({})! Try to increase the Env's limit via \"\n \"setting its `spec.max_episode_steps` property.\".format(\n horizon, max_episode_steps))\n # Otherwise, set Trainer's horizon to env's max-steps.\n elif max_episode_steps:\n horizon = max_episode_steps\n logger.debug(\n \"No episode horizon specified, setting it to Env's limit ({}).\".\n format(max_episode_steps))\n else:\n horizon = float(\"inf\")\n logger.debug(\"No episode horizon specified, assuming inf.\")\n\n # Pool of batch builders, which can be shared across episodes to pack\n # trajectory data.\n batch_builder_pool: List[MultiAgentSampleBatchBuilder] = []\n\n def get_batch_builder():\n if batch_builder_pool:\n return batch_builder_pool.pop()\n elif _use_trajectory_view_api:\n return None\n else:\n return MultiAgentSampleBatchBuilder(policies, clip_rewards,\n callbacks)\n\n def new_episode(env_index):\n episode = MultiAgentEpisode(policies, policy_mapping_fn,\n get_batch_builder, extra_batch_callback)\n # Call each policy's Exploration.on_episode_start method.\n # type: Policy\n for p in policies.values():\n if getattr(p, \"exploration\", None) is not None:\n p.exploration.on_episode_start(\n policy=p,\n environment=base_env,\n episode=episode,\n tf_sess=getattr(p, \"_sess\", None))\n callbacks.on_episode_start(\n worker=worker,\n base_env=base_env,\n policies=policies,\n episode=episode,\n env_index=env_index,\n )\n return episode\n\n active_episodes: Dict[str, MultiAgentEpisode] = \\\n NewEpisodeDefaultDict(new_episode)\n\n while True:\n perf_stats.iters += 1\n t0 = time.time()\n # Get observations from all ready agents.\n # type: MultiEnvDict, MultiEnvDict, MultiEnvDict, MultiEnvDict, ...\n unfiltered_obs, rewards, dones, infos, off_policy_actions = \\\n base_env.poll()\n perf_stats.env_wait_time += time.time() - t0\n\n if log_once(\"env_returns\"):\n 
logger.info(\"Raw obs from env: {}\".format(\n summarize(unfiltered_obs)))\n logger.info(\"Info return from env: {}\".format(summarize(infos)))\n\n # Process observations and prepare for policy evaluation.\n t1 = time.time()\n # type: Set[EnvID], Dict[PolicyID, List[PolicyEvalData]],\n # List[Union[RolloutMetrics, SampleBatchType]]\n if _use_trajectory_view_api:\n active_envs, to_eval, outputs = \\\n _process_observations_w_trajectory_view_api(\n worker=worker,\n base_env=base_env,\n policies=policies,\n active_episodes=active_episodes,\n unfiltered_obs=unfiltered_obs,\n rewards=rewards,\n dones=dones,\n infos=infos,\n horizon=horizon,\n preprocessors=preprocessors,\n obs_filters=obs_filters,\n multiple_episodes_in_batch=multiple_episodes_in_batch,\n callbacks=callbacks,\n soft_horizon=soft_horizon,\n no_done_at_end=no_done_at_end,\n observation_fn=observation_fn,\n _sample_collector=_sample_collector,\n )\n else:\n active_envs, to_eval, outputs = _process_observations(\n worker=worker,\n base_env=base_env,\n policies=policies,\n batch_builder_pool=batch_builder_pool,\n active_episodes=active_episodes,\n unfiltered_obs=unfiltered_obs,\n rewards=rewards,\n dones=dones,\n infos=infos,\n horizon=horizon,\n preprocessors=preprocessors,\n obs_filters=obs_filters,\n rollout_fragment_length=rollout_fragment_length,\n multiple_episodes_in_batch=multiple_episodes_in_batch,\n callbacks=callbacks,\n soft_horizon=soft_horizon,\n no_done_at_end=no_done_at_end,\n observation_fn=observation_fn,\n )\n perf_stats.raw_obs_processing_time += time.time() - t1\n for o in outputs:\n yield o\n\n # Do batched policy eval (accross vectorized envs).\n t2 = time.time()\n # type: Dict[PolicyID, Tuple[TensorStructType, StateBatch, dict]]\n if _use_trajectory_view_api:\n eval_results = _do_policy_eval_w_trajectory_view_api(\n to_eval=to_eval,\n policies=policies,\n _sample_collector=_sample_collector,\n active_episodes=active_episodes,\n tf_sess=tf_sess,\n )\n else:\n eval_results = _do_policy_eval(\n to_eval=to_eval,\n policies=policies,\n active_episodes=active_episodes,\n tf_sess=tf_sess,\n )\n perf_stats.inference_time += time.time() - t2\n\n # Process results and update episode state.\n t3 = time.time()\n actions_to_send: Dict[EnvID, Dict[AgentID, EnvActionType]] = \\\n _process_policy_eval_results(\n to_eval=to_eval,\n eval_results=eval_results,\n active_episodes=active_episodes,\n active_envs=active_envs,\n off_policy_actions=off_policy_actions,\n policies=policies,\n clip_actions=clip_actions,\n _use_trajectory_view_api=_use_trajectory_view_api,\n _sample_collector=_sample_collector,\n )\n perf_stats.action_processing_time += time.time() - t3\n\n # Return computed actions to ready envs. 
We also send to envs that have\n # taken off-policy actions; those envs are free to ignore the action.\n t4 = time.time()\n base_env.send_actions(actions_to_send)\n perf_stats.env_wait_time += time.time() - t4\n\n\ndef _process_observations(\n *,\n worker: \"RolloutWorker\",\n base_env: BaseEnv,\n policies: Dict[PolicyID, Policy],\n batch_builder_pool: List[MultiAgentSampleBatchBuilder],\n active_episodes: Dict[str, MultiAgentEpisode],\n unfiltered_obs: Dict[EnvID, Dict[AgentID, EnvObsType]],\n rewards: Dict[EnvID, Dict[AgentID, float]],\n dones: Dict[EnvID, Dict[AgentID, bool]],\n infos: Dict[EnvID, Dict[AgentID, EnvInfoDict]],\n horizon: int,\n preprocessors: Dict[PolicyID, Preprocessor],\n obs_filters: Dict[PolicyID, Filter],\n rollout_fragment_length: int,\n multiple_episodes_in_batch: bool,\n callbacks: \"DefaultCallbacks\",\n soft_horizon: bool,\n no_done_at_end: bool,\n observation_fn: \"ObservationFunction\",\n) -> Tuple[Set[EnvID], Dict[PolicyID, List[PolicyEvalData]], List[Union[\n RolloutMetrics, SampleBatchType]]]:\n \"\"\"Record new data from the environment and prepare for policy evaluation.\n\n Args:\n worker (RolloutWorker): Reference to the current rollout worker.\n base_env (BaseEnv): Env implementing BaseEnv.\n policies (dict): Map of policy ids to Policy instances.\n batch_builder_pool (List[SampleBatchBuilder]): List of pooled\n SampleBatchBuilder object for recycling.\n active_episodes (Dict[str, MultiAgentEpisode]): Mapping from\n episode ID to currently ongoing MultiAgentEpisode object.\n unfiltered_obs (dict): Doubly keyed dict of env-ids -> agent ids\n -> unfiltered observation tensor, returned by a `BaseEnv.poll()`\n call.\n rewards (dict): Doubly keyed dict of env-ids -> agent ids ->\n rewards tensor, returned by a `BaseEnv.poll()` call.\n dones (dict): Doubly keyed dict of env-ids -> agent ids ->\n boolean done flags, returned by a `BaseEnv.poll()` call.\n infos (dict): Doubly keyed dict of env-ids -> agent ids ->\n info dicts, returned by a `BaseEnv.poll()` call.\n horizon (int): Horizon of the episode.\n preprocessors (dict): Map of policy id to preprocessor for the\n observations prior to filtering.\n obs_filters (dict): Map of policy id to filter used to process\n observations for the policy.\n rollout_fragment_length (int): Number of episode steps before\n `SampleBatch` is yielded. Set to infinity to yield complete\n episodes.\n multiple_episodes_in_batch (bool): Whether to pack multiple\n episodes into each batch. 
This guarantees batches will be exactly\n `rollout_fragment_length` in size.\n callbacks (DefaultCallbacks): User callbacks to run on episode events.\n soft_horizon (bool): Calculate rewards but don't reset the\n environment when the horizon is hit.\n no_done_at_end (bool): Ignore the done=True at the end of the episode\n and instead record done=False.\n observation_fn (ObservationFunction): Optional multi-agent\n observation func to use for preprocessing observations.\n\n Returns:\n Tuple:\n - active_envs: Set of non-terminated env ids.\n - to_eval: Map of policy_id to list of agent PolicyEvalData.\n - outputs: List of metrics and samples to return from the sampler.\n \"\"\"\n\n # Output objects.\n active_envs: Set[EnvID] = set()\n to_eval: Dict[PolicyID, List[PolicyEvalData]] = defaultdict(list)\n outputs: List[Union[RolloutMetrics, SampleBatchType]] = []\n\n large_batch_threshold: int = max(1000, rollout_fragment_length * 10) if \\\n rollout_fragment_length != float(\"inf\") else 5000\n\n # For each environment.\n # type: EnvID, Dict[AgentID, EnvObsType]\n for env_id, agent_obs in unfiltered_obs.items():\n is_new_episode: bool = env_id not in active_episodes\n episode: MultiAgentEpisode = active_episodes[env_id]\n batch_builder = episode.batch_builder\n if not is_new_episode:\n episode.length += 1\n batch_builder.count += 1\n episode._add_agent_rewards(rewards[env_id])\n\n if (batch_builder.total() > large_batch_threshold\n and log_once(\"large_batch_warning\")):\n logger.warning(\n \"More than {} observations for {} env steps \".format(\n batch_builder.total(), batch_builder.count) +\n \"are buffered in \"\n \"the sampler. If this is more than you expected, check that \"\n \"that you set a horizon on your environment correctly and that\"\n \" it terminates at some point. \"\n \"Note: In multi-agent environments, `rollout_fragment_length` \"\n \"sets the batch size based on environment steps, not the \"\n \"steps of \"\n \"individual agents, which can result in unexpectedly large \"\n \"batches. Also, you may be in evaluation waiting for your Env \"\n \"to terminate (batch_mode=`complete_episodes`). 
Make sure it \"\n \"does at some point.\")\n\n # Check episode termination conditions.\n if dones[env_id][\"__all__\"] or episode.length >= horizon:\n hit_horizon = (episode.length >= horizon\n and not dones[env_id][\"__all__\"])\n all_agents_done = True\n atari_metrics: List[RolloutMetrics] = _fetch_atari_metrics(\n base_env)\n if atari_metrics is not None:\n for m in atari_metrics:\n outputs.append(\n m._replace(custom_metrics=episode.custom_metrics))\n else:\n outputs.append(\n RolloutMetrics(episode.length, episode.total_reward,\n dict(episode.agent_rewards),\n episode.custom_metrics, {},\n episode.hist_data))\n else:\n hit_horizon = False\n all_agents_done = False\n active_envs.add(env_id)\n\n # Custom observation function is applied before preprocessing.\n if observation_fn:\n agent_obs: Dict[AgentID, EnvObsType] = observation_fn(\n agent_obs=agent_obs,\n worker=worker,\n base_env=base_env,\n policies=policies,\n episode=episode)\n if not isinstance(agent_obs, dict):\n raise ValueError(\n \"observe() must return a dict of agent observations\")\n\n # For each agent in the environment.\n # type: AgentID, EnvObsType\n for agent_id, raw_obs in agent_obs.items():\n assert agent_id != \"__all__\"\n policy_id: PolicyID = episode.policy_for(agent_id)\n prep_obs: EnvObsType = _get_or_raise(preprocessors,\n policy_id).transform(raw_obs)\n if log_once(\"prep_obs\"):\n logger.info(\"Preprocessed obs: {}\".format(summarize(prep_obs)))\n\n filtered_obs: EnvObsType = _get_or_raise(obs_filters,\n policy_id)(prep_obs)\n if log_once(\"filtered_obs\"):\n logger.info(\"Filtered obs: {}\".format(summarize(filtered_obs)))\n\n agent_done = bool(all_agents_done or dones[env_id].get(agent_id))\n if not agent_done:\n item = PolicyEvalData(env_id, agent_id, filtered_obs,\n infos[env_id].get(agent_id, {}),\n episode.rnn_state_for(agent_id),\n episode.last_action_for(agent_id),\n rewards[env_id][agent_id] or 0.0)\n to_eval[policy_id].append(item)\n\n last_observation: EnvObsType = episode.last_observation_for(\n agent_id)\n episode._set_last_observation(agent_id, filtered_obs)\n episode._set_last_raw_obs(agent_id, raw_obs)\n episode._set_last_info(agent_id, infos[env_id].get(agent_id, {}))\n\n # Record transition info if applicable.\n if (last_observation is not None and infos[env_id].get(\n agent_id, {}).get(\"training_enabled\", True)):\n batch_builder.add_values(\n agent_id,\n policy_id,\n t=episode.length - 1,\n eps_id=episode.episode_id,\n agent_index=episode._agent_index(agent_id),\n obs=last_observation,\n actions=episode.last_action_for(agent_id),\n rewards=rewards[env_id][agent_id],\n prev_actions=episode.prev_action_for(agent_id),\n prev_rewards=episode.prev_reward_for(agent_id),\n dones=(False if (no_done_at_end\n or (hit_horizon and soft_horizon)) else\n agent_done),\n infos=infos[env_id].get(agent_id, {}),\n new_obs=filtered_obs,\n **episode.last_pi_info_for(agent_id))\n\n # Invoke the step callback after the step is logged to the episode\n callbacks.on_episode_step(\n worker=worker,\n base_env=base_env,\n episode=episode,\n env_index=env_id)\n\n # Cut the batch if ...\n # - all-agents-done and not packing multiple episodes into one\n # (batch_mode=\"complete_episodes\")\n # - or if we've exceeded the rollout_fragment_length.\n if batch_builder.has_pending_agent_data():\n # Sanity check, whether all agents have done=True, if done[__all__]\n # is True.\n if dones[env_id][\"__all__\"] and not no_done_at_end:\n batch_builder.check_missing_dones()\n\n # Reached end of episode and we are not allowed to 
pack the\n # next episode into the same SampleBatch -> Build the SampleBatch\n # and add it to \"outputs\".\n if (all_agents_done and not multiple_episodes_in_batch) or \\\n batch_builder.count >= rollout_fragment_length:\n batch_builder.postprocess_batch_so_far(episode)\n outputs.append(batch_builder.build_and_reset(episode))\n # Make sure postprocessor stays within one episode.\n elif all_agents_done:\n batch_builder.postprocess_batch_so_far(episode)\n\n # Episode is done.\n if all_agents_done:\n # We can pass the BatchBuilder to recycling.\n batch_builder_pool.append(batch_builder)\n # Call each policy's Exploration.on_episode_end method.\n for p in policies.values():\n if getattr(p, \"exploration\", None) is not None:\n p.exploration.on_episode_end(\n policy=p,\n environment=base_env,\n episode=episode,\n tf_sess=getattr(p, \"_sess\", None))\n # Call custom on_episode_end callback.\n callbacks.on_episode_end(\n worker=worker,\n base_env=base_env,\n policies=policies,\n episode=episode,\n env_index=env_id,\n )\n # Horizon hit and we have a soft horizon (no hard env reset).\n if hit_horizon and soft_horizon:\n episode.soft_reset()\n resetted_obs: Dict[AgentID, EnvObsType] = agent_obs\n # Env actually ended OR horizon hit and no soft horizon ->\n # Try hard env-reset.\n else:\n # Remove episode from active ones.\n del active_episodes[env_id]\n resetted_obs: Dict[AgentID, EnvObsType] = base_env.try_reset(\n env_id)\n if resetted_obs is None:\n # Reset not supported, drop this env from the ready list.\n if horizon != float(\"inf\"):\n raise ValueError(\n \"Setting episode horizon requires reset() support \"\n \"from the environment.\")\n elif resetted_obs != ASYNC_RESET_RETURN:\n # Creates a new episode if this is not async return.\n # If reset is async, we will get its result in some future poll\n episode: MultiAgentEpisode = active_episodes[env_id]\n if observation_fn:\n resetted_obs: Dict[AgentID, EnvObsType] = observation_fn(\n agent_obs=resetted_obs,\n worker=worker,\n base_env=base_env,\n policies=policies,\n episode=episode)\n # type: AgentID, EnvObsType\n for agent_id, raw_obs in resetted_obs.items():\n policy_id: PolicyID = episode.policy_for(agent_id)\n policy: Policy = _get_or_raise(policies, policy_id)\n prep_obs: EnvObsType = _get_or_raise(\n preprocessors, policy_id).transform(raw_obs)\n filtered_obs: EnvObsType = _get_or_raise(\n obs_filters, policy_id)(prep_obs)\n episode._set_last_observation(agent_id, filtered_obs)\n\n item = PolicyEvalData(\n env_id, agent_id, filtered_obs,\n episode.last_info_for(agent_id) or {},\n episode.rnn_state_for(agent_id),\n np.zeros_like(\n flatten_to_single_ndarray(\n policy.action_space.sample())), 0.0)\n to_eval[policy_id].append(item)\n\n return active_envs, to_eval, outputs\n\n\ndef _process_observations_w_trajectory_view_api(\n *,\n worker: \"RolloutWorker\",\n base_env: BaseEnv,\n policies: Dict[PolicyID, Policy],\n active_episodes: Dict[str, MultiAgentEpisode],\n unfiltered_obs: Dict[EnvID, Dict[AgentID, EnvObsType]],\n rewards: Dict[EnvID, Dict[AgentID, float]],\n dones: Dict[EnvID, Dict[AgentID, bool]],\n infos: Dict[EnvID, Dict[AgentID, EnvInfoDict]],\n horizon: int,\n preprocessors: Dict[PolicyID, Preprocessor],\n obs_filters: Dict[PolicyID, Filter],\n multiple_episodes_in_batch: bool,\n callbacks: \"DefaultCallbacks\",\n soft_horizon: bool,\n no_done_at_end: bool,\n observation_fn: \"ObservationFunction\",\n _sample_collector: _SampleCollector,\n) -> Tuple[Set[EnvID], Dict[PolicyID, List[PolicyEvalData]], List[Union[\n 
RolloutMetrics, SampleBatchType]]]:\n \"\"\"Trajectory View API version of `_process_observations()`.\n TODO: (sven) Move docstring here once original function is deprecated.\n \"\"\"\n\n # Output objects.\n active_envs: Set[EnvID] = set()\n to_eval: Dict[PolicyID, List[PolicyEvalData]] = defaultdict(list)\n outputs: List[Union[RolloutMetrics, SampleBatchType]] = []\n\n # For each (vectorized) sub-environment.\n # type: EnvID, Dict[AgentID, EnvObsType]\n for env_id, all_agents_obs in unfiltered_obs.items():\n is_new_episode: bool = env_id not in active_episodes\n episode: MultiAgentEpisode = active_episodes[env_id]\n\n if not is_new_episode:\n _sample_collector.episode_step(episode.episode_id)\n episode.length += 1\n episode._add_agent_rewards(rewards[env_id])\n\n # Check episode termination conditions.\n if dones[env_id][\"__all__\"] or episode.length >= horizon:\n hit_horizon = (episode.length >= horizon\n and not dones[env_id][\"__all__\"])\n all_agents_done = True\n atari_metrics: List[RolloutMetrics] = _fetch_atari_metrics(\n base_env)\n if atari_metrics is not None:\n for m in atari_metrics:\n outputs.append(\n m._replace(custom_metrics=episode.custom_metrics))\n else:\n outputs.append(\n RolloutMetrics(episode.length, episode.total_reward,\n dict(episode.agent_rewards),\n episode.custom_metrics, {},\n episode.hist_data))\n else:\n hit_horizon = False\n all_agents_done = False\n active_envs.add(env_id)\n\n # Custom observation function is applied before preprocessing.\n if observation_fn:\n all_agents_obs: Dict[AgentID, EnvObsType] = observation_fn(\n agent_obs=all_agents_obs,\n worker=worker,\n base_env=base_env,\n policies=policies,\n episode=episode)\n if not isinstance(all_agents_obs, dict):\n raise ValueError(\n \"observe() must return a dict of agent observations\")\n\n # For each agent in the environment.\n # type: AgentID, EnvObsType\n for agent_id, raw_obs in all_agents_obs.items():\n assert agent_id != \"__all__\"\n policy_id: PolicyID = episode.policy_for(agent_id)\n prep_obs: EnvObsType = _get_or_raise(preprocessors,\n policy_id).transform(raw_obs)\n if log_once(\"prep_obs\"):\n logger.info(\"Preprocessed obs: {}\".format(summarize(prep_obs)))\n\n filtered_obs: EnvObsType = _get_or_raise(obs_filters,\n policy_id)(prep_obs)\n if log_once(\"filtered_obs\"):\n logger.info(\"Filtered obs: {}\".format(summarize(filtered_obs)))\n\n agent_done = bool(all_agents_done or dones[env_id].get(agent_id))\n\n last_observation: EnvObsType = episode.last_observation_for(\n agent_id)\n episode._set_last_observation(agent_id, filtered_obs)\n episode._set_last_raw_obs(agent_id, raw_obs)\n episode._set_last_info(agent_id, infos[env_id].get(agent_id, {}))\n\n # Record transition info if applicable.\n if last_observation is None:\n _sample_collector.add_init_obs(episode, agent_id, env_id,\n policy_id, filtered_obs)\n else:\n # Add actions, rewards, next-obs to collectors.\n values_dict = {\n \"t\": episode.length - 1,\n \"eps_id\": episode.episode_id,\n \"env_id\": env_id,\n \"agent_index\": episode._agent_index(agent_id),\n # Action (slot 0) taken at timestep t.\n \"actions\": episode.last_action_for(agent_id),\n # Reward received after taking a at timestep t.\n \"rewards\": rewards[env_id][agent_id],\n # After taking action=a, did we reach terminal?\n \"dones\": (False if (no_done_at_end\n or (hit_horizon and soft_horizon)) else\n agent_done),\n # Next observation.\n \"new_obs\": filtered_obs,\n }\n # Add extra-action-fetches to collectors.\n 
values_dict.update(**episode.last_pi_info_for(agent_id))\n _sample_collector.add_action_reward_next_obs(\n episode.episode_id, agent_id, env_id, policy_id,\n agent_done, values_dict)\n\n if not agent_done:\n item = PolicyEvalData(\n env_id, agent_id, filtered_obs, infos[env_id].get(\n agent_id, {}), None if last_observation is None else\n episode.rnn_state_for(agent_id), None\n if last_observation is None else\n episode.last_action_for(agent_id),\n rewards[env_id][agent_id] or 0.0)\n to_eval[policy_id].append(item)\n\n # Invoke the step callback after the step is logged to the episode\n callbacks.on_episode_step(\n worker=worker,\n base_env=base_env,\n episode=episode,\n env_index=env_id)\n\n # Episode is done for all agents\n # (dones[__all__] == True or hit horizon).\n # Make sure postprocessor stays within one episode.\n if all_agents_done:\n is_done = dones[env_id][\"__all__\"]\n check_dones = is_done and not no_done_at_end\n _sample_collector.postprocess_episode(\n episode, is_done=is_done, check_dones=check_dones)\n # We are not allowed to pack the next episode into the same\n # SampleBatch (batch_mode=complete_episodes) -> Build the\n # MultiAgentBatch from a single episode and add it to \"outputs\".\n if not multiple_episodes_in_batch:\n ma_sample_batch = \\\n _sample_collector.build_multi_agent_batch(episode.length)\n outputs.append(ma_sample_batch)\n\n # Call each policy's Exploration.on_episode_end method.\n for p in policies.values():\n if getattr(p, \"exploration\", None) is not None:\n p.exploration.on_episode_end(\n policy=p,\n environment=base_env,\n episode=episode,\n tf_sess=getattr(p, \"_sess\", None))\n # Call custom on_episode_end callback.\n callbacks.on_episode_end(\n worker=worker,\n base_env=base_env,\n policies=policies,\n episode=episode,\n env_index=env_id,\n )\n # Horizon hit and we have a soft horizon (no hard env reset).\n if hit_horizon and soft_horizon:\n episode.soft_reset()\n resetted_obs: Dict[AgentID, EnvObsType] = all_agents_obs\n else:\n del active_episodes[env_id]\n resetted_obs: Dict[AgentID, EnvObsType] = base_env.try_reset(\n env_id)\n # Reset not supported, drop this env from the ready list.\n if resetted_obs is None:\n if horizon != float(\"inf\"):\n raise ValueError(\n \"Setting episode horizon requires reset() support \"\n \"from the environment.\")\n # Creates a new episode if this is not async return.\n # If reset is async, we will get its result in some future poll.\n elif resetted_obs != ASYNC_RESET_RETURN:\n new_episode: MultiAgentEpisode = active_episodes[env_id]\n if observation_fn:\n resetted_obs: Dict[AgentID, EnvObsType] = observation_fn(\n agent_obs=resetted_obs,\n worker=worker,\n base_env=base_env,\n policies=policies,\n episode=new_episode)\n # type: AgentID, EnvObsType\n for agent_id, raw_obs in resetted_obs.items():\n policy_id: PolicyID = new_episode.policy_for(agent_id)\n prep_obs: EnvObsType = _get_or_raise(\n preprocessors, policy_id).transform(raw_obs)\n filtered_obs: EnvObsType = _get_or_raise(\n obs_filters, policy_id)(prep_obs)\n new_episode._set_last_observation(agent_id, filtered_obs)\n\n # Add initial obs to buffer.\n _sample_collector.add_init_obs(\n new_episode, agent_id, env_id, policy_id, filtered_obs)\n\n item = PolicyEvalData(\n env_id, agent_id, filtered_obs,\n episode.last_info_for(agent_id) or {},\n episode.rnn_state_for(agent_id), None, 0.0)\n to_eval[policy_id].append(item)\n\n # Try to build something.\n if multiple_episodes_in_batch:\n sample_batch = \\\n 
_sample_collector.try_build_truncated_episode_multi_agent_batch()\n if sample_batch is not None:\n outputs.append(sample_batch)\n\n return active_envs, to_eval, outputs\n\n\ndef _do_policy_eval(\n *,\n to_eval: Dict[PolicyID, List[PolicyEvalData]],\n policies: Dict[PolicyID, Policy],\n active_episodes: Dict[str, MultiAgentEpisode],\n tf_sess=None,\n) -> Dict[PolicyID, Tuple[TensorStructType, StateBatch, dict]]:\n \"\"\"Call compute_actions on collected episode/model data to get next action.\n\n Args:\n to_eval (Dict[PolicyID, List[PolicyEvalData]]): Mapping of policy\n IDs to lists of PolicyEvalData objects (items in these lists will\n be the batch's items for the model forward pass).\n policies (Dict[PolicyID, Policy]): Mapping from policy ID to Policy\n obj.\n active_episodes (defaultdict[str,MultiAgentEpisode]): Mapping from\n episode ID to currently ongoing MultiAgentEpisode object.\n tf_sess (Optional[tf.Session]): Optional tensorflow session to use for\n batching TF policy evaluations.\n\n Returns:\n eval_results: dict of policy to compute_action() outputs.\n \"\"\"\n\n eval_results: Dict[PolicyID, TensorStructType] = {}\n\n if tf_sess:\n builder = TFRunBuilder(tf_sess, \"policy_eval\")\n pending_fetches: Dict[PolicyID, Any] = {}\n else:\n builder = None\n\n if log_once(\"compute_actions_input\"):\n logger.info(\"Inputs to compute_actions():\\n\\n{}\\n\".format(\n summarize(to_eval)))\n\n # type: PolicyID, PolicyEvalData\n for policy_id, eval_data in to_eval.items():\n policy: Policy = _get_or_raise(policies, policy_id)\n # If tf (non eager) AND TFPolicy's compute_action method has not\n # been overridden -> Use `policy._build_compute_actions()`.\n if builder and (policy.compute_actions.__code__ is\n TFPolicy.compute_actions.__code__):\n\n obs_batch: List[EnvObsType] = [t.obs for t in eval_data]\n state_batches: StateBatch = _to_column_format(\n [t.rnn_state for t in eval_data])\n # TODO(ekl): how can we make info batch available to TF code?\n prev_action_batch = [t.prev_action for t in eval_data]\n prev_reward_batch = [t.prev_reward for t in eval_data]\n\n pending_fetches[policy_id] = policy._build_compute_actions(\n builder,\n obs_batch=obs_batch,\n state_batches=state_batches,\n prev_action_batch=prev_action_batch,\n prev_reward_batch=prev_reward_batch,\n timestep=policy.global_timestep)\n else:\n rnn_in = [t.rnn_state for t in eval_data]\n rnn_in_cols: StateBatch = [\n np.stack([row[i] for row in rnn_in])\n for i in range(len(rnn_in[0]))\n ]\n eval_results[policy_id] = policy.compute_actions(\n [t.obs for t in eval_data],\n state_batches=rnn_in_cols,\n prev_action_batch=[t.prev_action for t in eval_data],\n prev_reward_batch=[t.prev_reward for t in eval_data],\n info_batch=[t.info for t in eval_data],\n episodes=[active_episodes[t.env_id] for t in eval_data],\n timestep=policy.global_timestep)\n\n if builder:\n # type: PolicyID, Tuple[TensorStructType, StateBatch, dict]\n for pid, v in pending_fetches.items():\n eval_results[pid] = builder.get(v)\n\n if log_once(\"compute_actions_result\"):\n logger.info(\"Outputs of compute_actions():\\n\\n{}\\n\".format(\n summarize(eval_results)))\n\n return eval_results\n\n\ndef _do_policy_eval_w_trajectory_view_api(\n *,\n to_eval: Dict[PolicyID, List[PolicyEvalData]],\n policies: Dict[PolicyID, Policy],\n _sample_collector,\n active_episodes: Dict[str, MultiAgentEpisode],\n tf_sess: Optional[\"tf.Session\"] = None,\n) -> Dict[PolicyID, Tuple[TensorStructType, StateBatch, dict]]:\n \"\"\"Call compute_actions on collected episode/model data 
to get next action.\n\n Args:\n to_eval (Dict[PolicyID, List[PolicyEvalData]]): Mapping of policy\n IDs to lists of PolicyEvalData objects (items in these lists will\n be the batch's items for the model forward pass).\n policies (Dict[PolicyID, Policy]): Mapping from policy ID to Policy\n obj.\n _sample_collector (SampleCollector): The SampleCollector object to use.\n tf_sess (Optional[tf.Session]): Optional tensorflow session to use for\n batching TF policy evaluations.\n\n Returns:\n eval_results: dict of policy to compute_action() outputs.\n \"\"\"\n\n eval_results: Dict[PolicyID, TensorStructType] = {}\n\n if tf_sess:\n builder = TFRunBuilder(tf_sess, \"policy_eval\")\n pending_fetches: Dict[PolicyID, Any] = {}\n else:\n builder = None\n\n if log_once(\"compute_actions_input\"):\n logger.info(\"Inputs to compute_actions():\\n\\n{}\\n\".format(\n summarize(to_eval)))\n\n for policy_id, eval_data in to_eval.items():\n policy: Policy = _get_or_raise(policies, policy_id)\n input_dict = _sample_collector.get_inference_input_dict(policy_id)\n eval_results[policy_id] = \\\n policy.compute_actions_from_input_dict(\n input_dict,\n timestep=policy.global_timestep,\n episodes=[active_episodes[t.env_id] for t in eval_data])\n\n if builder:\n # type: PolicyID, Tuple[TensorStructType, StateBatch, dict]\n for pid, v in pending_fetches.items():\n eval_results[pid] = builder.get(v)\n\n if log_once(\"compute_actions_result\"):\n logger.info(\"Outputs of compute_actions():\\n\\n{}\\n\".format(\n summarize(eval_results)))\n\n return eval_results\n\n\ndef _process_policy_eval_results(\n *,\n to_eval: Dict[PolicyID, List[PolicyEvalData]],\n eval_results: Dict[PolicyID, Tuple[TensorStructType, StateBatch,\n dict]],\n active_episodes: Dict[str, MultiAgentEpisode],\n active_envs: Set[int],\n off_policy_actions: MultiEnvDict,\n policies: Dict[PolicyID, Policy],\n clip_actions: bool,\n _use_trajectory_view_api: bool = False,\n _sample_collector=None,\n) -> Dict[EnvID, Dict[AgentID, EnvActionType]]:\n \"\"\"Process the output of policy neural network evaluation.\n\n Records policy evaluation results into the given episode objects and\n returns replies to send back to agents in the env.\n\n Args:\n to_eval (Dict[PolicyID, List[PolicyEvalData]]): Mapping of policy IDs\n to lists of PolicyEvalData objects.\n eval_results (Dict[PolicyID, List]): Mapping of policy IDs to list of\n actions, rnn-out states, extra-action-fetches dicts.\n active_episodes (Dict[str, MultiAgentEpisode]): Mapping from\n episode ID to currently ongoing MultiAgentEpisode object.\n active_envs (Set[int]): Set of non-terminated env ids.\n off_policy_actions (dict): Doubly keyed dict of env-ids -> agent ids ->\n off-policy-action, returned by a `BaseEnv.poll()` call.\n policies (Dict[PolicyID, Policy]): Mapping from policy ID to Policy.\n clip_actions (bool): Whether to clip actions to the action space's\n bounds.\n _use_trajectory_view_api (bool): Whether to use the (experimental)\n `_use_trajectory_view_api` to make generic trajectory views\n available to Models. 
Default: False.\n\n Returns:\n actions_to_send: Nested dict of env id -> agent id -> actions to be\n sent to Env (np.ndarrays).\n \"\"\"\n\n actions_to_send: Dict[EnvID, Dict[AgentID, EnvActionType]] = \\\n defaultdict(dict)\n\n # type: int\n for env_id in active_envs:\n actions_to_send[env_id] = {} # at minimum send empty dict\n\n # type: PolicyID, List[PolicyEvalData]\n for policy_id, eval_data in to_eval.items():\n actions: TensorStructType = eval_results[policy_id][0]\n actions = convert_to_numpy(actions)\n\n rnn_out_cols: StateBatch = eval_results[policy_id][1]\n pi_info_cols: dict = eval_results[policy_id][2]\n\n # In case actions is a list (representing the 0th dim of a batch of\n # primitive actions), try to convert it first.\n if isinstance(actions, list):\n actions = np.array(actions)\n\n # Store RNN state ins/outs and extra-action fetches to episode.\n if _use_trajectory_view_api:\n for f_i, column in enumerate(rnn_out_cols):\n pi_info_cols[\"state_out_{}\".format(f_i)] = column\n else:\n rnn_in_cols: StateBatch = _to_column_format(\n [t.rnn_state for t in eval_data])\n\n if len(rnn_in_cols) != len(rnn_out_cols):\n raise ValueError(\n \"Length of RNN in did not match RNN out, got: \"\n \"{} vs {}\".format(rnn_in_cols, rnn_out_cols))\n for f_i, column in enumerate(rnn_in_cols):\n pi_info_cols[\"state_in_{}\".format(f_i)] = column\n for f_i, column in enumerate(rnn_out_cols):\n pi_info_cols[\"state_out_{}\".format(f_i)] = column\n\n policy: Policy = _get_or_raise(policies, policy_id)\n # Split action-component batches into single action rows.\n actions: List[EnvActionType] = unbatch(actions)\n # type: int, EnvActionType\n for i, action in enumerate(actions):\n # Clip if necessary.\n if clip_actions:\n clipped_action = clip_action(action,\n policy.action_space_struct)\n else:\n clipped_action = action\n\n env_id: int = eval_data[i].env_id\n agent_id: AgentID = eval_data[i].agent_id\n episode: MultiAgentEpisode = active_episodes[env_id]\n episode._set_rnn_state(agent_id, [c[i] for c in rnn_out_cols])\n episode._set_last_pi_info(\n agent_id, {k: v[i]\n for k, v in pi_info_cols.items()})\n if env_id in off_policy_actions and \\\n agent_id in off_policy_actions[env_id]:\n episode._set_last_action(agent_id,\n off_policy_actions[env_id][agent_id])\n else:\n episode._set_last_action(agent_id, action)\n\n assert agent_id not in actions_to_send[env_id]\n actions_to_send[env_id][agent_id] = clipped_action\n\n return actions_to_send\n\n\ndef _fetch_atari_metrics(base_env: BaseEnv) -> List[RolloutMetrics]:\n \"\"\"Atari games have multiple logical episodes, one per life.\n\n However, for metrics reporting we count full episodes, all lives included.\n \"\"\"\n unwrapped = base_env.get_unwrapped()\n if not unwrapped:\n return None\n atari_out = []\n for u in unwrapped:\n monitor = get_wrapper_by_cls(u, MonitorEnv)\n if not monitor:\n return None\n for eps_rew, eps_len in monitor.next_episode_results():\n atari_out.append(RolloutMetrics(eps_len, eps_rew))\n return atari_out\n\n\ndef _to_column_format(rnn_state_rows: List[List[Any]]) -> StateBatch:\n num_cols = len(rnn_state_rows[0])\n return [[row[i] for row in rnn_state_rows] for i in range(num_cols)]\n\n\ndef _get_or_raise(mapping: Dict[PolicyID, Union[Policy, Preprocessor, Filter]],\n policy_id: PolicyID) -> Union[Policy, Preprocessor, Filter]:\n \"\"\"Returns an object under key `policy_id` in `mapping`.\n\n Args:\n mapping (Dict[PolicyID, Union[Policy, Preprocessor, Filter]]): The\n mapping dict from policy id (str) to actual object 
(Policy,\n Preprocessor, etc.).\n policy_id (str): The policy ID to lookup.\n\n Returns:\n Union[Policy, Preprocessor, Filter]: The found object.\n\n Raises:\n ValueError: If `policy_id` cannot be found in `mapping`.\n \"\"\"\n if policy_id not in mapping:\n raise ValueError(\n \"Could not find policy for agent: agent policy id `{}` not \"\n \"in policy map keys {}.\".format(policy_id, mapping.keys()))\n return mapping[policy_id]\n"
] | [
[
"numpy.array",
"numpy.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Corallus-Caninus/scipy | [
"c734dacd61c5962a86ab3cc4bf2891fc94b720a6"
] | [
"scipy/ndimage/tests/test_fourier.py"
] | [
"import numpy\nfrom numpy import fft\nfrom numpy.testing import (assert_almost_equal, assert_array_almost_equal)\n\nimport pytest\n\nfrom scipy import ndimage\n\n\nclass TestNdimageFourier:\n\n @pytest.mark.parametrize('shape', [(32, 16), (31, 15)])\n @pytest.mark.parametrize('dtype, dec',\n [(numpy.float32, 6), (numpy.float64, 14)])\n def test_fourier_gaussian_real01(self, shape, dtype, dec):\n a = numpy.zeros(shape, dtype)\n a[0, 0] = 1.0\n a = fft.rfft(a, shape[0], 0)\n a = fft.fft(a, shape[1], 1)\n a = ndimage.fourier_gaussian(a, [5.0, 2.5], shape[0], 0)\n a = fft.ifft(a, shape[1], 1)\n a = fft.irfft(a, shape[0], 0)\n assert_almost_equal(ndimage.sum(a), 1, decimal=dec)\n\n @pytest.mark.parametrize('shape', [(32, 16), (31, 15)])\n @pytest.mark.parametrize('dtype, dec',\n [(numpy.complex64, 6), (numpy.complex128, 14)])\n def test_fourier_gaussian_complex01(self, shape, dtype, dec):\n a = numpy.zeros(shape, dtype)\n a[0, 0] = 1.0\n a = fft.fft(a, shape[0], 0)\n a = fft.fft(a, shape[1], 1)\n a = ndimage.fourier_gaussian(a, [5.0, 2.5], -1, 0)\n a = fft.ifft(a, shape[1], 1)\n a = fft.ifft(a, shape[0], 0)\n assert_almost_equal(ndimage.sum(a.real), 1.0, decimal=dec)\n\n @pytest.mark.parametrize('shape', [(32, 16), (31, 15)])\n @pytest.mark.parametrize('dtype, dec',\n [(numpy.float32, 6), (numpy.float64, 14)])\n def test_fourier_uniform_real01(self, shape, dtype, dec):\n a = numpy.zeros(shape, dtype)\n a[0, 0] = 1.0\n a = fft.rfft(a, shape[0], 0)\n a = fft.fft(a, shape[1], 1)\n a = ndimage.fourier_uniform(a, [5.0, 2.5], shape[0], 0)\n a = fft.ifft(a, shape[1], 1)\n a = fft.irfft(a, shape[0], 0)\n assert_almost_equal(ndimage.sum(a), 1.0, decimal=dec)\n\n @pytest.mark.parametrize('shape', [(32, 16), (31, 15)])\n @pytest.mark.parametrize('dtype, dec',\n [(numpy.complex64, 6), (numpy.complex128, 14)])\n def test_fourier_uniform_complex01(self, shape, dtype, dec):\n a = numpy.zeros(shape, dtype)\n a[0, 0] = 1.0\n a = fft.fft(a, shape[0], 0)\n a = fft.fft(a, shape[1], 1)\n a = ndimage.fourier_uniform(a, [5.0, 2.5], -1, 0)\n a = fft.ifft(a, shape[1], 1)\n a = fft.ifft(a, shape[0], 0)\n assert_almost_equal(ndimage.sum(a.real), 1.0, decimal=dec)\n\n @pytest.mark.parametrize('shape', [(32, 16), (31, 15)])\n @pytest.mark.parametrize('dtype, dec',\n [(numpy.float32, 4), (numpy.float64, 11)])\n def test_fourier_shift_real01(self, shape, dtype, dec):\n expected = numpy.arange(shape[0] * shape[1], dtype=dtype)\n expected.shape = shape\n a = fft.rfft(expected, shape[0], 0)\n a = fft.fft(a, shape[1], 1)\n a = ndimage.fourier_shift(a, [1, 1], shape[0], 0)\n a = fft.ifft(a, shape[1], 1)\n a = fft.irfft(a, shape[0], 0)\n assert_array_almost_equal(a[1:, 1:], expected[:-1, :-1],\n decimal=dec)\n assert_array_almost_equal(a.imag, numpy.zeros(shape),\n decimal=dec)\n\n @pytest.mark.parametrize('shape', [(32, 16), (31, 15)])\n @pytest.mark.parametrize('dtype, dec',\n [(numpy.complex64, 6), (numpy.complex128, 11)])\n def test_fourier_shift_complex01(self, shape, dtype, dec):\n expected = numpy.arange(shape[0] * shape[1], dtype=dtype)\n expected.shape = shape\n a = fft.fft(expected, shape[0], 0)\n a = fft.fft(a, shape[1], 1)\n a = ndimage.fourier_shift(a, [1, 1], -1, 0)\n a = fft.ifft(a, shape[1], 1)\n a = fft.ifft(a, shape[0], 0)\n assert_array_almost_equal(a.real[1:, 1:], expected[:-1, :-1],\n decimal=dec)\n assert_array_almost_equal(a.imag, numpy.zeros(shape),\n decimal=dec)\n\n @pytest.mark.parametrize('shape', [(32, 16), (31, 15)])\n @pytest.mark.parametrize('dtype, dec',\n [(numpy.float32, 5), (numpy.float64, 
14)])\n def test_fourier_ellipsoid_real01(self, shape, dtype, dec):\n a = numpy.zeros(shape, dtype)\n a[0, 0] = 1.0\n a = fft.rfft(a, shape[0], 0)\n a = fft.fft(a, shape[1], 1)\n a = ndimage.fourier_ellipsoid(a, [5.0, 2.5],\n shape[0], 0)\n a = fft.ifft(a, shape[1], 1)\n a = fft.irfft(a, shape[0], 0)\n assert_almost_equal(ndimage.sum(a), 1.0, decimal=dec)\n\n @pytest.mark.parametrize('shape', [(32, 16), (31, 15)])\n @pytest.mark.parametrize('dtype, dec',\n [(numpy.complex64, 5), (numpy.complex128, 14)])\n def test_fourier_ellipsoid_complex01(self, shape, dtype, dec):\n a = numpy.zeros(shape, dtype)\n a[0, 0] = 1.0\n a = fft.fft(a, shape[0], 0)\n a = fft.fft(a, shape[1], 1)\n a = ndimage.fourier_ellipsoid(a, [5.0, 2.5], -1, 0)\n a = fft.ifft(a, shape[1], 1)\n a = fft.ifft(a, shape[0], 0)\n assert_almost_equal(ndimage.sum(a.real), 1.0, decimal=dec)\n\n def test_fourier_ellipsoid_1d_complex(self):\n # expected result of 1d ellipsoid is the same as for fourier_uniform\n for shape in [(32, ), (31, )]:\n for type_, dec in zip([numpy.complex64, numpy.complex128],\n [5, 14]):\n x = numpy.ones(shape, dtype=type_)\n a = ndimage.fourier_ellipsoid(x, 5, -1, 0)\n b = ndimage.fourier_uniform(x, 5, -1, 0)\n assert_array_almost_equal(a, b, decimal=dec)\n"
] | [
[
"scipy.ndimage.sum",
"scipy.ndimage.fourier_uniform",
"numpy.fft.irfft",
"numpy.fft.fft",
"numpy.fft.rfft",
"numpy.arange",
"scipy.ndimage.fourier_ellipsoid",
"numpy.ones",
"numpy.fft.ifft",
"scipy.ndimage.fourier_gaussian",
"scipy.ndimage.fourier_shift",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
mbbatukan/vfo | [
"27662cba01c713a5f09dcbf1a765e1120d6ad8a2"
] | [
"vfo/internal_database_functions.py"
] | [
"import numpy as np\nimport os\nimport warnings\nimport openseespy.opensees as ops\n\n\n\ndef _getNodesandElements():\n\t\"\"\"\n\tThis function returns the nodes and elments for an active model, in a \n\tstandardized format. The OpenSees model must be active in order for the \n\tfunction to work.\n \n\tReturns\n\t-------\n\tnodes : 2dArray\n\t\tAn array of all nodes in the model.\n\t\tReturns nodes in the shape:\n\t\t[Nodes, 3] in 2d and [Nodes, 4]\n\t\tFor each node the information is tored as follows:\n\t\t[NodeID, x, y] or [NodeID, x, y, z]\n\telements : Array \n\t\tAn list of all elements in. Each entry in the list is it's own'\n\t\t[element1, element2,...], element1 = [element#, node1, node2,...]\n\t\"\"\"\n \n\t# Get nodes and elements\n\tnodeList = ops.getNodeTags()\n\teleList = ops.getEleTags() \n \n\t# Check Number of dimensions and intialize variables\n\tndm = len(ops.nodeCoord(nodeList[0]))\n\tNnodes = len(nodeList)\n\tNele = len(eleList)\n\tnodes = np.zeros([Nnodes, ndm + 1])\n\teleClassTags = np.zeros([Nele, 2])\n \n\t# Get Node list\n\tfor ii, node in enumerate(nodeList):\n\t\tnodes[ii,0] = node\n\t\tnodes[ii,1:] = ops.nodeCoord(nodeList[ii]) \n \n\tNele = len(eleList)\n\telements = [None]*Nele\n\t\n \n\t# Generate the element list by looping through all emenemts\n\tfor ii, ele in enumerate(eleList):\n\t\ttempNodes = ops.eleNodes(ele)\n\t\n\t\ttempNnodes = len(tempNodes)\n\t\ttempEle = np.zeros(tempNnodes + 1)\n \n\t\ttempEle[0] = int(ele)\n\t\ttempEle[1:] = tempNodes\n \n\t\telements[ii] = tempEle \n\n\t# Generate element class tags by looping through all elements\n\tfor ii, ele in enumerate(eleList):\n\t\teleClassTags[ii,0] = ele\n\t\teleClassTags[ii,1] = ops.getEleClassTags(ele)[0]\n \n\treturn nodes, elements, eleClassTags\n\ndef _saveNodesandElements(ModelName):\n\t\"\"\" \n\tThis file saves the node and element information for the structure. \n\tFor each node information is saved in the following format:\n\t\tNodes: [NodeID, xcord, ycord] or [NodeID, xcord, ycord, zcord]\n \n\tFor elements, the element is saved with the element connectivity. \n\tA different file is created for each type of element\n\teach possible element type.\n\t\tElements: [EleID, eleNode1, eleNode2, ... , eleNodeN]\n\tParameters\n\t----------\n\tnodeName : str, optional\n\t\tThe name of the file to be saved. The default is 'Nodes'.\n eleName : str, optional\n\t\tThe name of the . The default is 'Elements'.\n\tdelim : str, optional\n\t\tThe delimeter for the output file. The default is ','.\n\tfmt : str, optional\n\t\tthe format of the file to be saved in. 
The default is '%.5e'.\n\t\"\"\"\n \n\n\t# Consider making these optional arguements\n\tnodeName = 'Nodes'\n\teleName = 'Elements'\n\teleClassName = 'EleClassTags'\n\tdelim = ' '\n\tfmt = '%.5e'\n\tftype = '.out'\n \n\tODBdir = ModelName+\"_ODB\"\t\t# ODB Dir name\n\n\t# Read noades and elements\n\tnodes, elements, eleClassTags = _getNodesandElements()\n\n\t# Sort through the element arrays\n\tele2Node = np.array([ele for ele in elements if len(ele) == 3])\n\tele3Node = np.array([ele for ele in elements if len(ele) == 4])\n\tele4Node = np.array([ele for ele in elements if len(ele) == 5])\n\tele8Node = np.array([ele for ele in elements if len(ele) == 9])\n\n \n\tnodeFile = os.path.join(ODBdir, nodeName + ftype)\n \n\tele2File = os.path.join(ODBdir, eleName + \"_2Node\" + ftype)\n\tele3File = os.path.join(ODBdir, eleName + \"_3Node\" + ftype)\n\tele4File = os.path.join(ODBdir, eleName + \"_4Node\" + ftype)\n\tele8File = os.path.join(ODBdir, eleName + \"_8Node\" + ftype)\n\t\n\teleClassTagsFile = os.path.join(ODBdir, eleClassName + ftype)\n\n\t# SaveNodes\n\tnp.savetxt(nodeFile, nodes, delimiter = delim, fmt = fmt)\n \n\t# Save element arrays\n\tnp.savetxt(ele2File, ele2Node, delimiter = delim, fmt = fmt)\n\tnp.savetxt(ele3File, ele3Node, delimiter = delim, fmt = fmt)\n\tnp.savetxt(ele4File, ele4Node, delimiter = delim, fmt = fmt)\n\tnp.savetxt(ele8File, ele8Node, delimiter = delim, fmt = fmt)\n\t\n\t# Save Element Class Tags\n\tnp.savetxt(eleClassTagsFile, eleClassTags, delimiter = delim, fmt = fmt)\n\n\ndef _readNodesandElements(ModelName):\n\t\"\"\" \n\tThis function reads input node/element information, assuming it is in the \n\tstandard format. \n\tIf outputDir == False, the base directory will be used. \n \n\tParameters\n ----------\n nodeName : str, optional\n The base name for the node file. It will be appended to include\n the file type. The default is 'Nodes.out'.\n eleName : str, optional\n The base nae for the element files. The default is 'Elements.out'.\n delim : str, optional\n The delimiter for files to be read. The default is ','.\n dtype : TYPE, optional\n The data type to read in. The default is 'float32'.\n Returns\n -------\n nodes : Array\n An output vector in standard format\n elements : List\n An output Element vector in standard format.\n elements = [ele1, ele2,..., elen], \n ele1 = [element, node 1, node 2, ... 
, node n]\n \"\"\"\n\n\t# Consider making these optional arguements\n\tnodeName = 'Nodes'\n\teleName = 'Elements'\n\teleClassName = 'EleClassTags'\n\tdelim = ' '\n\tdtype ='float32' \n\tftype = '.out'\n \n\tODBdir = ModelName+\"_ODB\"\t\t# ODB Dir name\n \n\t# Check if output database exists\n\tif not os.path.exists(ODBdir):\n\t\tprint('No directory found for nodes and elements')\n \n\t# Generate the file names\n\tnodeFile = os.path.join(ODBdir, nodeName + ftype)\n\tele2File = os.path.join(ODBdir, eleName + \"_2Node\" + ftype)\n\tele3File = os.path.join(ODBdir, eleName + \"_3Node\" + ftype)\n\tele4File = os.path.join(ODBdir, eleName + \"_4Node\" + ftype)\n\tele8File = os.path.join(ODBdir, eleName + \"_8Node\" + ftype) \n \n\teleFileNames = [ele2File, ele3File, ele4File, ele8File] \n\t\n\teleClassTagsFile = os.path.join(ODBdir, eleClassName + ftype)\n \n\t## Load Node information\n\ttry:\n\t\tnodes = np.loadtxt(nodeFile, dtype, delimiter = delim, unpack=False)\n\texcept:\n\t\tprint(\"Reading node data from a OpenSees Tcl model\")\n\t\tnodes = np.transpose(np.loadtxt(nodeFile, dtype=float, delimiter=None, converters=None, unpack=True))\n\t\t\t\n\t# Populate an array with the input element information\n\tTempEle = [[]]*4\n \n\t# Check if the file exists, read it if it does. Ignore warnings if the files are empty\n\tfor ii, FileName in enumerate(eleFileNames):\n\t\tif os.path.isfile(FileName):\n\t\t\twith warnings.catch_warnings():\n\t\t\t\twarnings.simplefilter(\"ignore\")\n\t\t\t\ttry:\n\t\t\t\t\tTempEle[ii] = np.loadtxt(FileName, dtype, delimiter = delim, skiprows=0, ndmin=2, unpack=False)\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"Reading element data from a OpenSees Tcl model\")\n\t\t\t\t\tTempEle[ii] = np.transpose(np.loadtxt(FileName, dtype=float, delimiter=None, skiprows=0, ndmin=2,converters=None, unpack=True))\n\n\t# define the final element array\n\telements = [*TempEle[0],*TempEle[1],*TempEle[2],*TempEle[3]]\n\n\t## Load Element Class Tags information\n\ttry:\n\t\teleClassTags = np.loadtxt(eleClassTagsFile, dtype, delimiter = delim, unpack=False)\n\texcept:\n\t\tprint(\"No element class tag information was found\")\n\t\n\t# Check if any files were read\n\tif elements is []:\n\t\traise Exception('No element information files were found!')\n\n\treturn nodes, elements, eleClassTags\n\t\n\t\n################ ModeShapes #############################\n\ndef _getModeShapeData(modeNumber):\n\t\n\t# Get nodes and elements\n\tnodeList = ops.getNodeTags()\n \n\t# Check Number of dimensions and intialize variables\n\tndm = len(ops.nodeCoord(nodeList[0]))\n\tNnodes = len(nodeList)\n\tnodes_modeshape = np.zeros([Nnodes, ndm + 1])\n \n\tfor ii, node in enumerate(nodeList):\n\t\tnodes_modeshape[ii,0] = node\n\t\ttempData = ops.nodeEigenvector(nodeList[ii], modeNumber)\n\t\tnodes_modeshape[ii,1:] = tempData[0:ndm]\n\n\treturn nodes_modeshape\n\t\n\t\ndef _saveModeShapeData(ModelName,modeNumber):\n \n\tnodes_modeshape = _getModeShapeData(modeNumber)\n\t\n\t# Consider making these optional arguements\n\tmodeName = \"ModeShape\"\n\tdelim = ' '\n\tfmt = '%.5e'\n\tftype = '.out'\n \n\tODBdir = ModelName+\"_ODB\"\t\t# ODB Dir name\n\tModeShapeDir = os.path.join(ODBdir,\"ModeShapes\")\n\tmodeFile = os.path.join(ModeShapeDir, modeName+str(modeNumber)+ftype)\n\t\n\t## ModeShapeDir is a default name\n\tnp.savetxt(modeFile, nodes_modeshape, delimiter = delim, fmt = fmt) \n\t\n\t\ndef _readModeShapeData(ModelName,modeNumber):\n\n\t# Consider making these optional arguements\n\tmodeName = \"ModeShape\"\n\tdelim = ' 
'\n\tfmt = '%.5e'\n\tdtype ='float32'\n\tftype = '.out'\n \n\tODBdir = ModelName+\"_ODB\"\t\t# ODB Dir name\n\tModeShapeDir = os.path.join(ODBdir,\"ModeShapes\")\n\t\n # Check if output database exists\n\tif not os.path.exists(ModeShapeDir):\n\t\tprint('Error: No directory found for modeshapes. Use recordODB() command to save modeshapes.')\n\n\tmodeFile = os.path.join(ModeShapeDir, modeName+str(modeNumber)+ftype)\n\tmodeTFile = os.path.join(ModeShapeDir, \"ModalPeriods.out\")\n \n\t## Read modal period data to display\n\tperiods = np.loadtxt(modeTFile, dtype, delimiter = delim, unpack=False)\n\t\n\t## Load Node information\n\ttry:\n\t\tnodes_modeshape = np.loadtxt(modeFile, dtype, delimiter = delim, unpack=False)\n\texcept:\n\t\tprint(\"Reading modeshape data from a OpenSees Tcl model\")\n\t\tnodes_modeshape = np.transpose(np.loadtxt(modeFile, dtype=float, delimiter=None, converters=None, unpack=True))\n\n\treturn nodes_modeshape, periods\n\n\n############## Node Displacement Data ######################################\n\ndef _readNodeDispData(ModelName,LoadCaseName):\n\t\n\tODBdir = ModelName+\"_ODB\"\t\t# ODB Dir name\n\tLoadCaseDir = os.path.join(ODBdir, LoadCaseName)\n\t\n\t# Get number of nodes in the model to set a node displacement array\n\tnodes,elements, eleClassTags = _readNodesandElements(ModelName)\n\tNnodes = len(nodes)\n\tndm = len(nodes[0,1:])\n\t\n\tNodeDispFile = os.path.join(LoadCaseDir,\"NodeDisp_All.out\")\n\tDisp = np.transpose(np.loadtxt(NodeDispFile, dtype=float, delimiter=None, converters=None, unpack=True))\n\t\n\ttimeSteps = Disp[:,0]\n\tNtime = len(Disp[:,0])\n\n\ttempDisp = np.zeros([Ntime,Nnodes,ndm])\n\ttempDisp[:,:,0] = Disp[:,1::ndm]\n\ttempDisp[:,:,1] = Disp[:,2::ndm]\n\t\n\tif ndm == 3:\n\t\ttempDisp[:,:,2] = Disp[:,3::ndm]\n\t\t\n\tnodes_displacement = tempDisp\n\t\n\treturn timeSteps, nodes_displacement\n\n\n#### Read fibre data\n\ndef _readFiberData2D(ModelName, LoadCaseName, eleNumber, sectionNumber):\n \n\t# Consider making these optional arguements\n\tFibreName = \"FiberData\"\n\tdelim = ' '\n\t# fmt = '%.5e'\n\tdtype ='float32'\n\tftype = '.out' \n \n\tODBdir = ModelName+\"_ODB\"\t\t# ODB Dir name\n\tFibreFileName = FibreName + '_ele_' + str(eleNumber) + '_section_' + str(sectionNumber) + ftype\n\tFiberDir = os.path.join(ODBdir, LoadCaseName, FibreFileName)\n\t# Check if output database exists\n\tif not os.path.exists(FiberDir):\n\t\tprint('Error: No file for Fiber data. 
Use saveFiberData2D() to create a recorder.') \n \n\tFiberData = np.loadtxt(FiberDir, dtype=dtype, delimiter=delim)\n\ttimeSteps = FiberData[:,0]\n\tFiberData = FiberData[:,1:]\n\n\treturn timeSteps, FiberData\n\n\ndef _saveMonitorElementData(monitorEleType, monitorOutput, GroupMonitorDir, monitorEleTags, deltaT, dofList_ele):\n\n\tmonitorGroupArray = np.asarray(monitorEleTags)\n\t\n\tif monitorEleType == \"TwoNodeLink\" or monitorEleType == \"twoNodeLink\":\n\t\tif monitorOutput==\"deformation\" or monitorOutput==\"deformations\":\n\t\t\t\"\"\"\n\t\t\t3D ELements: 1,2,3,4,5,6;\n\t\t\t2D Elements: 1,2,3\n\t\t\t\"\"\"\n\t\t\tMonitorDefFile = os.path.join(GroupMonitorDir,\"MonitorGroup_Deformation.out\")\n\t\t\tMonitorForceFile = os.path.join(GroupMonitorDir,\"MonitorGroup_Force.out\")\t\t\t\t# Needed to plot element hysteresis\n\t\t\tMonitorEleFile = os.path.join(GroupMonitorDir,\"MonitorGroup_Tags_Deformation.out\")\n\t\t\tMonitorInfoFile = os.path.join(GroupMonitorDir,\"MonitorGroup_Info_Deformation.out\")\n\t\tops.recorder('Element', '-file', MonitorDefFile, '-time', '-dT', deltaT, '-ele', *monitorEleTags, '-dof',*dofList_ele, monitorOutput) # Two node link element needs dofs input to record\n\t\tops.recorder('Element', '-file', MonitorForceFile, '-time', '-dT', deltaT, '-ele', *monitorEleTags, '-dof',*dofList_ele, 'localForce') # 6 columns for each column\n\t\tnp.savetxt(MonitorEleFile, monitorGroupArray, delimiter = ' ', fmt = '%.5e')\t\t\t# This file will be read by plotting functions\n\t\tnp.savetxt(MonitorInfoFile, np.asarray(dofList_ele), delimiter = ' ', fmt = '%.5e')\t\t\t# This file will be read by plotting functions\n\n\telif monitorEleType in [\"forceBeamColumn\",\"beamWithHinges\",\"dispBeamColumn\",\"nonlinearBeamColumn\"]:\n\t\tif monitorOutput in [\"chordRotation\", \"chordDeformations\"]:\n\t\t\t\"\"\"\n\t\t\t3D ELements: eps, thetaZ_1, thetaZ_2, thetaY_1, thetaY_2, thetaX;\n\t\t\t2D Elements: eps, theta_1, theta_2\n\t\t\t\"\"\"\n\t\t\tchordOutput = [1,2,3,4,5,6] \n\t\t\tMonitorDefFile = os.path.join(GroupMonitorDir,\"MonitorGroup_Rotation.out\")\n\t\t\tMonitorEleFile = os.path.join(GroupMonitorDir,\"MonitorGroup_Tags_Rotation.out\")\n\t\t\tMonitorInfoFile = os.path.join(GroupMonitorDir,\"MonitorGroup_Info_Rotation.out\")\n\t\t\tops.recorder('Element', '-file', MonitorDefFile, '-time', '-dT', deltaT, '-ele', *monitorEleTags, monitorOutput) # Records chord output\n\t\t\tnp.savetxt(MonitorEleFile, monitorGroupArray, delimiter = ' ', fmt = '%.5e')\t\t\t# This file will be read by plotting functions\n\t\t\tnp.savetxt(MonitorInfoFile, np.asarray(dofList_ele), delimiter = ' ', fmt = '%.5e')\t\t\t# This file will record number of outputs \n\t\telif monitorOutput == \"deformation\" or monitorOutput==\"deformations\":\n\t\t\t\"\"\"\n\t\t\t3D ELements: AxialStrain, CurvY, CurvZ, TorsionStrain\n\t\t\t2D Elements: AxialStrain, Curvature\n\t\t\t\"\"\"\n\t\t\t# Records axial strain, curvature\n\t\t\tcurvatureOutput = [1,2,3,4,5,6] #\n\t\t\tMonitorInfoFile = os.path.join(GroupMonitorDir,\"MonitorGroup_Info_SecDeformation.out\")\n\t\t\tMonitorEleFile = os.path.join(GroupMonitorDir,\"MonitorGroup_Tags_SecDeformation.out\")\n\t\t\tfor SectionNum in range(1,numSections+1):\n\t\t\t\tMonitorDefFile = os.path.join(GroupMonitorDir,\"MonitorGroup_Deformation_section\"+str(SectionNum)+\".out\")\n\t\t\t\tops.recorder('Element', '-file', MonitorDefFile, '-time', '-dT', deltaT, '-ele', *monitorEleTags, 'section',str(SectionNum), monitorOutput) # Records chord output\n\t\t\tnp.savetxt(MonitorEleFile, 
monitorGroupArray, delimiter = ' ', fmt = '%.5e')\t\t\t# This file will be read by plotting functions\n\t\t\tnp.savetxt(MonitorInfoFile, np.asarray(dofList_ele), delimiter = ' ', fmt = '%.5e')\t\t\t# This file will record number of outputs \n\t\telse:\n\t\t#### FUTURE: Add recorder for nonlinear beam column element sections; hinge rotation for beam with hinges\n\t\t\tpass\n\telse:\n\t\tpass\n\t\t\t\t\t\n\t\t\t\t\t\ndef _readMonitorElementData(monitorOutput,GroupMonitorDir):\n\n\tif monitorOutput==\"deformation\" or monitorOutput==\"deformations\":\n\t\tMonitorDefFile = os.path.join(GroupMonitorDir,\"MonitorGroup_Deformation.out\")\n\t\tMonitorForceFile = os.path.join(GroupMonitorDir,\"MonitorGroup_Force.out\")\t\t\t\t# Needed to plot element hysteresis\n\t\tMonitorEleFile = os.path.join(GroupMonitorDir,\"MonitorGroup_Tags_Deformation.out\")\n\t\tMonitorInfoFile = os.path.join(GroupMonitorDir,\"MonitorGroup_Info_Deformation.out\")\n\t\t\n\telif monitorOutput in [\"chordRotation\", \"chordDeformations\"]:\n\t\t\"\"\"\n\t\t3D ELements: eps, thetaZ_1, thetaZ_2, thetaY_1, thetaY_2, thetaX;\n\t\t2D Elements: eps, theta_1, theta_2\n\t\t\"\"\"\n\t\tchordOutput = [1,2,3,4,5,6] \n\t\tMonitorDefFile = os.path.join(GroupMonitorDir,\"MonitorGroup_Rotation.out\")\n\t\tMonitorEleFile = os.path.join(GroupMonitorDir,\"MonitorGroup_Tags_Rotation.out\")\n\t\tMonitorInfoFile = os.path.join(GroupMonitorDir,\"MonitorGroup_Info_Rotation.out\")\n\t\t\n\telse:\n\t\tprint(\"The output quantity is not recognized. Use 'deformation' or 'chordRotation' based on the user input in createODB command earlier.\")\n\t\t\n\t\t\n\t# ops.recorder('Element', '-file', MonitorDefFile, '-time', '-dT', deltaT, '-ele', *monitorEleTags, '-dof',*dofList, monitorOutput)\n\tMonitorEleDef = np.transpose(np.loadtxt(MonitorDefFile, dtype=float, delimiter=None, converters=None, unpack=True))\t\t# Returns the element deformations\n\tMonitorEleForce = np.transpose(np.loadtxt(MonitorForceFile, dtype=float, delimiter=None, converters=None, unpack=True))\t\t# Returns the element localForce\n\tMonitorEleTags = np.transpose(np.loadtxt(MonitorEleFile, dtype=float, delimiter=None, converters=None, unpack=True))\t# Returns the element tags\n\tMonitorEleInfo = np.transpose(np.loadtxt(MonitorInfoFile, dtype=float, delimiter=None, converters=None, unpack=True)) # Returns array of DOFs recorded\n\t\t\n\treturn MonitorEleDef, MonitorEleForce, MonitorEleTags, MonitorEleInfo\n\t\n\t\n\ndef _elementMonitorCheck(eleTag, dof, monitorOutput, limitStates, limStateColors, MonitorEleInfo, MonitorEleTags, MonitorEleDef, tStep):\n\teleMonitorColor = \"blue\"\n\tdofLength = len(MonitorEleInfo)\n\t\n\tif monitorOutput in [\"deformation\", \"deformations\"]:\n\t\n\t\tiMonitor, = np.where(MonitorEleInfo == float(dof))\t\t\t\t# Get the DOF column number to read for each element\n\t\tjMonitor, = np.where(MonitorEleTags == float(eleTag))\t\t\t# Get the element number\n\t\treadCol = 1 + iMonitor + dofLength*jMonitor\t\t\t\t\t\t# First column is time or load factor\n\t\t# print(iMonitor, MonitorEleTags, float(eleTag), readCol)\n\t\tfor kk in range(1,int(tStep)):\n\t\t\t# print(tStep, kk)\n\t\t\tif abs(MonitorEleDef[kk, readCol]) > abs(MonitorEleDef[kk-1, readCol]):\n\t\t\t\t# print(abs(MonitorEleDef[kk, readCol]))\n\t\t\t\tfor lim in range(0,4):\n\t\t\t\t\tif limitStates[lim] == 0:\n\t\t\t\t\t\teleMonitorColor = \"solid\"\n\t\t\t\t\telif abs(MonitorEleDef[kk, readCol]) >= limitStates[lim]:\n\t\t\t\t\t\teleMonitorColor = 
limStateColors[lim]\n\t\t\t\t\telse:\n\t\t\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tpass\n\t\t\t\t\t\t\t\n\t\treturn eleMonitorColor\n\t\n\telif monitorOutput in [\"chordRotation\", \"chordDeformations\"]:\n\t\n\t\tiMonitor, = np.where(MonitorEleInfo == float(dof))\t\t\t\t# Get the DOF column number to read for each element\n\t\tjMonitor, = np.where(MonitorEleTags == float(eleTag))\t\t\t# Get the element number\n\t\treadCol = 1 + iMonitor + dofLength*jMonitor\t\t\t\t\t\t# First column is time or load factor\n\t\t\n\t\tpass\n\t\n\ndef _elementHysteresis(eleTag, dof, MonitorEleInfo, MonitorEleTags, MonitorEleDef, MonitorEleForce):\n\n\tdofLength = len(MonitorEleInfo)\n\t\n\tiMonitor, = np.where(MonitorEleInfo == float(dof))\t\t\t\t# Get the DOF column number to read for each element\n\tjMonitor, = np.where(MonitorEleTags == float(eleTag))\t\t\t# Get the element number\n\treadCol = 1 + iMonitor + dofLength*jMonitor\t\t\t\t\t\t# First column is time or load factor\n\treadForceCol = 1 + iMonitor + (1*dofLength)*jMonitor\t\t\t# First column is time or load factor, 6 columns for each elements\n\t# print(iMonitor, MonitorEleTags, float(eleTag), readCol)\n\t\n\teleDeformation = MonitorEleDef[:, readCol]\n\teleForce = MonitorEleForce[:, readForceCol]\n\t\t\n\treturn eleDeformation, eleForce\n\n\t\t\t\t\t\t\n"
] | [
[
"numpy.savetxt",
"numpy.zeros",
"numpy.asarray",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tradingalgorithms/pyfolio | [
"b965e786c82af8d6db3b84d64920befae76734fc"
] | [
"pyfolio/tests/test_risk.py"
] | [
"from unittest import TestCase\nfrom nose_parameterized import parameterized\nimport os\nimport gzip\n\nimport pandas as pd\nfrom pandas import read_csv\nfrom pyfolio.utils import to_utc\n\nfrom pandas.util.testing import assert_frame_equal, assert_series_equal\n\nfrom pyfolio.risk import (compute_style_factor_exposures,\n compute_sector_exposures,\n compute_cap_exposures,\n compute_volume_exposures)\n\n\nclass RiskTestCase(TestCase):\n __location__ = os.path.realpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__)))\n\n test_pos = to_utc(read_csv(\n gzip.open(__location__ + '/test_data/test_pos.csv.gz'),\n index_col=0, parse_dates=True))\n test_pos.columns = [351, 1419, 1787, 25317, 3321, 3951, 4922, 'cash']\n\n test_txn = to_utc(read_csv(\n gzip.open(\n __location__ + '/test_data/test_txn.csv.gz'),\n index_col=0, parse_dates=True))\n test_sectors = to_utc(read_csv(\n __location__ + '/test_data/test_sectors.csv',\n index_col=0, parse_dates=True))\n expected_sectors_longed = to_utc(read_csv(\n __location__ + '/test_data/expected_sectors_longed.csv',\n index_col=0, parse_dates=True))\n expected_sectors_shorted = to_utc(read_csv(\n __location__ + '/test_data/expected_sectors_shorted.csv',\n index_col=0, parse_dates=True))\n expected_sectors_grossed = to_utc(read_csv(\n __location__ + '/test_data/expected_sectors_grossed.csv',\n index_col=0, parse_dates=True))\n test_caps = to_utc(read_csv(\n __location__ + '/test_data/test_caps.csv',\n index_col=0, parse_dates=True))\n expected_caps_longed = to_utc(read_csv(\n __location__ + '/test_data/expected_caps_longed.csv',\n index_col=0, parse_dates=True))\n expected_caps_shorted = to_utc(read_csv(\n __location__ + '/test_data/expected_caps_shorted.csv',\n index_col=0, parse_dates=True))\n expected_caps_grossed = to_utc(read_csv(\n __location__ + '/test_data/expected_caps_grossed.csv',\n index_col=0, parse_dates=True))\n expected_caps_netted = to_utc(read_csv(\n __location__ + '/test_data/expected_caps_netted.csv',\n index_col=0, parse_dates=True))\n test_shares_held = to_utc(read_csv(\n __location__ + '/test_data/test_shares_held.csv',\n index_col=0, parse_dates=True))\n test_volumes = to_utc(read_csv(\n __location__ + '/test_data/test_volumes.csv',\n index_col=0, parse_dates=True))\n expected_volumes = to_utc(read_csv(\n __location__ + '/test_data/expected_volumes.csv',\n index_col=0, parse_dates=True))\n\n test_dict = {}\n styles = ['LT_MOMENTUM', 'LMCAP', 'VLTY', 'MACDSignal']\n for style in styles:\n df = to_utc(read_csv(\n __location__ + '/test_data/test_{}.csv'.format(style),\n index_col=0, parse_dates=True))\n test_dict.update({style: df})\n test_styles = pd.Panel()\n test_styles = test_styles.from_dict(test_dict)\n\n expected_styles = to_utc(read_csv(\n __location__ + '/test_data/expected_styles.csv',\n index_col=0, parse_dates=True))\n\n @parameterized.expand([\n (test_pos, test_styles, expected_styles)\n ])\n def test_compute_style_factor_exposures(self, positions,\n risk_factor_panel, expected):\n style_list = []\n for name, value in risk_factor_panel.iteritems():\n risk_factor_panel[name].columns = \\\n risk_factor_panel[name].columns.astype(int)\n style_list.append(\n compute_style_factor_exposures(positions,\n risk_factor_panel[name])\n )\n expected.columns = expected.columns.astype(int)\n assert_frame_equal(pd.concat(style_list, axis=1), expected)\n\n @parameterized.expand([\n (test_pos, test_sectors, expected_sectors_longed,\n expected_sectors_shorted, expected_sectors_grossed)\n ])\n def test_compute_sector_exposures(self, 
positions, sectors,\n expected_longed, expected_shorted,\n expected_grossed):\n sectors.columns = sectors.columns.astype(int)\n sector_exposures = compute_sector_exposures(positions, sectors)\n\n expected_longed.columns = expected_longed.columns.astype(int)\n expected_shorted.columns = expected_shorted.columns.astype(int)\n expected_grossed.columns = expected_grossed.columns.astype(int)\n\n assert_frame_equal(pd.concat(sector_exposures[0], axis=1),\n expected_longed)\n assert_frame_equal(pd.concat(sector_exposures[1], axis=1),\n expected_shorted)\n assert_frame_equal(pd.concat(sector_exposures[2], axis=1),\n expected_grossed)\n\n @parameterized.expand([\n (test_pos, test_caps, expected_caps_longed, expected_caps_shorted,\n expected_caps_grossed, expected_caps_netted)\n ])\n def test_compute_cap_exposures(self, positions, caps,\n expected_longed, expected_shorted,\n expected_grossed, expected_netted):\n caps.columns = caps.columns.astype(int)\n cap_exposures = compute_cap_exposures(positions, caps)\n\n expected_longed.columns = expected_longed.columns.astype(int)\n expected_shorted.columns = expected_shorted.columns.astype(int)\n expected_grossed.columns = expected_grossed.columns.astype(int)\n expected_netted.columns = expected_netted.columns.astype(int)\n\n assert_frame_equal(pd.concat(cap_exposures[0], axis=1),\n expected_longed)\n assert_frame_equal(pd.concat(cap_exposures[1], axis=1),\n expected_shorted)\n assert_frame_equal(pd.concat(cap_exposures[2], axis=1),\n expected_grossed)\n assert_frame_equal(pd.concat(cap_exposures[3], axis=1),\n expected_netted)\n\n @parameterized.expand([\n (test_shares_held, test_volumes, 0.1, expected_volumes)\n ])\n def test_compute_volume_exposures(self, shares_held, volumes,\n percentile, expected):\n l_thresh, s_thresh, g_thresh = compute_volume_exposures(shares_held,\n volumes,\n percentile)\n\n assert_series_equal(l_thresh, expected['0'], check_names=False)\n assert_series_equal(s_thresh, expected['1'], check_names=False)\n assert_series_equal(g_thresh, expected['2'], check_names=False)\n"
] | [
[
"pandas.Panel",
"pandas.read_csv",
"pandas.util.testing.assert_series_equal",
"pandas.concat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
Sayar1106/Heart-Disease-Web-Application | [
"3dd2293056b73b251fcc21e154a663b592fca168"
] | [
"app/src/utils/predictor.py"
] | [
"import streamlit as st\nimport pandas as pd\nimport joblib\n\ndef create_inference_input(df):\n \"\"\"\n Function that creates an input form for ML model.\n\n The function will build the structure for an input form\n using Streamlit functions. The input from the form will be\n taken and converted into a dictionary with the keys being\n the column names of the dataframe and the values being the \n inputs.\n\n Parameters\n ----------\n df: DataFrame\n A dataframe containing the the heart disease data.\n \n Returns\n -------\n response_dict: Dict\n A dictionary containing the key, value pairs of the \n column names of the dataframe and the values from the input\n form.\n \"\"\"\n input_list = []\n age = st.sidebar.slider(label=\"Age\", \n min_value=min(df[\"age\"]), \n max_value=max(df[\"age\"]))\n input_list.append(age)\n st.sidebar.write(\"\\n\")\n sex = st.sidebar.radio(label=\"Sex\", \n options=df[\"sex\"].unique().tolist())\n st.sidebar.write(\"\\n\")\n input_list.append(sex)\n chest_pain_type = st.sidebar.selectbox(label=\"Chest pain type\", \n options=df[\"chest_pain_type\"].unique().tolist())\n st.sidebar.write(\"\\n\")\n input_list.append(chest_pain_type)\n resting_blood_pressure = st.sidebar.slider(label=\"Resting blood pressure mm Hg\", \n min_value=min(df[\"resting_blood_pressure\"]), \n max_value=max(df[\"resting_blood_pressure\"]))\n st.sidebar.write(\"\\n\")\n input_list.append(resting_blood_pressure)\n cholesterol = st.sidebar.slider(label=\"Cholesterol measurement in mg/dl\", \n min_value=min(df[\"cholesterol\"]),\n max_value=max(df[\"cholesterol\"]))\n st.sidebar.write(\"\\n\")\n input_list.append(cholesterol)\n fasting_blood_sugar = st.sidebar.radio(label=\"Enter the range for the fasting blood sugar\", \n options=df[\"fasting_blood_sugar\"].unique().tolist())\n st.sidebar.write(\"\\n\")\n input_list.append(fasting_blood_sugar)\n rest_ecg = st.sidebar.selectbox(label=\"Resting electromagnetic measurement.\", \n options=df[\"rest_ecg\"].unique().tolist())\n st.sidebar.write(\"\\n\")\n input_list.append(rest_ecg)\n max_heart_rate_achieved = st.sidebar.slider(label=\"Maximum heart rate achieved\", \n min_value=min(df[\"max_heart_rate_achieved\"]), \n max_value=max(df[\"max_heart_rate_achieved\"]))\n st.sidebar.write(\"\\n\")\n input_list.append(max_heart_rate_achieved)\n exercise_induced_angina = st.sidebar.radio(label=\"Exercise induced Angina?\", \n options=df[\"exercise_induced_angina\"].unique().tolist())\n st.sidebar.write(\"\\n\")\n input_list.append(exercise_induced_angina)\n st_depression = st.sidebar.slider(\"Enter the ST depression during exercise\", \n min_value=min(df[\"st_depression\"]), \n max_value=max(df[\"st_depression\"]))\n st.sidebar.write(\"\\n\")\n input_list.append(st_depression)\n st_slope = st.sidebar.selectbox(label=\"Slope of peak exercise ST segment\", \n options=df[\"st_slope\"].unique().tolist())\n st.sidebar.write(\"\\n\")\n input_list.append(st_slope)\n num_major_vessels = st.sidebar.slider(label=\"Number of major vessels\", \n min_value=min(df[\"num_major_vessels\"]), \n max_value=max(df[\"num_major_vessels\"]))\n st.sidebar.write(\"\\n\")\n input_list.append(num_major_vessels)\n thalassemia = st.sidebar.selectbox(label=\"History of Thalassemia?\", \n options=df[\"thalassemia\"].unique().tolist())\n st.sidebar.write(\"\\n\")\n input_list.append(thalassemia)\n\n # Dictionary comprehension for creating the response dictionary:\n response_dict = {column:value for column, value in zip(df.columns, input_list)}\n\n return response_dict\n\n\ndef 
predictor(df):\n st.header(\"Machine Learning model predictor\")\n st.write(\"\"\"\n A **machine learning model** trained on the heart disease dataset will be used\n to predict whether a patient has heart disease or not. We will be providing dropdowns\n for the user to select inputs for different attributes. These will then be fed into\n the machine learning model which will help predict the possibility of heart disease or not.\n \"\"\")\n st.sidebar.header(\"Input form for ML model\")\n # Getting user input values in correct format\n response_dict = create_inference_input(df)\n \n name = st.text_input(label=\"Enter your name\")\n\n # Dump the user inputs in a file:\n if st.sidebar.button(label=\"Submit input to model\"):\n joblib.dump(response_dict, \"app/src/utils/payload_dump/response_dict.bin\")\n\n if st.button(label=\"Predict\"):\n # Load user inputs:\n response_dict = joblib.load(\"app/src/utils/payload_dump/response_dict.bin\")\n # Append user inputs to existing dataframe:\n df = df.append(response_dict, ignore_index=True)\n # Load the saved ML model:\n model = joblib.load(\"app/src/models/rf_model.bin\")\n # Drop the target variable:\n df.drop([\"target\"], axis=1, inplace=True)\n # Create dummy variables:\n df = pd.get_dummies(df, drop_first=True)\n # Get prediction:\n pred = model.predict(df.iloc[-1, :].values.reshape(1, -1))\n # Get the prediction probabilities for the two classes:\n pred_prob = model.predict_proba(df.iloc[-1, :].values.reshape(1, -1))\n # Convert prediction into human readable string:\n pred = \"No you do not have heart disease\" if pred == 0 else \"You have heart disease\"\n\n # Create a dataframe to store resutls:\n result = pd.DataFrame({\"Values\": [name, round(pred_prob[0][1], 2), pred]},\n index=[\"Name\", \n \"Probability of Heart Disease\", \n \"Verdict\"])\n st.write(result)\n\n\n"
] | [
[
"pandas.get_dummies"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
sergunya17/catalyst | [
"84bc7576c981278f389279d87dda85dd66a758b6",
"f98d71138c09cd1b5a69b788cb5006115f5c7fda",
"f98d71138c09cd1b5a69b788cb5006115f5c7fda",
"f98d71138c09cd1b5a69b788cb5006115f5c7fda"
] | [
"catalyst/contrib/datasets/misc.py",
"tests/pipelines/test_sklearn_classifier_mnist.py",
"catalyst/utils/tracing.py",
"tests/pipelines/test_mnist_multimodel.py"
] | [
"import codecs\nimport gzip\nimport hashlib\nimport lzma\nimport os\nimport tarfile\nimport zipfile\n\nimport numpy as np\n\nimport torch\nfrom torch.utils.model_zoo import tqdm\n\n\ndef _gen_bar_updater():\n pbar = tqdm(total=None)\n\n def bar_update(count, block_size, total_size):\n if pbar.total is None and total_size:\n pbar.total = total_size\n progress_bytes = count * block_size\n pbar.update(progress_bytes - pbar.n)\n\n return bar_update\n\n\ndef _calculate_md5(fpath, chunk_size=1024 * 1024):\n md5 = hashlib.md5()\n with open(fpath, \"rb\") as f:\n for chunk in iter(lambda: f.read(chunk_size), b\"\"):\n md5.update(chunk)\n return md5.hexdigest()\n\n\ndef _check_md5(fpath, md5, **kwargs):\n return md5 == _calculate_md5(fpath, **kwargs)\n\n\ndef _check_integrity(fpath, md5=None):\n if not os.path.isfile(fpath):\n return False\n if md5 is None:\n return True\n return _check_md5(fpath, md5)\n\n\ndef download_url(url, root, filename=None, md5=None):\n \"\"\"Download a file from a url and place it in root.\n\n Args:\n url: URL to download file from\n root: Directory to place downloaded file in\n filename (str, optional): Name to save the file under.\n If None, use the basename of the URL\n md5 (str, optional): MD5 checksum of the download.\n If None, do not check\n\n Raises:\n IOError: if failed to download url\n RuntimeError: if file not found or corrupted\n \"\"\"\n import urllib\n\n root = os.path.expanduser(root)\n if not filename:\n filename = os.path.basename(url)\n fpath = os.path.join(root, filename)\n\n os.makedirs(root, exist_ok=True)\n\n # check if file is already present locally\n if _check_integrity(fpath, md5):\n print(\"Using downloaded and verified file: \" + fpath)\n else: # download the file\n try:\n print(\"Downloading \" + url + \" to \" + fpath)\n urllib.request.urlretrieve(url, fpath, reporthook=_gen_bar_updater())\n except (urllib.error.URLError, IOError) as e:\n if url[:5] == \"https\":\n url = url.replace(\"https:\", \"http:\")\n print(\n \"Failed download. Trying https -> http instead.\"\n \" Downloading \" + url + \" to \" + fpath\n )\n urllib.request.urlretrieve(url, fpath, reporthook=_gen_bar_updater())\n else:\n raise e\n # check integrity of downloaded file\n if not _check_integrity(fpath, md5):\n raise RuntimeError(\"File not found or corrupted.\")\n\n\ndef _extract_archive(from_path, to_path=None, remove_finished=False):\n if to_path is None:\n to_path = os.path.dirname(from_path)\n\n if from_path.endswith(\".tar\"):\n with tarfile.open(from_path, \"r\") as tar:\n tar.extractall(path=to_path)\n elif from_path.endswith(\".tar.gz\") or from_path.endswith(\".tgz\"):\n with tarfile.open(from_path, \"r:gz\") as tar:\n tar.extractall(path=to_path)\n elif from_path.endswith(\".tar.xz\"):\n with tarfile.open(from_path, \"r:xz\") as tar:\n tar.extractall(path=to_path)\n elif from_path.endswith(\".gz\"):\n root, _ = os.path.splitext(os.path.basename(from_path))\n to_path = os.path.join(to_path, root)\n with open(to_path, \"wb\") as out_f, gzip.GzipFile(from_path) as zip_f:\n out_f.write(zip_f.read())\n elif from_path.endswith(\".zip\"):\n with zipfile.ZipFile(from_path, \"r\") as z:\n z.extractall(to_path)\n else:\n raise ValueError(f\"Extraction of {from_path} not supported\")\n\n if remove_finished:\n os.remove(from_path)\n\n\ndef download_and_extract_archive(\n url, download_root, extract_root=None, filename=None, md5=None, remove_finished=False\n):\n \"\"\"@TODO: Docs. 
Contribution is welcome.\"\"\"\n download_root = os.path.expanduser(download_root)\n if extract_root is None:\n extract_root = download_root\n if not filename:\n filename = os.path.basename(url)\n\n download_url(url, download_root, filename, md5)\n\n archive = os.path.join(download_root, filename)\n print(f\"Extracting {archive} to {extract_root}\")\n _extract_archive(archive, extract_root, remove_finished)\n\n\ndef _get_int(b):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n return int(codecs.encode(b, \"hex\"), 16)\n\n\ndef _open_maybe_compressed_file(path):\n \"\"\"Return a file object that possibly decompresses 'path' on the fly.\n Decompression occurs when argument `path` is a string\n and ends with '.gz' or '.xz'.\n\n Args:\n path: path\n\n Returns:\n file\n \"\"\"\n if not isinstance(path, torch._six.string_classes):\n return path\n if path.endswith(\".gz\"):\n return gzip.open(path, \"rb\")\n if path.endswith(\".xz\"):\n return lzma.open(path, \"rb\")\n return open(path, \"rb\")\n\n\ndef read_sn3_pascalvincent_tensor(path, strict=True):\n \"\"\"Read a SN3 file in \"Pascal Vincent\" format.\"\"\"\n # typemap\n if not hasattr(read_sn3_pascalvincent_tensor, \"typemap\"):\n read_sn3_pascalvincent_tensor.typemap = {\n 8: (torch.uint8, np.uint8, np.uint8),\n 9: (torch.int8, np.int8, np.int8),\n 11: (torch.int16, np.dtype(\">i2\"), \"i2\"),\n 12: (torch.int32, np.dtype(\">i4\"), \"i4\"),\n 13: (torch.float32, np.dtype(\">f4\"), \"f4\"),\n 14: (torch.float64, np.dtype(\">f8\"), \"f8\"),\n }\n # read\n with _open_maybe_compressed_file(path) as f:\n data = f.read()\n # parse\n magic = _get_int(data[0:4])\n nd = magic % 256\n ty = magic // 256\n assert nd >= 1 and nd <= 3\n assert ty >= 8 and ty <= 14\n m = read_sn3_pascalvincent_tensor.typemap[ty]\n s = [_get_int(data[4 * (i + 1) : 4 * (i + 2)]) for i in range(nd)]\n parsed = np.frombuffer(data, dtype=m[1], offset=(4 * (nd + 1)))\n assert parsed.shape[0] == np.prod(s) or not strict\n return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s)\n\n\n__all__ = [\"download_and_extract_archive\", \"download_url\", \"read_sn3_pascalvincent_tensor\"]\n",
"# flake8: noqa\nimport csv\nimport os\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\n\nfrom pytest import mark\n\nimport torch\nfrom torch import nn\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader\n\nfrom catalyst import data, dl\nfrom catalyst.contrib.datasets import MNIST\nfrom catalyst.contrib.losses import TripletMarginLossWithSampler\nfrom catalyst.settings import SETTINGS\n\nif SETTINGS.ml_required:\n from sklearn.ensemble import RandomForestClassifier\n\nTRAIN_EPOCH = 3\nLR = 0.01\nRANDOM_STATE = 42\n\n\ndef read_csv(csv_path: str):\n with open(csv_path, \"r\") as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\",\")\n for line_count, row in enumerate(csv_reader):\n if line_count == 0:\n colnames = row\n else:\n yield {colname: val for colname, val in zip(colnames, row)}\n\n\ndef train_experiment(device, engine=None):\n with TemporaryDirectory() as logdir:\n from catalyst import utils\n\n utils.set_global_seed(RANDOM_STATE)\n # 1. train, valid and test loaders\n train_data = MNIST(os.getcwd(), train=True)\n train_labels = train_data.targets.cpu().numpy().tolist()\n train_sampler = data.BatchBalanceClassSampler(train_labels, num_classes=10, num_samples=4)\n train_loader = DataLoader(train_data, batch_sampler=train_sampler)\n\n valid_dataset = MNIST(root=os.getcwd(), train=False)\n valid_loader = DataLoader(dataset=valid_dataset, batch_size=32)\n\n test_dataset = MNIST(root=os.getcwd(), train=False)\n test_loader = DataLoader(dataset=test_dataset, batch_size=32)\n\n # 2. model and optimizer\n model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 16), nn.LeakyReLU(inplace=True))\n optimizer = Adam(model.parameters(), lr=LR)\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [2])\n\n # 3. criterion with triplets sampling\n sampler_inbatch = data.HardTripletsSampler(norm_required=False)\n criterion = TripletMarginLossWithSampler(margin=0.5, sampler_inbatch=sampler_inbatch)\n\n # 4. 
training with catalyst Runner\n class CustomRunner(dl.SupervisedRunner):\n def handle_batch(self, batch) -> None:\n images, targets = batch[\"features\"].float(), batch[\"targets\"].long()\n features = self.model(images)\n self.batch = {\n \"embeddings\": features,\n \"targets\": targets,\n }\n\n callbacks = [\n dl.ControlFlowCallback(\n dl.CriterionCallback(\n input_key=\"embeddings\", target_key=\"targets\", metric_key=\"loss\"\n ),\n loaders=\"train\",\n ),\n dl.SklearnModelCallback(\n feature_key=\"embeddings\",\n target_key=\"targets\",\n train_loader=\"train\",\n valid_loaders=[\"valid\", \"infer\"],\n model_fn=RandomForestClassifier,\n predict_method=\"predict_proba\",\n predict_key=\"sklearn_predict\",\n random_state=RANDOM_STATE,\n n_estimators=50,\n ),\n dl.ControlFlowCallback(\n dl.AccuracyCallback(\n target_key=\"targets\", input_key=\"sklearn_predict\", topk_args=(1, 3)\n ),\n loaders=[\"valid\", \"infer\"],\n ),\n ]\n\n runner = CustomRunner(input_key=\"features\", output_key=\"embeddings\")\n runner.train(\n engine=engine or dl.DeviceEngine(device),\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n scheduler=scheduler,\n callbacks=callbacks,\n loaders={\"train\": train_loader, \"valid\": valid_loader, \"infer\": test_loader},\n verbose=False,\n valid_loader=\"valid\",\n valid_metric=\"accuracy01\",\n minimize_valid_metric=False,\n num_epochs=TRAIN_EPOCH,\n logdir=logdir,\n )\n\n valid_path = Path(logdir) / \"logs/infer.csv\"\n best_accuracy = max(float(row[\"accuracy01\"]) for row in read_csv(valid_path))\n\n assert best_accuracy > 0.8\n\n\[email protected](not SETTINGS.ml_required, reason=\"catalyst[ml] required\")\ndef test_on_cpu():\n train_experiment(\"cpu\")\n",
"from typing import Tuple, Union\nimport logging\n\nimport torch\nfrom torch import jit\n\nfrom catalyst.extras.forward_wrapper import ModelForwardWrapper\nfrom catalyst.typing import Model\nfrom catalyst.utils.torch import get_nn_from_ddp_module\n\nlogger = logging.getLogger(__name__)\n\n\ndef trace_model(\n model: Model, batch: Union[Tuple[torch.Tensor], torch.Tensor], method_name: str = \"forward\"\n) -> jit.ScriptModule:\n \"\"\"Traces model using runner and batch.\n\n Args:\n model: Model to trace\n batch: Batch to trace the model\n method_name: Model's method name that will be\n used as entrypoint during tracing\n\n Example:\n .. code-block:: python\n\n import torch\n\n from catalyst.utils import trace_model\n\n class LinModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.lin1 = torch.nn.Linear(10, 10)\n self.lin2 = torch.nn.Linear(2, 10)\n\n def forward(self, inp_1, inp_2):\n return self.lin1(inp_1), self.lin2(inp_2)\n\n def first_only(self, inp_1):\n return self.lin1(inp_1)\n\n lin_model = LinModel()\n traced_model = trace_model(\n lin_model, batch=torch.randn(1, 10), method_name=\"first_only\"\n )\n\n Returns:\n jit.ScriptModule: Traced model\n \"\"\"\n nn_model = get_nn_from_ddp_module(model)\n wrapped_model = ModelForwardWrapper(model=nn_model, method_name=method_name)\n traced = jit.trace(wrapped_model, example_inputs=batch)\n return traced\n\n\n__all__ = [\"trace_model\"]\n",
"# flake8: noqa\n\nimport os\nfrom tempfile import TemporaryDirectory\n\nfrom pytest import mark\n\nfrom torch import nn, optim\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader\n\nfrom catalyst import dl, metrics, utils\nfrom catalyst.contrib.datasets import MNIST\nfrom catalyst.settings import IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES, SETTINGS\n\n\nclass CustomRunner(dl.Runner):\n def predict_batch(self, batch):\n # model inference step\n return self.model(batch[0].to(self.device))\n\n def on_loader_start(self, runner):\n super().on_loader_start(runner)\n self.meters = {\n key: metrics.AdditiveMetric(compute_on_call=False)\n for key in [\"loss\", \"accuracy01\", \"accuracy03\"]\n }\n\n def handle_batch(self, batch):\n # model train/valid step\n # unpack the batch\n x, y = batch\n # <--- multi-model usage --->\n # run model forward pass\n x_ = self.model[\"encoder\"](x)\n logits = self.model[\"head\"](x_)\n # <--- multi-model usage --->\n # compute the loss\n loss = self.criterion(logits, y)\n # compute other metrics of interest\n accuracy01, accuracy03 = metrics.accuracy(logits, y, topk=(1, 3))\n # log metrics\n self.batch_metrics.update(\n {\"loss\": loss, \"accuracy01\": accuracy01, \"accuracy03\": accuracy03}\n )\n for key in [\"loss\", \"accuracy01\", \"accuracy03\"]:\n self.meters[key].update(self.batch_metrics[key].item(), self.batch_size)\n # run model backward pass\n if self.is_train_loader:\n loss.backward()\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n def on_loader_end(self, runner):\n for key in [\"loss\", \"accuracy01\", \"accuracy03\"]:\n self.loader_metrics[key] = self.meters[key].compute()[0]\n super().on_loader_end(runner)\n\n\ndef train_experiment(device, engine=None):\n with TemporaryDirectory() as logdir:\n\n # <--- multi-model setup --->\n encoder = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 128))\n head = nn.Linear(128, 10)\n model = {\"encoder\": encoder, \"head\": head}\n optimizer = optim.Adam(\n [{\"params\": encoder.parameters()}, {\"params\": head.parameters()}], lr=0.02\n )\n # <--- multi-model setup --->\n criterion = nn.CrossEntropyLoss()\n\n loaders = {\n \"train\": DataLoader(\n MNIST(os.getcwd(), train=True),\n batch_size=32,\n ),\n \"valid\": DataLoader(\n MNIST(os.getcwd(), train=False),\n batch_size=32,\n ),\n }\n\n runner = CustomRunner()\n # model training\n runner.train(\n engine=engine or dl.DeviceEngine(device),\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n loaders=loaders,\n logdir=logdir,\n num_epochs=1,\n verbose=False,\n valid_loader=\"valid\",\n valid_metric=\"loss\",\n minimize_valid_metric=True,\n )\n\n\n# Torch\ndef test_on_cpu():\n train_experiment(\"cpu\")\n\n\[email protected](not IS_CUDA_AVAILABLE, reason=\"CUDA device is not available\")\ndef test_on_torch_cuda0():\n train_experiment(\"cuda:0\")\n\n\[email protected](not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason=\"No CUDA>=2 found\")\ndef test_on_torch_cuda1():\n train_experiment(\"cuda:1\")\n\n\[email protected](not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason=\"No CUDA>=2 found\")\ndef test_on_torch_dp():\n train_experiment(None, dl.DataParallelEngine())\n\n\n# @mark.skipif(\n# not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >=2),\n# reason=\"No CUDA>=2 found\",\n# )\n# def test_on_ddp():\n# train_experiment(None, dl.DistributedDataParallelEngine())\n\n# AMP\[email protected](not (IS_CUDA_AVAILABLE and SETTINGS.amp_required), reason=\"No CUDA or AMP found\")\ndef test_on_amp():\n train_experiment(None, 
dl.AMPEngine())\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.amp_required),\n reason=\"No CUDA>=2 or AMP found\",\n)\ndef test_on_amp_dp():\n train_experiment(None, dl.DataParallelAMPEngine())\n\n\n# @mark.skipif(\n# not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.amp_required),\n# reason=\"No CUDA>=2 or AMP found\",\n# )\n# def test_on_amp_ddp():\n# train_experiment(None, dl.DistributedDataParallelAMPEngine())\n\n# APEX\[email protected](not (IS_CUDA_AVAILABLE and SETTINGS.apex_required), reason=\"No CUDA or Apex found\")\ndef test_on_apex():\n train_experiment(None, dl.APEXEngine())\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.apex_required),\n reason=\"No CUDA>=2 or Apex found\",\n)\ndef test_on_apex_dp():\n train_experiment(None, dl.DataParallelAPEXEngine())\n\n\n# @mark.skipif(\n# not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.apex_required),\n# reason=\"No CUDA>=2 or Apex found\",\n# )\n# def test_on_apex_ddp():\n# train_experiment(None, dl.DistributedDataParallelApexEngine())\n"
] | [
[
"numpy.prod",
"numpy.frombuffer",
"numpy.dtype",
"torch.utils.model_zoo.tqdm"
],
[
"torch.optim.lr_scheduler.MultiStepLR",
"torch.utils.data.DataLoader",
"torch.nn.Flatten",
"torch.nn.Linear",
"torch.nn.LeakyReLU"
],
[
"torch.jit.trace"
],
[
"torch.nn.Linear",
"torch.nn.CrossEntropyLoss",
"torch.nn.Flatten"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pcallier/scipy | [
"0efe8d0a877a990c0adf059db67aa5d5b97c1849"
] | [
"scipy/misc/pilutil.py"
] | [
"\"\"\"\nA collection of image utilities using the Python Imaging Library (PIL).\n\nNote that PIL is not a dependency of SciPy and this module is not\navailable on systems that don't have PIL installed.\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\n# Functions which need the PIL\n\nimport numpy\nimport tempfile\n\nfrom numpy import (amin, amax, ravel, asarray, cast, arange, ones, newaxis,\n transpose, iscomplexobj, uint8, issubdtype, array)\n\ntry:\n from PIL import Image, ImageFilter\nexcept ImportError:\n import Image\n import ImageFilter\n\n\nif not hasattr(Image, 'frombytes'):\n Image.frombytes = Image.fromstring\n\n__all__ = ['fromimage', 'toimage', 'imsave', 'imread', 'bytescale',\n 'imrotate', 'imresize', 'imshow', 'imfilter']\n\n\n# Returns a byte-scaled image\ndef bytescale(data, cmin=None, cmax=None, high=255, low=0):\n \"\"\"\n Byte scales an array (image).\n\n Byte scaling means converting the input image to uint8 dtype and scaling\n the range to ``(low, high)`` (default 0-255).\n If the input image already has dtype uint8, no scaling is done.\n\n This function is only available if Python Imaging Library (PIL) is installed.\n\n Parameters\n ----------\n data : ndarray\n PIL image data array.\n cmin : scalar, optional\n Bias scaling of small values. Default is ``data.min()``.\n cmax : scalar, optional\n Bias scaling of large values. Default is ``data.max()``.\n high : scalar, optional\n Scale max value to `high`. Default is 255.\n low : scalar, optional\n Scale min value to `low`. Default is 0.\n\n Returns\n -------\n img_array : uint8 ndarray\n The byte-scaled array.\n\n Examples\n --------\n >>> from scipy.misc import bytescale\n >>> img = np.array([[ 91.06794177, 3.39058326, 84.4221549 ],\n ... [ 73.88003259, 80.91433048, 4.88878881],\n ... [ 51.53875334, 34.45808177, 27.5873488 ]])\n >>> bytescale(img)\n array([[255, 0, 236],\n [205, 225, 4],\n [140, 90, 70]], dtype=uint8)\n >>> bytescale(img, high=200, low=100)\n array([[200, 100, 192],\n [180, 188, 102],\n [155, 135, 128]], dtype=uint8)\n >>> bytescale(img, cmin=0, cmax=255)\n array([[91, 3, 84],\n [74, 81, 5],\n [52, 34, 28]], dtype=uint8)\n\n \"\"\"\n if data.dtype == uint8:\n return data\n\n if high > 255:\n raise ValueError(\"`high` should be less than or equal to 255.\")\n if low < 0:\n raise ValueError(\"`low` should be greater than or equal to 0.\")\n if high < low:\n raise ValueError(\"`high` should be greater than or equal to `low`.\")\n\n if cmin is None:\n cmin = data.min()\n if cmax is None:\n cmax = data.max()\n\n cscale = cmax - cmin\n if cscale < 0:\n raise ValueError(\"`cmax` should be larger than `cmin`.\")\n elif cscale == 0:\n cscale = 1\n\n scale = float(high - low) / cscale\n bytedata = (data - cmin) * scale + low\n return (bytedata.clip(low, high) + 0.5).astype(uint8)\n\n\ndef imread(name, flatten=False, mode=None):\n \"\"\"\n Read an image from a file as an array.\n\n This function is only available if Python Imaging Library (PIL) is installed.\n\n Parameters\n ----------\n name : str or file object\n The file name or file object to be read.\n flatten : bool, optional\n If True, flattens the color layers into a single gray-scale layer.\n mode : str, optional\n Mode to convert image to, e.g. ``'RGB'``. 
See the Notes for more\n details.\n\n Returns\n -------\n imread : ndarray\n The array obtained by reading the image.\n\n Notes\n -----\n `imread` uses the Python Imaging Library (PIL) to read an image.\n The following notes are from the PIL documentation.\n\n `mode` can be one of the following strings:\n\n * 'L' (8-bit pixels, black and white)\n * 'P' (8-bit pixels, mapped to any other mode using a color palette)\n * 'RGB' (3x8-bit pixels, true color)\n * 'RGBA' (4x8-bit pixels, true color with transparency mask)\n * 'CMYK' (4x8-bit pixels, color separation)\n * 'YCbCr' (3x8-bit pixels, color video format)\n * 'I' (32-bit signed integer pixels)\n * 'F' (32-bit floating point pixels)\n\n PIL also provides limited support for a few special modes, including\n 'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa'\n (true color with premultiplied alpha).\n\n When translating a color image to black and white (mode 'L', 'I' or\n 'F'), the library uses the ITU-R 601-2 luma transform::\n\n L = R * 299/1000 + G * 587/1000 + B * 114/1000\n\n When `flatten` is True, the image is converted using mode 'F'.\n When `mode` is not None and `flatten` is True, the image is first\n converted according to `mode`, and the result is then flattened using\n mode 'F'.\n\n \"\"\"\n\n im = Image.open(name)\n return fromimage(im, flatten=flatten, mode=mode)\n\n\ndef imsave(name, arr, format=None):\n \"\"\"\n Save an array as an image.\n\n This function is only available if Python Imaging Library (PIL) is installed.\n\n Parameters\n ----------\n name : str or file object\n Output file name or file object.\n arr : ndarray, MxN or MxNx3 or MxNx4\n Array containing image values. If the shape is ``MxN``, the array\n represents a grey-level image. Shape ``MxNx3`` stores the red, green\n and blue bands along the last dimension. An alpha layer may be\n included, specified as the last colour band of an ``MxNx4`` array.\n format : str\n Image format. If omitted, the format to use is determined from the\n file name extension. If a file object was used instead of a file name,\n this parameter should always be used.\n\n Examples\n --------\n Construct an array of gradient intensity values and save to file:\n\n >>> from scipy.misc import imsave\n >>> x = np.zeros((255, 255))\n >>> x = np.zeros((255, 255), dtype=np.uint8)\n >>> x[:] = np.arange(255)\n >>> imsave('gradient.png', x)\n\n Construct an array with three colour bands (R, G, B) and store to file:\n\n >>> rgb = np.zeros((255, 255, 3), dtype=np.uint8)\n >>> rgb[..., 0] = np.arange(255)\n >>> rgb[..., 1] = 55\n >>> rgb[..., 2] = 1 - np.arange(255)\n >>> imsave('rgb_gradient.png', rgb)\n\n \"\"\"\n im = toimage(arr, channel_axis=2)\n if format is None:\n im.save(name)\n else:\n im.save(name, format)\n return\n\n\ndef fromimage(im, flatten=False, mode=None):\n \"\"\"\n Return a copy of a PIL image as a numpy array.\n\n This function is only available if Python Imaging Library (PIL) is installed.\n\n Parameters\n ----------\n im : PIL image\n Input image.\n flatten : bool\n If true, convert the output to grey-scale.\n mode : str, optional\n Mode to convert image to, e.g. ``'RGB'``. 
See the Notes of the\n `imread` docstring for more details.\n\n Returns\n -------\n fromimage : ndarray\n The different colour bands/channels are stored in the\n third dimension, such that a grey-image is MxN, an\n RGB-image MxNx3 and an RGBA-image MxNx4.\n\n \"\"\"\n if not Image.isImageType(im):\n raise TypeError(\"Input is not a PIL image.\")\n\n if mode is not None:\n if mode != im.mode:\n im = im.convert(mode)\n elif im.mode == 'P':\n # Mode 'P' means there is an indexed \"palette\". If we leave the mode\n # as 'P', then when we do `a = array(im)` below, `a` will be a 2-D\n # containing the indices into the palette, and not a 3-D array\n # containing the RGB or RGBA values.\n if 'transparency' in im.info:\n im = im.convert('RGBA')\n else:\n im = im.convert('RGB')\n\n if flatten:\n im = im.convert('F')\n elif im.mode == '1':\n # Workaround for crash in PIL. When im is 1-bit, the call array(im)\n # can cause a seg. fault, or generate garbage. See\n # https://github.com/scipy/scipy/issues/2138 and\n # https://github.com/python-pillow/Pillow/issues/350.\n #\n # This converts im from a 1-bit image to an 8-bit image.\n im = im.convert('L')\n\n a = array(im)\n return a\n\n_errstr = \"Mode is unknown or incompatible with input array shape.\"\n\n\ndef toimage(arr, high=255, low=0, cmin=None, cmax=None, pal=None,\n mode=None, channel_axis=None):\n \"\"\"Takes a numpy array and returns a PIL image.\n\n This function is only available if Python Imaging Library (PIL) is installed.\n\n The mode of the PIL image depends on the array shape and the `pal` and\n `mode` keywords.\n\n For 2-D arrays, if `pal` is a valid (N,3) byte-array giving the RGB values\n (from 0 to 255) then ``mode='P'``, otherwise ``mode='L'``, unless mode\n is given as 'F' or 'I' in which case a float and/or integer array is made.\n\n Notes\n -----\n For 3-D arrays, the `channel_axis` argument tells which dimension of the\n array holds the channel data.\n\n For 3-D arrays if one of the dimensions is 3, the mode is 'RGB'\n by default or 'YCbCr' if selected.\n\n The numpy array must be either 2 dimensional or 3 dimensional.\n\n \"\"\"\n data = asarray(arr)\n if iscomplexobj(data):\n raise ValueError(\"Cannot convert a complex-valued array.\")\n shape = list(data.shape)\n valid = len(shape) == 2 or ((len(shape) == 3) and\n ((3 in shape) or (4 in shape)))\n if not valid:\n raise ValueError(\"'arr' does not have a suitable array shape for \"\n \"any mode.\")\n if len(shape) == 2:\n shape = (shape[1], shape[0]) # columns show up first\n if mode == 'F':\n data32 = data.astype(numpy.float32)\n image = Image.frombytes(mode, shape, data32.tostring())\n return image\n if mode in [None, 'L', 'P']:\n bytedata = bytescale(data, high=high, low=low,\n cmin=cmin, cmax=cmax)\n image = Image.frombytes('L', shape, bytedata.tostring())\n if pal is not None:\n image.putpalette(asarray(pal, dtype=uint8).tostring())\n # Becomes a mode='P' automagically.\n elif mode == 'P': # default gray-scale\n pal = (arange(0, 256, 1, dtype=uint8)[:, newaxis] *\n ones((3,), dtype=uint8)[newaxis, :])\n image.putpalette(asarray(pal, dtype=uint8).tostring())\n return image\n if mode == '1': # high input gives threshold for 1\n bytedata = (data > high)\n image = Image.frombytes('1', shape, bytedata.tostring())\n return image\n if cmin is None:\n cmin = amin(ravel(data))\n if cmax is None:\n cmax = amax(ravel(data))\n data = (data*1.0 - cmin)*(high - low)/(cmax - cmin) + low\n if mode == 'I':\n data32 = data.astype(numpy.uint32)\n image = Image.frombytes(mode, shape, 
data32.tostring())\n else:\n raise ValueError(_errstr)\n return image\n\n # if here then 3-d array with a 3 or a 4 in the shape length.\n # Check for 3 in datacube shape --- 'RGB' or 'YCbCr'\n if channel_axis is None:\n if (3 in shape):\n ca = numpy.flatnonzero(asarray(shape) == 3)[0]\n else:\n ca = numpy.flatnonzero(asarray(shape) == 4)\n if len(ca):\n ca = ca[0]\n else:\n raise ValueError(\"Could not find channel dimension.\")\n else:\n ca = channel_axis\n\n numch = shape[ca]\n if numch not in [3, 4]:\n raise ValueError(\"Channel axis dimension is not valid.\")\n\n bytedata = bytescale(data, high=high, low=low, cmin=cmin, cmax=cmax)\n if ca == 2:\n strdata = bytedata.tostring()\n shape = (shape[1], shape[0])\n elif ca == 1:\n strdata = transpose(bytedata, (0, 2, 1)).tostring()\n shape = (shape[2], shape[0])\n elif ca == 0:\n strdata = transpose(bytedata, (1, 2, 0)).tostring()\n shape = (shape[2], shape[1])\n if mode is None:\n if numch == 3:\n mode = 'RGB'\n else:\n mode = 'RGBA'\n\n if mode not in ['RGB', 'RGBA', 'YCbCr', 'CMYK']:\n raise ValueError(_errstr)\n\n if mode in ['RGB', 'YCbCr']:\n if numch != 3:\n raise ValueError(\"Invalid array shape for mode.\")\n if mode in ['RGBA', 'CMYK']:\n if numch != 4:\n raise ValueError(\"Invalid array shape for mode.\")\n\n # Here we know data and mode is correct\n image = Image.frombytes(mode, shape, strdata)\n return image\n\n\ndef imrotate(arr, angle, interp='bilinear'):\n \"\"\"\n Rotate an image counter-clockwise by angle degrees.\n\n This function is only available if Python Imaging Library (PIL) is installed.\n\n Parameters\n ----------\n arr : ndarray\n Input array of image to be rotated.\n angle : float\n The angle of rotation.\n interp : str, optional\n Interpolation\n\n - 'nearest' : for nearest neighbor\n - 'bilinear' : for bilinear\n - 'lanczos' : for lanczos\n - 'cubic' : for bicubic\n - 'bicubic' : for bicubic\n\n Returns\n -------\n imrotate : ndarray\n The rotated array of image.\n\n \"\"\"\n arr = asarray(arr)\n func = {'nearest': 0, 'lanczos': 1, 'bilinear': 2, 'bicubic': 3, 'cubic': 3}\n im = toimage(arr)\n im = im.rotate(angle, resample=func[interp])\n return fromimage(im)\n\n\ndef imshow(arr):\n \"\"\"\n Simple showing of an image through an external viewer.\n\n This function is only available if Python Imaging Library (PIL) is installed.\n\n Uses the image viewer specified by the environment variable\n SCIPY_PIL_IMAGE_VIEWER, or if that is not defined then `see`,\n to view a temporary file generated from array data.\n\n Parameters\n ----------\n arr : ndarray\n Array of image data to show.\n\n Returns\n -------\n None\n\n Examples\n --------\n >>> a = np.tile(np.arange(255), (255,1))\n >>> from scipy import misc\n >>> misc.imshow(a)\n\n \"\"\"\n im = toimage(arr)\n fnum, fname = tempfile.mkstemp('.png')\n try:\n im.save(fname)\n except:\n raise RuntimeError(\"Error saving temporary image data.\")\n\n import os\n os.close(fnum)\n\n cmd = os.environ.get('SCIPY_PIL_IMAGE_VIEWER', 'see')\n status = os.system(\"%s %s\" % (cmd, fname))\n\n os.unlink(fname)\n if status != 0:\n raise RuntimeError('Could not execute image viewer.')\n\n\ndef imresize(arr, size, interp='bilinear', mode=None):\n \"\"\"\n Resize an image.\n\n This function is only available if Python Imaging Library (PIL) is installed.\n\n Parameters\n ----------\n arr : ndarray\n The array of image to be resized.\n\n size : int, float or tuple\n * int - Percentage of current size.\n * float - Fraction of current size.\n * tuple - Size of the output image.\n\n interp : 
str, optional\n Interpolation to use for re-sizing ('nearest', 'lanczos', 'bilinear', 'bicubic'\n or 'cubic').\n\n mode : str, optional\n The PIL image mode ('P', 'L', etc.) to convert `arr` before resizing.\n\n Returns\n -------\n imresize : ndarray\n The resized array of image.\n\n See Also\n --------\n toimage : Implicitly used to convert `arr` according to `mode`.\n scipy.ndimage.zoom : More generic implementation that does not use PIL.\n\n \"\"\"\n im = toimage(arr, mode=mode)\n ts = type(size)\n if issubdtype(ts, int):\n percent = size / 100.0\n size = tuple((array(im.size)*percent).astype(int))\n elif issubdtype(type(size), float):\n size = tuple((array(im.size)*size).astype(int))\n else:\n size = (size[1], size[0])\n func = {'nearest': 0, 'lanczos': 1, 'bilinear': 2, 'bicubic': 3, 'cubic': 3}\n imnew = im.resize(size, resample=func[interp])\n return fromimage(imnew)\n\n\ndef imfilter(arr, ftype):\n \"\"\"\n Simple filtering of an image.\n\n This function is only available if Python Imaging Library (PIL) is installed.\n\n Parameters\n ----------\n arr : ndarray\n The array of Image in which the filter is to be applied.\n ftype : str\n The filter that has to be applied. Legal values are:\n 'blur', 'contour', 'detail', 'edge_enhance', 'edge_enhance_more',\n 'emboss', 'find_edges', 'smooth', 'smooth_more', 'sharpen'.\n\n Returns\n -------\n imfilter : ndarray\n The array with filter applied.\n\n Raises\n ------\n ValueError\n *Unknown filter type.* If the filter you are trying\n to apply is unsupported.\n\n \"\"\"\n _tdict = {'blur': ImageFilter.BLUR,\n 'contour': ImageFilter.CONTOUR,\n 'detail': ImageFilter.DETAIL,\n 'edge_enhance': ImageFilter.EDGE_ENHANCE,\n 'edge_enhance_more': ImageFilter.EDGE_ENHANCE_MORE,\n 'emboss': ImageFilter.EMBOSS,\n 'find_edges': ImageFilter.FIND_EDGES,\n 'smooth': ImageFilter.SMOOTH,\n 'smooth_more': ImageFilter.SMOOTH_MORE,\n 'sharpen': ImageFilter.SHARPEN\n }\n\n im = toimage(arr)\n if ftype not in _tdict:\n raise ValueError(\"Unknown filter type.\")\n return fromimage(im.filter(_tdict[ftype]))\n"
] | [
[
"numpy.asarray",
"numpy.arange",
"numpy.issubdtype",
"numpy.transpose",
"numpy.ones",
"numpy.iscomplexobj",
"numpy.ravel",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fcitil/UAV-Line-Follower | [
"4899c5f6bc4e48b74145a6c7d4020494ce5ca582"
] | [
"linefollow-videoinputver.py"
] | [
"import cv2 as cv\nimport numpy as np\nimport time\n\ndef intersection(img):\n height,width,channels=img.shape\n ppt = np.array([\n [width/20,height/20*19],\n [width/20*19,height/20*19],\n [width/20*19,height/20],\n [width/20,height/20]\n ], np.int32)\n ppt = ppt.reshape((-1, 1, 2))\n cv.fillPoly(img, [ppt], (255, 255, 255), 8)\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n blur = cv.GaussianBlur(gray, (5, 5), 0)\n _, th = cv.threshold(blur, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)\n contours, hierarchy = cv.findContours(th, 1, cv.CHAIN_APPROX_SIMPLE)\n if len(contours)>2:\n return True\n else:\n return False\n#opening camera\ncap=cv.VideoCapture('line.mp4')\nif not cap.isOpened():\n print(\"Camera cannot be opened\")\n#height&widht of captured video\n(height,width)=(640,480)\nret = cap.set(cv.CAP_PROP_FRAME_WIDTH, height)\nret = cap.set(cv.CAP_PROP_FRAME_HEIGHT, width)\n#ret = cap.set(cv.CAP_PROP_FPS,10)\n\nwhile(cap.isOpened()):\n #capture video frame by frame\n ret, frame= cap.read()\n frame=~frame\n #ret gives bool\n if not ret:\n print(\"Can't receive frame.EXİTİNG...\")\n gray=cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n blur = cv.GaussianBlur(gray,(5,5),0)\n _, th = cv.threshold(blur, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)\n contours, hierarchy = cv.findContours(th, 1, cv.CHAIN_APPROX_SIMPLE)\n clrth=cv.cvtColor(th,cv.COLOR_GRAY2BGR)\n cv.drawContours(clrth, contours, -1, (0, 255, 0), 2)\n\n (x, y), (MA, ma), angle = cv.fitEllipse(max(contours, key=cv.contourArea))\n\n\n if len(contours)>0:\n c = max(contours, key=cv.contourArea)\n M = cv.moments(c)\n cx = int(M['m10'] / M['m00'])\n cy = int(M['m01'] / M['m00'])\n cv.line(clrth, (cx, 0), (cx, 480), (255, 0, 0), 1)\n cv.line(clrth, (0, cy), (640, cy), (255, 0, 0), 1)\n\n #print('error on x axis:'+str((cx-320)/320)+'(-1,1)')\n if intersection(frame):\n print('intersection')\n #display frame\n cv.line(clrth,(320,230),(320,250),(0,0,255),3)\n cv.putText(frame,'Angle:'+str(angle),(10,60),cv.FONT_HERSHEY_SIMPLEX,1,(255,255,0),2,cv.LINE_4)\n cv.imshow('Normal',~frame)\n cv.imshow('Contours', clrth)\n cv.imshow('asds', th)\n\n #press 'ESC' to quit\n if cv.waitKey(1) == 27:\n break\n\n\ncap.release()\ncv.destroyAllWindows()"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jahall/GPflow | [
"7bf3bf755d7b5d9de9f46827af4033c2f5fb05c9",
"7bf3bf755d7b5d9de9f46827af4033c2f5fb05c9"
] | [
"tests/test_variational.py",
"gpflow/kernels/base.py"
] | [
"# Copyright 2016 the GPflow authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_allclose\n\nimport gpflow\nfrom gpflow.kernels import SquaredExponential\nfrom gpflow.likelihoods import Gaussian\nfrom tests.reference import ref_rbf_kernel\n\nrng = np.random.RandomState(1)\n\n# ------------------------------------------\n# Helpers\n# ------------------------------------------\n\n\ndef univariate_log_marginal_likelihood(y, K, noise_var):\n return (-0.5 * y * y / (K + noise_var) - 0.5 * np.log(K + noise_var) -\n 0.5 * np.log(np.pi * 2.))\n\n\ndef univariate_posterior(y, K, noise_var):\n mean = K * y / (K + noise_var)\n variance = K - K / (K + noise_var)\n return mean, variance\n\n\ndef univariate_prior_KL(meanA, meanB, varA, varB):\n # KL[ qA | qB ] = E_{qA} \\log [qA / qB] where qA and qB are univariate normal distributions.\n return (0.5 * (np.log(varB) - np.log(varA) - 1. + varA / varB +\n (meanB - meanA) * (meanB - meanA) / varB))\n\n\ndef multivariate_prior_KL(meanA, covA, meanB, covB):\n # KL[ qA | qB ] = E_{qA} \\log [qA / qB] where qA and aB are\n # K dimensional multivariate normal distributions.\n # Analytically tractable and equal to...\n # 0.5 * (Tr(covB^{-1} covA) + (meanB - meanA)^T covB^{-1} (meanB - meanA)\n # - K + log(det(covB)) - log (det(covA)))\n K = covA.shape[0]\n traceTerm = 0.5 * np.trace(np.linalg.solve(covB, covA))\n delta = meanB - meanA\n mahalanobisTerm = 0.5 * np.dot(delta.T, np.linalg.solve(covB, delta))\n constantTerm = -0.5 * K\n priorLogDeterminantTerm = 0.5 * np.linalg.slogdet(covB)[1]\n variationalLogDeterminantTerm = -0.5 * np.linalg.slogdet(covA)[1]\n return (traceTerm + mahalanobisTerm + constantTerm +\n priorLogDeterminantTerm + variationalLogDeterminantTerm)\n\n\n# ------------------------------------------\n# Data classes: storing constants\n# ------------------------------------------\n\n\nclass Datum:\n num_latent = 1\n y_data = 2.\n X = np.atleast_2d(np.array([0.]))\n Y = np.atleast_2d(np.array([y_data]))\n Z = X.copy()\n zero_mean = 0.\n K = 1.\n noise_var = 0.5\n posterior_mean, posterior_var = univariate_posterior(y=y_data,\n K=K,\n noise_var=noise_var)\n posterior_std = np.sqrt(posterior_var)\n\n\nclass MultiDatum:\n dim = 3\n num_latent = 1\n Y = rng.randn(dim, 1)\n X = rng.randn(dim, 1)\n Z = X.copy()\n noise_var = 0.5\n signal_var = 1.5\n ls = 1.7\n q_mean = rng.randn(dim, num_latent)\n q_sqrt_diag = rng.rand(dim, num_latent)\n q_sqrt_full = np.tril(rng.rand(dim, dim))\n\n\ndef test_refrence_implementation_consistency():\n q_mean = rng.rand(1, 1)\n q_cov = rng.rand(1, 1)\n p_mean = rng.rand(1, 1)\n p_cov = rng.rand(1, 1)\n\n multivariate_KL = multivariate_prior_KL(q_mean, p_mean, q_cov, p_cov)\n univariate_KL = univariate_prior_KL(q_mean.reshape(-1), p_mean.reshape(-1),\n q_cov.reshape(-1), p_cov.reshape(-1))\n\n assert_allclose(univariate_KL - multivariate_KL, 0, atol=4)\n\n\[email protected]('diag', [True, False])\[email protected]('whiten', [True, False])\ndef 
test_variational_univariate_prior_KL(diag, whiten):\n reference_kl = univariate_prior_KL(Datum.posterior_mean, Datum.zero_mean,\n Datum.posterior_var, Datum.K)\n q_mu = np.ones((1, Datum.num_latent)) * Datum.posterior_mean\n ones = np.ones((1, Datum.num_latent)) if diag else np.ones(\n (1, 1, Datum.num_latent))\n q_sqrt = ones * Datum.posterior_std\n model = gpflow.models.SVGP(kernel=SquaredExponential(variance=Datum.K),\n likelihood=Gaussian(),\n inducing_variable=Datum.Z,\n num_latent=Datum.num_latent,\n q_diag=diag,\n whiten=whiten,\n q_mu=q_mu,\n q_sqrt=q_sqrt)\n test_prior_KL = model.prior_kl()\n assert_allclose(reference_kl - test_prior_KL, 0, atol=4)\n\n\[email protected]('diag', [True, False])\[email protected]('whiten', [True, False])\ndef test_variational_univariate_log_likelihood(diag, whiten):\n # reference marginal likelihood estimate\n reference_log_marginal_likelihood = univariate_log_marginal_likelihood(\n y=Datum.y_data, K=Datum.K, noise_var=Datum.noise_var)\n q_mu = np.ones((1, Datum.num_latent)) * Datum.posterior_mean\n ones = np.ones((1, Datum.num_latent)) if diag else np.ones(\n (1, 1, Datum.num_latent))\n q_sqrt = ones * Datum.posterior_std\n model = gpflow.models.SVGP(kernel=SquaredExponential(variance=Datum.K),\n likelihood=Gaussian(),\n inducing_variable=Datum.Z,\n num_latent=Datum.num_latent,\n q_diag=diag,\n whiten=whiten,\n q_mu=q_mu,\n q_sqrt=q_sqrt)\n model_likelihood = model.log_likelihood(X=Datum.X, Y=Datum.Y).numpy()\n assert_allclose(model_likelihood - reference_log_marginal_likelihood,\n 0,\n atol=4)\n\n\[email protected]('diag', [True, False])\[email protected]('whiten', [True, False])\ndef test_variational_univariate_conditionals(diag, whiten):\n q_mu = np.ones((1, Datum.num_latent)) * Datum.posterior_mean\n ones = np.ones((1, Datum.num_latent)) if diag else np.ones(\n (1, 1, Datum.num_latent))\n q_sqrt = ones * Datum.posterior_std\n model = gpflow.models.SVGP(kernel=SquaredExponential(variance=Datum.K),\n likelihood=Gaussian(),\n inducing_variable=Datum.Z,\n num_latent=Datum.num_latent,\n q_diag=diag,\n whiten=whiten,\n q_mu=q_mu,\n q_sqrt=q_sqrt)\n\n fmean_func, fvar_func = gpflow.conditionals.conditional(\n Datum.X,\n Datum.Z,\n model.kernel,\n model.q_mu,\n q_sqrt=model.q_sqrt,\n white=whiten)\n mean_value, var_value = fmean_func[0, 0], fvar_func[0, 0]\n\n assert_allclose(mean_value - Datum.posterior_mean, 0, atol=4)\n assert_allclose(var_value - Datum.posterior_var, 0, atol=4)\n\n\[email protected]('whiten', [True, False])\ndef test_variational_multivariate_prior_KL_full_q(whiten):\n cov_q = MultiDatum.q_sqrt_full @ MultiDatum.q_sqrt_full.T\n mean_prior = np.zeros((MultiDatum.dim, 1))\n cov_prior = np.eye(MultiDatum.dim) if whiten else ref_rbf_kernel(\n MultiDatum.X, MultiDatum.ls, MultiDatum.signal_var)\n reference_kl = multivariate_prior_KL(MultiDatum.q_mean, cov_q, mean_prior,\n cov_prior)\n\n q_sqrt = MultiDatum.q_sqrt_full[None, :, :]\n model = gpflow.models.SVGP(kernel=SquaredExponential(variance=MultiDatum.signal_var,\n lengthscale=MultiDatum.ls),\n likelihood=Gaussian(MultiDatum.noise_var),\n inducing_variable=MultiDatum.Z,\n num_latent=MultiDatum.num_latent,\n q_diag=False,\n whiten=whiten,\n q_mu=MultiDatum.q_mean,\n q_sqrt=q_sqrt)\n\n test_prior_kl = model.prior_kl()\n assert_allclose(reference_kl - test_prior_kl, 0, atol=4)\n",
"# Copyright 2018 GPflow\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"\nKernels form a core component of GPflow models and allow prior information to\nbe encoded about a latent function of interest. The effect of choosing\ndifferent kernels, and how it is possible to combine multiple kernels is shown\nin the `\"Using kernels in GPflow\" notebook <notebooks/kernels.html>`_.\n\"\"\"\n\nimport abc\nfrom functools import partial, reduce\nfrom typing import List, Optional, Union\n\nimport numpy as np\nimport tensorflow as tf\n\n\nclass Kernel(tf.Module):\n \"\"\"\n The basic kernel class. Handles active dims.\n \"\"\"\n\n def __init__(self,\n active_dims: Optional[Union[slice, list]] = None,\n name: Optional[str] = None):\n \"\"\"\n :param active_dims: active dimensions, has the slice type.\n :param name: optional kernel name.\n \"\"\"\n super().__init__(name=name)\n if isinstance(active_dims, list):\n active_dims = np.array(active_dims)\n self._active_dims = active_dims\n\n @property\n def active_dims(self):\n return self._active_dims\n\n @active_dims.setter\n def active_dims(self, value):\n if value is None:\n value = slice(None, None, None)\n if not isinstance(value, slice):\n value = np.array(value, dtype=int)\n self._active_dims = value\n\n def on_separate_dims(self, other):\n \"\"\"\n Checks if the dimensions, over which the kernels are specified, overlap.\n Returns True if they are defined on different/separate dimensions and False otherwise.\n \"\"\"\n if isinstance(self.active_dims, slice) or isinstance(\n other.active_dims, slice):\n # Be very conservative for kernels defined over slices of dimensions\n return False\n\n if self.active_dims is None or other.active_dims:\n return False\n\n this_dims = tf.reshape(self.active_dims, (-1, 1))\n other_dims = tf.reshape(other.active_dims, (1, -1))\n return not np.any(tf.equal(this_dims, other_dims))\n\n def slice(self, X: tf.Tensor, Y: Optional[tf.Tensor] = None):\n \"\"\"\n Slice the correct dimensions for use in the kernel, as indicated by `self.active_dims`.\n\n :param X: Input 1 [N, D].\n :param Y: Input 2 [M, D], can be None.\n :return: Sliced X, Y, [N, I], I - input dimension.\n \"\"\"\n dims = self.active_dims\n if isinstance(dims, slice):\n X = X[..., dims]\n Y = Y[..., dims] if Y is not None else X\n elif dims is not None:\n # TODO(@awav): Convert when TF2.0 whill support proper slicing.\n X = tf.gather(X, dims, axis=-1)\n Y = tf.gather(Y, dims, axis=-1) if Y is not None else X\n return X, Y\n\n def slice_cov(self, cov: tf.Tensor) -> tf.Tensor:\n \"\"\"\n Slice the correct dimensions for use in the kernel, as indicated by\n `self.active_dims` for covariance matrices. This requires slicing the\n rows *and* columns. 
This will also turn flattened diagonal\n matrices into a tensor of full diagonal matrices.\n\n :param cov: Tensor of covariance matrices, [N, D, D] or [N, D].\n :return: [N, I, I].\n \"\"\"\n if cov.shape.ndims == 2:\n cov = tf.linalg.diag(cov)\n\n dims = self.active_dims\n\n if isinstance(dims, slice):\n return cov[..., dims, dims]\n elif dims is not None:\n nlast = cov.shape[-1]\n ndims = len(dims)\n\n cov_shape = cov.shape\n cov_reshaped = tf.reshape(cov, [-1, nlast, nlast])\n gather1 = tf.gather(tf.transpose(cov_reshaped, [2, 1, 0]), dims)\n gather2 = tf.gather(tf.transpose(gather1, [1, 0, 2]), dims)\n cov = tf.reshape(tf.transpose(gather2, [2, 0, 1]),\n tf.concat([cov_shape[:-2], [ndims, ndims]], 0))\n\n return cov\n\n def _validate_ard_active_dims(self, ard_parameter):\n \"\"\"\n Validate that ARD parameter matches the number of active_dims (provided active_dims\n has been specified as an array).\n \"\"\"\n if self.active_dims is None or isinstance(self.active_dims, slice):\n # Can only validate parameter if active_dims is an array\n return\n\n if ard_parameter.shape.rank > 0 and ard_parameter.shape[0] != len(self.active_dims):\n raise ValueError(f\"Size of `active_dims` {self.active_dims} does not match \"\n f\"size of ard parameter ({ard_parameter.shape[0]})\")\n\n @abc.abstractmethod\n def K(self, X, X2=None, presliced=False):\n raise NotImplementedError\n\n @abc.abstractmethod\n def K_diag(self, X, presliced=False):\n raise NotImplementedError\n\n def __call__(self, X, Y=None, full=True, presliced=False):\n if not full and Y is not None:\n raise ValueError(\n \"Ambiguous inputs: `diagonal` and `y` are not compatible.\")\n if not full:\n return self.K_diag(X)\n return self.K(X, Y)\n\n def __add__(self, other):\n return Sum([self, other])\n\n def __mul__(self, other):\n return Product([self, other])\n\n\nclass Combination(Kernel):\n \"\"\"\n Combine a list of kernels, e.g. by adding or multiplying (see inheriting\n classes).\n\n The names of the kernels to be combined are generated from their class\n names.\n \"\"\"\n\n _reduction = None\n\n def __init__(self, kernels: List[Kernel], name: Optional[str] = None):\n super().__init__(name=name)\n\n if not all(isinstance(k, Kernel) for k in kernels):\n raise TypeError(\n \"can only combine Kernel instances\") # pragma: no cover\n\n self._set_kernels(kernels)\n\n def _set_kernels(self, kernels: List[Kernel]):\n # add kernels to a list, flattening out instances of this class therein\n kernels_list = []\n for k in kernels:\n if isinstance(k, self.__class__):\n kernels_list.extend(k.kernels)\n else:\n kernels_list.append(k)\n self.kernels = kernels_list\n\n @property\n def on_separate_dimensions(self):\n \"\"\"\n Checks whether the kernels in the combination act on disjoint subsets\n of dimensions. 
Currently, it is hard to asses whether two slice objects\n will overlap, so this will always return False.\n\n :return: Boolean indicator.\n \"\"\"\n if np.any([isinstance(k.active_dims, slice) for k in self.kernels]):\n # Be conservative in the case of a slice object\n return False\n else:\n dimlist = [k.active_dims for k in self.kernels]\n overlapping = False\n for i, dims_i in enumerate(dimlist):\n for dims_j in dimlist[i + 1:]:\n print(f\"dims_i = {type(dims_i)}\")\n if np.any(dims_i.reshape(-1, 1) == dims_j.reshape(1, -1)):\n overlapping = True\n return not overlapping\n\n def K(self, X: tf.Tensor, X2: Optional[tf.Tensor] = None, presliced: bool = False) -> tf.Tensor:\n res = [k.K(X, X2, presliced=presliced) for k in self.kernels]\n return self._reduce(res)\n\n def K_diag(self, X: tf.Tensor, presliced: bool = False) -> tf.Tensor:\n res = [k.K_diag(X, presliced=presliced) for k in self.kernels]\n return self._reduce(res)\n\n\nclass Sum(Combination):\n @property\n def _reduce(cls):\n return tf.add_n\n\n\nclass Product(Combination):\n @property\n def _reduce(cls):\n return partial(reduce, tf.multiply)\n"
] | [
[
"numpy.log",
"numpy.linalg.solve",
"numpy.sqrt",
"numpy.linalg.slogdet",
"numpy.eye",
"numpy.ones",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.zeros",
"numpy.random.RandomState"
],
[
"tensorflow.transpose",
"tensorflow.concat",
"tensorflow.reshape",
"tensorflow.equal",
"tensorflow.linalg.diag",
"tensorflow.gather",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
Dawars/graphics | [
"8b3013e0188f9317aea3bc0637b6ae74b4a3e837"
] | [
"tensorflow_graphics/geometry/transformation/tests/rotation_matrix_3d_test.py"
] | [
"#Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for 3d rotation matrix.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import flagsaver\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_graphics.geometry.transformation import axis_angle\nfrom tensorflow_graphics.geometry.transformation import quaternion\nfrom tensorflow_graphics.geometry.transformation import rotation_matrix_3d\nfrom tensorflow_graphics.geometry.transformation.tests import test_data as td\nfrom tensorflow_graphics.geometry.transformation.tests import test_helpers\nfrom tensorflow_graphics.util import test_case\n\n\nclass RotationMatrix3dTest(test_case.TestCase):\n\n @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)\n def test_assert_rotation_matrix_normalized_passthrough(self):\n \"\"\"Checks that the assert is a passthrough when the flag is False.\"\"\"\n angles = test_helpers.generate_preset_test_euler_angles()\n\n matrix_input = rotation_matrix_3d.from_euler(angles)\n matrix_output = rotation_matrix_3d.assert_rotation_matrix_normalized(\n matrix_input)\n\n self.assertTrue(matrix_input is matrix_output)\n\n @parameterized.parameters((np.float32), (np.float64))\n def test_assert_rotation_matrix_normalized_preset(self, dtype):\n \"\"\"Checks that assert_normalized function works as expected.\"\"\"\n angles = test_helpers.generate_preset_test_euler_angles().astype(dtype)\n\n matrix = rotation_matrix_3d.from_euler(angles)\n matrix_rescaled = matrix * 1.01\n matrix_normalized = rotation_matrix_3d.assert_rotation_matrix_normalized(\n matrix)\n self.evaluate(matrix_normalized)\n\n with self.assertRaises(tf.errors.InvalidArgumentError):\n rescaled_normalized = rotation_matrix_3d.assert_rotation_matrix_normalized(\n matrix_rescaled)\n self.evaluate(rescaled_normalized)\n\n @parameterized.parameters(\n ((3, 3),),\n ((None, 3, 3),),\n )\n def test_assert_rotation_matrix_normalized_exception_not_raised(\n self, *shapes):\n \"\"\"Tests that the shape exceptions are not raised.\"\"\"\n self.assert_exception_is_not_raised(\n rotation_matrix_3d.assert_rotation_matrix_normalized, shapes)\n\n @parameterized.parameters(\n (\"must have a rank greater than 1\", (3,)),\n (\"must have exactly 3 dimensions in axis -1\", (3, None)),\n (\"must have exactly 3 dimensions in axis -2\", (None, 3)),\n )\n def test_assert_rotation_matrix_normalized_exception_raised(\n self, error_msg, *shapes):\n \"\"\"Tests that the shape exceptions are raised.\"\"\"\n self.assert_exception_is_raised(\n rotation_matrix_3d.assert_rotation_matrix_normalized, error_msg, shapes)\n\n @parameterized.parameters(\n ((3,), (1,)),\n ((None, 3), (None, 1)),\n ((1, 3), (1, 1)),\n ((2, 3), (2, 1)),\n ((1, 3), (1,)),\n ((3,), (1, 1)),\n )\n def test_from_axis_angle_exception_not_raised(self, *shapes):\n \"\"\"Tests that the shape exceptions are not raised.\"\"\"\n 
self.assert_exception_is_not_raised(rotation_matrix_3d.from_axis_angle,\n shapes)\n\n @parameterized.parameters(\n (\"must have exactly 3 dimensions in axis -1\", (None,), (1,)),\n (\"must have exactly 1 dimensions in axis -1\", (3,), (None,)),\n )\n def test_from_axis_angle_exception_raised(self, error_msg, *shapes):\n \"\"\"Tests that the shape exceptions are properly raised.\"\"\"\n self.assert_exception_is_raised(rotation_matrix_3d.from_axis_angle,\n error_msg, shapes)\n\n def test_from_axis_angle_normalized_preset(self):\n \"\"\"Tests that axis-angles can be converted to rotation matrices.\"\"\"\n euler_angles = test_helpers.generate_preset_test_euler_angles()\n\n axis, angle = axis_angle.from_euler(euler_angles)\n matrix_axis_angle = rotation_matrix_3d.from_axis_angle(axis, angle)\n\n self.assertAllEqual(\n rotation_matrix_3d.is_valid(matrix_axis_angle),\n np.ones(euler_angles.shape[0:-1] + (1,)))\n\n def test_from_axis_angle_normalized_random(self):\n \"\"\"Tests that axis-angles can be converted to rotation matrices.\"\"\"\n tensor_shape = np.random.randint(1, 10, size=np.random.randint(3)).tolist()\n random_axis = np.random.normal(size=tensor_shape + [3])\n random_axis /= np.linalg.norm(random_axis, axis=-1, keepdims=True)\n random_angle = np.random.normal(size=tensor_shape + [1])\n\n matrix_axis_angle = rotation_matrix_3d.from_axis_angle(\n random_axis, random_angle)\n\n self.assertAllEqual(\n rotation_matrix_3d.is_valid(matrix_axis_angle),\n np.ones(tensor_shape + [1]))\n\n @parameterized.parameters(\n ((td.AXIS_3D_X, td.ANGLE_45), (td.MAT_3D_X_45,)),\n ((td.AXIS_3D_Y, td.ANGLE_45), (td.MAT_3D_Y_45,)),\n ((td.AXIS_3D_Z, td.ANGLE_45), (td.MAT_3D_Z_45,)),\n ((td.AXIS_3D_X, td.ANGLE_90), (td.MAT_3D_X_90,)),\n ((td.AXIS_3D_Y, td.ANGLE_90), (td.MAT_3D_Y_90,)),\n ((td.AXIS_3D_Z, td.ANGLE_90), (td.MAT_3D_Z_90,)),\n ((td.AXIS_3D_X, td.ANGLE_180), (td.MAT_3D_X_180,)),\n ((td.AXIS_3D_Y, td.ANGLE_180), (td.MAT_3D_Y_180,)),\n ((td.AXIS_3D_Z, td.ANGLE_180), (td.MAT_3D_Z_180,)),\n )\n def test_from_axis_angle_preset(self, test_inputs, test_outputs):\n \"\"\"Tests that an axis-angle maps to correct matrix.\"\"\"\n self.assert_output_is_correct(rotation_matrix_3d.from_axis_angle,\n test_inputs, test_outputs)\n\n def test_from_axis_angle_random(self):\n \"\"\"Tests conversion to matrix.\"\"\"\n tensor_shape = np.random.randint(1, 10, size=np.random.randint(3)).tolist()\n random_axis = np.random.normal(size=tensor_shape + [3])\n random_axis /= np.linalg.norm(random_axis, axis=-1, keepdims=True)\n random_angle = np.random.normal(size=tensor_shape + [1])\n\n matrix_axis_angle = rotation_matrix_3d.from_axis_angle(\n random_axis, random_angle)\n random_quaternion = quaternion.from_axis_angle(random_axis, random_angle)\n matrix_quaternion = rotation_matrix_3d.from_quaternion(random_quaternion)\n\n self.assertAllClose(matrix_axis_angle, matrix_quaternion, rtol=1e-3)\n # Checks that resulting rotation matrices are normalized.\n self.assertAllEqual(\n rotation_matrix_3d.is_valid(matrix_axis_angle),\n np.ones(tensor_shape + [1]))\n\n @parameterized.parameters(\n ((td.AXIS_3D_X, td.ANGLE_90, td.AXIS_3D_X), (td.AXIS_3D_X,)),\n ((td.AXIS_3D_X, td.ANGLE_90, td.AXIS_3D_Y), (td.AXIS_3D_Z,)),\n ((td.AXIS_3D_X, -td.ANGLE_90, td.AXIS_3D_Z), (td.AXIS_3D_Y,)),\n ((td.AXIS_3D_Y, -td.ANGLE_90, td.AXIS_3D_X), (td.AXIS_3D_Z,)),\n ((td.AXIS_3D_Y, td.ANGLE_90, td.AXIS_3D_Y), (td.AXIS_3D_Y,)),\n ((td.AXIS_3D_Y, td.ANGLE_90, td.AXIS_3D_Z), (td.AXIS_3D_X,)),\n ((td.AXIS_3D_Z, td.ANGLE_90, td.AXIS_3D_X), (td.AXIS_3D_Y,)),\n 
((td.AXIS_3D_Z, -td.ANGLE_90, td.AXIS_3D_Y), (td.AXIS_3D_X,)),\n ((td.AXIS_3D_Z, td.ANGLE_90, td.AXIS_3D_Z), (td.AXIS_3D_Z,)),\n )\n def test_from_axis_angle_rotate_vector_preset(self, test_inputs,\n test_outputs):\n \"\"\"Tests the directionality of axis-angle rotations.\"\"\"\n\n def func(axis, angle, point):\n matrix = rotation_matrix_3d.from_axis_angle(axis, angle)\n return rotation_matrix_3d.rotate(point, matrix)\n\n self.assert_output_is_correct(func, test_inputs, test_outputs)\n\n @parameterized.parameters(\n ((3,),),\n ((None, 3),),\n ((2, 3),),\n )\n def test_from_euler_exception_not_raised(self, *shapes):\n \"\"\"Tests that the shape exceptions are not raised.\"\"\"\n self.assert_exception_is_not_raised(rotation_matrix_3d.from_euler, shapes)\n\n @parameterized.parameters(\n (\"must have exactly 3 dimensions in axis -1\", (None,)),)\n def test_from_euler_exception_raised(self, error_msg, *shapes):\n \"\"\"Tests that the shape exceptions are properly raised.\"\"\"\n self.assert_exception_is_raised(rotation_matrix_3d.from_euler, error_msg,\n shapes)\n\n @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)\n def test_from_euler_jacobian_preset(self):\n \"\"\"Test the Jacobian of the from_euler function.\"\"\"\n x_init = test_helpers.generate_preset_test_euler_angles()\n x = tf.convert_to_tensor(value=x_init)\n\n y = rotation_matrix_3d.from_euler(x)\n\n self.assert_jacobian_is_correct(x, x_init, y)\n\n @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)\n def test_from_euler_jacobian_random(self):\n \"\"\"Test the Jacobian of the from_euler function.\"\"\"\n x_init = test_helpers.generate_random_test_euler_angles()\n x = tf.convert_to_tensor(value=x_init)\n\n y = rotation_matrix_3d.from_euler(x)\n\n self.assert_jacobian_is_correct(x, x_init, y)\n\n def test_from_euler_normalized_preset(self):\n \"\"\"Tests that euler angles can be converted to rotation matrices.\"\"\"\n euler_angles = test_helpers.generate_preset_test_euler_angles()\n\n matrix = rotation_matrix_3d.from_euler(euler_angles)\n\n self.assertAllEqual(\n rotation_matrix_3d.is_valid(matrix),\n np.ones(euler_angles.shape[0:-1] + (1,)))\n\n def test_from_euler_normalized_random(self):\n \"\"\"Tests that euler angles can be converted to rotation matrices.\"\"\"\n random_euler_angles = test_helpers.generate_random_test_euler_angles()\n\n matrix = rotation_matrix_3d.from_euler(random_euler_angles)\n\n self.assertAllEqual(\n rotation_matrix_3d.is_valid(matrix),\n np.ones(random_euler_angles.shape[0:-1] + (1,)))\n\n @parameterized.parameters(\n ((td.AXIS_3D_0,), (td.MAT_3D_ID,)),\n ((td.ANGLE_45 * td.AXIS_3D_X,), (td.MAT_3D_X_45,)),\n ((td.ANGLE_45 * td.AXIS_3D_Y,), (td.MAT_3D_Y_45,)),\n ((td.ANGLE_45 * td.AXIS_3D_Z,), (td.MAT_3D_Z_45,)),\n ((td.ANGLE_90 * td.AXIS_3D_X,), (td.MAT_3D_X_90,)),\n ((td.ANGLE_90 * td.AXIS_3D_Y,), (td.MAT_3D_Y_90,)),\n ((td.ANGLE_90 * td.AXIS_3D_Z,), (td.MAT_3D_Z_90,)),\n ((td.ANGLE_180 * td.AXIS_3D_X,), (td.MAT_3D_X_180,)),\n ((td.ANGLE_180 * td.AXIS_3D_Y,), (td.MAT_3D_Y_180,)),\n ((td.ANGLE_180 * td.AXIS_3D_Z,), (td.MAT_3D_Z_180,)),\n )\n def test_from_euler_preset(self, test_inputs, test_outputs):\n \"\"\"Tests that Euler angles create the expected matrix.\"\"\"\n self.assert_output_is_correct(rotation_matrix_3d.from_euler, test_inputs,\n test_outputs)\n\n def test_from_euler_random(self):\n \"\"\"Tests that Euler angles produce the same result as axis-angle.\"\"\"\n angles = test_helpers.generate_random_test_euler_angles()\n matrix = rotation_matrix_3d.from_euler(angles)\n tensor_tile = 
angles.shape[:-1]\n\n x_axis = np.tile(td.AXIS_3D_X, tensor_tile + (1,))\n y_axis = np.tile(td.AXIS_3D_Y, tensor_tile + (1,))\n z_axis = np.tile(td.AXIS_3D_Z, tensor_tile + (1,))\n x_angle = np.expand_dims(angles[..., 0], axis=-1)\n y_angle = np.expand_dims(angles[..., 1], axis=-1)\n z_angle = np.expand_dims(angles[..., 2], axis=-1)\n x_rotation = rotation_matrix_3d.from_axis_angle(x_axis, x_angle)\n y_rotation = rotation_matrix_3d.from_axis_angle(y_axis, y_angle)\n z_rotation = rotation_matrix_3d.from_axis_angle(z_axis, z_angle)\n expected_matrix = tf.matmul(z_rotation, tf.matmul(y_rotation, x_rotation))\n\n self.assertAllClose(expected_matrix, matrix, rtol=1e-3)\n\n @parameterized.parameters(\n ((3,),),\n ((None, 3),),\n )\n def test_from_euler_with_small_angles_approximation_exception_not_raised(\n self, *shapes):\n \"\"\"Tests that the shape exceptions are not raised.\"\"\"\n self.assert_exception_is_not_raised(\n rotation_matrix_3d.from_euler_with_small_angles_approximation, shapes)\n\n @parameterized.parameters(\n (\"must have exactly 3 dimensions in axis -1\", (None,)),)\n def test_from_euler_with_small_angles_approximation_exception_raised(\n self, error_msg, *shapes):\n \"\"\"Tests that the shape exceptions are properly raised.\"\"\"\n self.assert_exception_is_raised(\n rotation_matrix_3d.from_euler_with_small_angles_approximation,\n error_msg, shapes)\n\n @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)\n def test_from_euler_with_small_angles_approximation_jacobian_random(self):\n \"\"\"Test the Jacobian of from_euler_with_small_angles_approximation.\"\"\"\n x_init = test_helpers.generate_random_test_euler_angles(\n min_angle=-0.17, max_angle=0.17)\n x = tf.convert_to_tensor(value=x_init)\n\n y = rotation_matrix_3d.from_euler_with_small_angles_approximation(x)\n\n self.assert_jacobian_is_correct(x, x_init, y)\n\n def test_from_euler_with_small_angles_approximation_random(self):\n \"\"\"Tests small_angles approximation by comparing to exact calculation.\"\"\"\n # Only generate small angles. 
For a test tolerance of 1e-3, 0.16 was found\n # empirically to be the range where the small angle approximation works.\n random_euler_angles = test_helpers.generate_random_test_euler_angles(\n min_angle=-0.16, max_angle=0.16)\n\n exact_matrix = rotation_matrix_3d.from_euler(random_euler_angles)\n approximate_matrix = (\n rotation_matrix_3d.from_euler_with_small_angles_approximation(\n random_euler_angles))\n\n self.assertAllClose(exact_matrix, approximate_matrix, atol=1e-3)\n\n @parameterized.parameters(\n ((4,),),\n ((None, 4),),\n )\n def test_from_quaternion_exception_not_raised(self, *shapes):\n \"\"\"Tests that the shape exceptions are not raised.\"\"\"\n self.assert_exception_is_not_raised(rotation_matrix_3d.from_quaternion,\n shapes)\n\n @parameterized.parameters(\n (\"must have exactly 4 dimensions in axis -1\", (None,)),)\n def test_from_quaternion_exception_raised(self, error_msg, *shapes):\n \"\"\"Tests that the shape exceptions are properly raised.\"\"\"\n self.assert_exception_is_raised(rotation_matrix_3d.from_quaternion,\n error_msg, shapes)\n\n @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)\n def test_from_quaternion_jacobian_preset(self):\n \"\"\"Test the Jacobian of the from_quaternion function.\"\"\"\n x_init = test_helpers.generate_preset_test_quaternions()\n x = tf.convert_to_tensor(value=x_init)\n\n y = rotation_matrix_3d.from_quaternion(x)\n\n self.assert_jacobian_is_correct(x, x_init, y)\n\n @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)\n def test_from_quaternion_jacobian_random(self):\n \"\"\"Test the Jacobian of the from_quaternion function.\"\"\"\n x_init = test_helpers.generate_random_test_quaternions()\n x = tf.convert_to_tensor(value=x_init)\n\n y = rotation_matrix_3d.from_quaternion(x)\n\n self.assert_jacobian_is_correct(x, x_init, y)\n\n def test_from_quaternion_normalized_preset(self):\n \"\"\"Tests that quaternions can be converted to rotation matrices.\"\"\"\n euler_angles = test_helpers.generate_preset_test_euler_angles()\n\n quat = quaternion.from_euler(euler_angles)\n matrix_quat = rotation_matrix_3d.from_quaternion(quat)\n\n self.assertAllEqual(\n rotation_matrix_3d.is_valid(matrix_quat),\n np.ones(euler_angles.shape[0:-1] + (1,)))\n\n def test_from_quaternion_normalized_random(self):\n \"\"\"Tests that random quaternions can be converted to rotation matrices.\"\"\"\n random_quaternion = test_helpers.generate_random_test_quaternions()\n tensor_shape = random_quaternion.shape[:-1]\n\n random_matrix = rotation_matrix_3d.from_quaternion(random_quaternion)\n\n self.assertAllEqual(\n rotation_matrix_3d.is_valid(random_matrix),\n np.ones(tensor_shape + (1,)))\n\n def test_from_quaternion_preset(self):\n \"\"\"Tests that a quaternion maps to correct matrix.\"\"\"\n preset_quaternions = test_helpers.generate_preset_test_quaternions()\n\n preset_matrices = test_helpers.generate_preset_test_rotation_matrices_3d()\n\n self.assertAllClose(preset_matrices,\n rotation_matrix_3d.from_quaternion(preset_quaternions))\n\n def test_from_quaternion_random(self):\n \"\"\"Tests conversion to matrix.\"\"\"\n random_euler_angles = test_helpers.generate_random_test_euler_angles()\n\n random_quaternions = quaternion.from_euler(random_euler_angles)\n random_rotation_matrices = rotation_matrix_3d.from_euler(\n random_euler_angles)\n\n self.assertAllClose(random_rotation_matrices,\n rotation_matrix_3d.from_quaternion(random_quaternions))\n\n @parameterized.parameters(\n ((3, 3),),\n ((None, 3, 3),),\n ((2, 3, 3),),\n )\n def test_inverse_exception_not_raised(self, 
*shapes):\n \"\"\"Checks the inputs of the rotate function.\"\"\"\n self.assert_exception_is_not_raised(rotation_matrix_3d.inverse, shapes)\n\n @parameterized.parameters(\n (\"must have a rank greater than 1\", (3,)),\n (\"must have exactly 3 dimensions in axis -1\", (3, None)),\n (\"must have exactly 3 dimensions in axis -2\", (None, 3)),\n )\n def test_inverse_exception_raised(self, error_msg, *shapes):\n \"\"\"Tests that the shape exceptions are properly raised.\"\"\"\n self.assert_exception_is_raised(rotation_matrix_3d.inverse, error_msg,\n shapes)\n\n @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)\n def test_inverse_jacobian_preset(self):\n \"\"\"Test the Jacobian of the inverse function.\"\"\"\n x_init = test_helpers.generate_preset_test_rotation_matrices_3d()\n x = tf.convert_to_tensor(value=x_init)\n\n y = rotation_matrix_3d.inverse(x)\n\n self.assert_jacobian_is_correct(x, x_init, y)\n\n @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)\n def test_inverse_jacobian_random(self):\n \"\"\"Test the Jacobian of the inverse function.\"\"\"\n x_init = test_helpers.generate_random_test_rotation_matrix_3d()\n x = tf.convert_to_tensor(value=x_init)\n\n y = rotation_matrix_3d.inverse(x)\n\n self.assert_jacobian_is_correct(x, x_init, y)\n\n def test_inverse_normalized_random(self):\n \"\"\"Checks that inverted rotation matrices are valid rotations.\"\"\"\n random_euler_angle = test_helpers.generate_random_test_euler_angles()\n tensor_tile = random_euler_angle.shape[:-1]\n\n random_matrix = rotation_matrix_3d.from_euler(random_euler_angle)\n predicted_invert_random_matrix = rotation_matrix_3d.inverse(random_matrix)\n\n self.assertAllEqual(\n rotation_matrix_3d.is_valid(predicted_invert_random_matrix),\n np.ones(tensor_tile + (1,)))\n\n def test_inverse_random(self):\n \"\"\"Checks that inverting rotated points results in no transformation.\"\"\"\n random_euler_angle = test_helpers.generate_random_test_euler_angles()\n tensor_tile = random_euler_angle.shape[:-1]\n random_matrix = rotation_matrix_3d.from_euler(random_euler_angle)\n random_point = np.random.normal(size=tensor_tile + (3,))\n\n rotated_random_points = rotation_matrix_3d.rotate(random_point,\n random_matrix)\n predicted_invert_random_matrix = rotation_matrix_3d.inverse(random_matrix)\n predicted_invert_rotated_random_points = rotation_matrix_3d.rotate(\n rotated_random_points, predicted_invert_random_matrix)\n\n self.assertAllClose(\n random_point, predicted_invert_rotated_random_points, rtol=1e-6)\n\n @parameterized.parameters(\n ((3, 3),),\n ((None, 3, 3),),\n ((2, 3, 3),),\n )\n def test_is_valid_exception_not_raised(self, *shapes):\n \"\"\"Tests that the shape exceptions are not raised.\"\"\"\n self.assert_exception_is_not_raised(rotation_matrix_3d.is_valid, shapes)\n\n @parameterized.parameters(\n (\"must have a rank greater than 1\", (3,)),\n (\"must have exactly 3 dimensions in axis -1\", (3, None)),\n (\"must have exactly 3 dimensions in axis -2\", (None, 3)),\n )\n def test_is_valid_exception_raised(self, error_msg, *shape):\n \"\"\"Tests that the shape exceptions are raised.\"\"\"\n self.assert_exception_is_raised(rotation_matrix_3d.is_valid, error_msg,\n shape)\n\n def test_is_valid_random(self):\n \"\"\"Tests that is_valid works as intended.\"\"\"\n random_euler_angle = test_helpers.generate_random_test_euler_angles()\n tensor_tile = random_euler_angle.shape[:-1]\n\n rotation_matrix = rotation_matrix_3d.from_euler(random_euler_angle)\n pred_normalized = rotation_matrix_3d.is_valid(rotation_matrix)\n\n with 
self.subTest(name=\"all_normalized\"):\n self.assertAllEqual(pred_normalized,\n np.ones(shape=tensor_tile + (1,), dtype=bool))\n\n with self.subTest(name=\"non_orthonormal\"):\n test_matrix = np.array([[2., 0., 0.], [0., 0.5, 0], [0., 0., 1.]])\n pred_normalized = rotation_matrix_3d.is_valid(test_matrix)\n\n self.assertAllEqual(pred_normalized, np.zeros(shape=(1,), dtype=bool))\n\n with self.subTest(name=\"negative_orthonormal\"):\n test_matrix = np.array([[1., 0., 0.], [0., -1., 0.], [0., 0., 1.]])\n pred_normalized = rotation_matrix_3d.is_valid(test_matrix)\n\n self.assertAllEqual(pred_normalized, np.zeros(shape=(1,), dtype=bool))\n\n @parameterized.parameters(\n ((3,), (3, 3)),\n ((None, 3), (None, 3, 3)),\n ((1, 3), (1, 3, 3)),\n ((2, 3), (2, 3, 3)),\n ((3,), (1, 3, 3)),\n ((1, 3), (3, 3)),\n )\n def test_rotate_exception_not_raised(self, *shapes):\n \"\"\"Tests that the shape exceptions are not raised.\"\"\"\n self.assert_exception_is_not_raised(rotation_matrix_3d.rotate, shapes)\n\n @parameterized.parameters(\n (\"must have exactly 3 dimensions in axis -1\", (None,), (3, 3)),\n (\"must have a rank greater than 1\", (3,), (3,)),\n (\"must have exactly 3 dimensions in axis -1\", (3,), (3, None)),\n (\"must have exactly 3 dimensions in axis -2\", (3,), (None, 3)),\n )\n def test_rotate_exception_raised(self, error_msg, *shapes):\n \"\"\"Checks the inputs of the rotate function.\"\"\"\n self.assert_exception_is_raised(rotation_matrix_3d.rotate, error_msg,\n shapes)\n\n @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)\n def test_rotate_jacobian_preset(self):\n \"\"\"Test the Jacobian of the rotate function.\"\"\"\n x_matrix_init = test_helpers.generate_preset_test_rotation_matrices_3d()\n x_matrix = tf.convert_to_tensor(value=x_matrix_init)\n tensor_shape = x_matrix.shape[:-1]\n x_point_init = np.random.uniform(size=tensor_shape)\n x_point = tf.convert_to_tensor(value=x_point_init)\n\n y = rotation_matrix_3d.rotate(x_point, x_matrix)\n\n self.assert_jacobian_is_correct(x_matrix, x_matrix_init, y)\n self.assert_jacobian_is_correct(x_point, x_point_init, y)\n\n @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)\n def test_rotate_jacobian_random(self):\n \"\"\"Test the Jacobian of the rotate function.\"\"\"\n x_matrix_init = test_helpers.generate_random_test_rotation_matrix_3d()\n x_matrix = tf.convert_to_tensor(value=x_matrix_init)\n tensor_shape = x_matrix.shape[:-1]\n x_point_init = np.random.uniform(size=tensor_shape)\n x_point = tf.convert_to_tensor(value=x_point_init)\n\n y = rotation_matrix_3d.rotate(x_point, x_matrix)\n\n self.assert_jacobian_is_correct(x_matrix, x_matrix_init, y)\n self.assert_jacobian_is_correct(x_point, x_point_init, y)\n\n @parameterized.parameters(\n ((td.ANGLE_90 * td.AXIS_3D_X, td.AXIS_3D_X), (td.AXIS_3D_X,)),\n ((td.ANGLE_90 * td.AXIS_3D_X, td.AXIS_3D_Y), (td.AXIS_3D_Z,)),\n ((-td.ANGLE_90 * td.AXIS_3D_X, td.AXIS_3D_Z), (td.AXIS_3D_Y,)),\n ((-td.ANGLE_90 * td.AXIS_3D_Y, td.AXIS_3D_X), (td.AXIS_3D_Z,)),\n ((td.ANGLE_90 * td.AXIS_3D_Y, td.AXIS_3D_Y), (td.AXIS_3D_Y,)),\n ((td.ANGLE_90 * td.AXIS_3D_Y, td.AXIS_3D_Z), (td.AXIS_3D_X,)),\n ((td.ANGLE_90 * td.AXIS_3D_Z, td.AXIS_3D_X), (td.AXIS_3D_Y,)),\n ((-td.ANGLE_90 * td.AXIS_3D_Z, td.AXIS_3D_Y), (td.AXIS_3D_X,)),\n ((td.ANGLE_90 * td.AXIS_3D_Z, td.AXIS_3D_Z), (td.AXIS_3D_Z,)),\n )\n def test_rotate_vector_preset(self, test_inputs, test_outputs):\n \"\"\"Tests that the rotate function produces the expected results.\"\"\"\n\n def func(angles, point):\n matrix = rotation_matrix_3d.from_euler(angles)\n return 
rotation_matrix_3d.rotate(point, matrix)\n\n self.assert_output_is_correct(func, test_inputs, test_outputs)\n\n def test_rotate_vs_rotate_quaternion_random(self):\n \"\"\"Tests that the rotate provide the same results as quaternion.rotate.\"\"\"\n random_euler_angle = test_helpers.generate_random_test_euler_angles()\n tensor_tile = random_euler_angle.shape[:-1]\n\n random_matrix = rotation_matrix_3d.from_euler(random_euler_angle)\n random_quaternion = quaternion.from_rotation_matrix(random_matrix)\n random_point = np.random.normal(size=tensor_tile + (3,))\n ground_truth = quaternion.rotate(random_point, random_quaternion)\n prediction = rotation_matrix_3d.rotate(random_point, random_matrix)\n\n self.assertAllClose(ground_truth, prediction, rtol=1e-6)\n\n\nif __name__ == \"__main__\":\n test_case.main()\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.matmul",
"numpy.expand_dims",
"numpy.tile",
"numpy.linalg.norm",
"numpy.ones",
"numpy.random.normal",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
johannlilly/tf-quant-finance | [
"0259bf2b93bb1742ad661abc82b9a84156bc2dfc",
"0259bf2b93bb1742ad661abc82b9a84156bc2dfc"
] | [
"tf_quant_finance/models/hjm/swaption_pricing.py",
"tf_quant_finance/models/hjm/swaption_pricing_test.py"
] | [
"# Lint as: python3\n# Copyright 2020 Google LLC\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Pricing of the Interest rate Swaption using the HJM model.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nfrom tf_quant_finance.models.hjm import quasi_gaussian_hjm\nfrom tf_quant_finance.models.hjm import swaption_util\n\n\ndef price(*,\n expiries,\n fixed_leg_payment_times,\n fixed_leg_daycount_fractions,\n fixed_leg_coupon,\n reference_rate_fn,\n num_hjm_factors,\n mean_reversion,\n volatility,\n time_step,\n notional=None,\n is_payer_swaption=None,\n num_samples=1,\n random_type=None,\n seed=None,\n skip=0,\n dtype=None,\n name=None):\n \"\"\"Calculates the price of European swaptions using the HJM model.\n\n A European Swaption is a contract that gives the holder an option to enter a\n swap contract at a future date at a prespecified fixed rate. A swaption that\n grants the holder the right to pay fixed rate and receive floating rate is\n called a payer swaption while the swaption that grants the holder the right to\n receive fixed and pay floating payments is called the receiver swaption.\n Typically the start date (or the inception date) of the swap coincides with\n the expiry of the swaption. Mid-curve swaptions are currently not supported\n (b/160061740).\n\n This implementation uses the HJM model to numerically value the swaption via\n Monte-Carlo. For more information on the formulation of the HJM model, see\n quasi_gaussian_hjm.py.\n\n\n #### References:\n [1]: D. Brigo, F. Mercurio. Interest Rate Models-Theory and Practice.\n Second Edition. 2007. Section 6.7, page 237.\n\n Args:\n expiries: A real `Tensor` of any shape and dtype. The time to expiration of\n the swaptions. The shape of this input determines the number (and shape)\n of swaptions to be priced and the shape of the output.\n fixed_leg_payment_times: A real `Tensor` of the same dtype as `expiries`.\n The payment times for each payment in the fixed leg. The shape of this\n input should be `expiries.shape + [n]` where `n` denotes the number of\n fixed payments in each leg. The `fixed_leg_payment_times` should be\n greater-than or equal-to the corresponding expiries.\n fixed_leg_daycount_fractions: A real `Tensor` of the same dtype and\n compatible shape as `fixed_leg_payment_times`. The daycount fractions for\n each payment in the fixed leg.\n fixed_leg_coupon: A real `Tensor` of the same dtype and compatible shape as\n `fixed_leg_payment_times`. The fixed rate for each payment in the fixed\n leg.\n reference_rate_fn: A Python callable that accepts expiry time as a real\n `Tensor` and returns a `Tensor` of shape `input_shape +\n [num_hjm_factors]`. 
Returns the continuously compounded zero rate at the\n present time for the input expiry time.\n num_hjm_factors: A Python scalar which corresponds to the number of factors\n in the HJM model to be used for pricing.\n mean_reversion: A real positive `Tensor` of shape `[num_hjm_factors]`.\n Corresponds to the mean reversion rate of each factor.\n volatility: A real positive `Tensor` of the same `dtype` and shape as\n `mean_reversion` or a callable with the following properties: (a) The\n callable should accept a scalar `Tensor` `t` and a 1-D `Tensor` `r(t)`\n of shape `[num_samples]` and returns a 2-D `Tensor` of shape\n `[num_samples, num_hjm_factors]`. The variable `t` stands for time and\n `r(t)` is the short rate at time `t`. The function returns the\n instantaneous volatility `sigma(t) = sigma(t, r(r))`. When `volatility`\n is specified as a real `Tensor`, each factor is assumed to have a\n constant instantaneous volatility and the model is effectively a\n Gaussian HJM model. Corresponds to the instantaneous volatility of each\n factor.\n time_step: Scalar real `Tensor`. Maximal distance between time grid points\n in Euler scheme. Relevant when Euler scheme is used for simulation. This\n input is required.\n notional: An optional `Tensor` of same dtype and compatible shape as\n `strikes`specifying the notional amount for the underlying swaps.\n Default value: None in which case the notional is set to 1.\n is_payer_swaption: A boolean `Tensor` of a shape compatible with `expiries`.\n Indicates whether the swaption is a payer (if True) or a receiver (if\n False) swaption. If not supplied, payer swaptions are assumed.\n num_samples: Positive scalar `int32` `Tensor`. The number of simulation\n paths during Monte-Carlo valuation. This input is ignored during analytic\n valuation.\n Default value: The default value is 1.\n random_type: Enum value of `RandomType`. The type of (quasi)-random number\n generator to use to generate the simulation paths. This input is relevant\n only for Monte-Carlo valuation and ignored during analytic valuation.\n Default value: `None` which maps to the standard pseudo-random numbers.\n seed: Seed for the random number generator. The seed is only relevant if\n `random_type` is one of `[STATELESS, PSEUDO, HALTON_RANDOMIZED,\n PSEUDO_ANTITHETIC, STATELESS_ANTITHETIC]`. For `PSEUDO`,\n `PSEUDO_ANTITHETIC` and `HALTON_RANDOMIZED` the seed should be an Python\n integer. For `STATELESS` and `STATELESS_ANTITHETIC` must be supplied as\n an integer `Tensor` of shape `[2]`. This input is relevant only for\n Monte-Carlo valuation and ignored during analytic valuation.\n Default value: `None` which means no seed is set.\n skip: `int32` 0-d `Tensor`. The number of initial points of the Sobol or\n Halton sequence to skip. Used only when `random_type` is 'SOBOL',\n 'HALTON', or 'HALTON_RANDOMIZED', otherwise ignored.\n Default value: `0`.\n dtype: The default dtype to use when converting values to `Tensor`s.\n Default value: `None` which means that default dtypes inferred by\n TensorFlow are used.\n name: Python string. The name to give to the ops created by this function.\n Default value: `None` which maps to the default name `hjm_swaption_price`.\n\n Returns:\n A `Tensor` of real dtype and shape expiries.shape + [num_hjm_factors]\n containing the computed swaption prices. 
For swaptions that have reset in\n the past (expiries<0), the function sets the corresponding option prices to\n 0.0.\n \"\"\"\n if time_step is None:\n raise ValueError('`time_step` must be provided for simulation based '\n 'swaption valuation.')\n\n # TODO(b/160061740): Extend the functionality to support mid-curve swaptions.\n name = name or 'hjm_swaption_price'\n with tf.name_scope(name):\n expiries = tf.convert_to_tensor(expiries, dtype=dtype, name='expiries')\n dtype = dtype or expiries.dtype\n fixed_leg_payment_times = tf.convert_to_tensor(\n fixed_leg_payment_times, dtype=dtype, name='fixed_leg_payment_times')\n fixed_leg_daycount_fractions = tf.convert_to_tensor(\n fixed_leg_daycount_fractions,\n dtype=dtype,\n name='fixed_leg_daycount_fractions')\n fixed_leg_coupon = tf.convert_to_tensor(\n fixed_leg_coupon, dtype=dtype, name='fixed_leg_coupon')\n notional = tf.convert_to_tensor(notional, dtype=dtype, name='notional')\n notional = tf.expand_dims(\n tf.broadcast_to(notional, expiries.shape), axis=-1)\n if is_payer_swaption is None:\n is_payer_swaption = True\n is_payer_swaption = tf.convert_to_tensor(\n is_payer_swaption, dtype=tf.bool, name='is_payer_swaption')\n\n output_shape = expiries.shape.as_list() + [1]\n # Add a dimension corresponding to multiple cashflows in a swap\n if expiries.shape.rank == fixed_leg_payment_times.shape.rank - 1:\n expiries = tf.expand_dims(expiries, axis=-1)\n elif expiries.shape.rank < fixed_leg_payment_times.shape.rank - 1:\n raise ValueError('Swaption expiries not specified for all swaptions '\n 'in the batch. Expected rank {} but received {}.'.format(\n fixed_leg_payment_times.shape.rank - 1,\n expiries.shape.rank))\n\n # Expected shape: batch_shape + [m], where m is the number of fixed leg\n # payments per underlying swap. This is the same as\n # fixed_leg_payment_times.shape\n #\n # We need to explicitly use tf.repeat because we need to price\n # batch_shape + [m] bond options with different strikes along the last\n # dimension.\n expiries = tf.repeat(\n expiries, tf.shape(fixed_leg_payment_times)[-1], axis=-1)\n\n # Monte-Carlo pricing\n model = quasi_gaussian_hjm.QuasiGaussianHJM(\n num_hjm_factors,\n mean_reversion=mean_reversion,\n volatility=volatility,\n initial_discount_rate_fn=reference_rate_fn,\n dtype=dtype)\n\n def _sample_discount_curve_path_fn(times, curve_times, num_samples):\n p_t_tau, r_t, _ = model.sample_discount_curve_paths(\n times=times,\n curve_times=curve_times,\n num_samples=num_samples,\n random_type=random_type,\n time_step=time_step,\n seed=seed,\n skip=skip)\n p_t_tau = tf.expand_dims(p_t_tau, axis=-1)\n r_t = tf.expand_dims(r_t, axis=-1)\n return p_t_tau, r_t\n\n payoff_discount_factors, payoff_bond_price = (\n swaption_util.discount_factors_and_bond_prices_from_samples(\n expiries=expiries,\n payment_times=fixed_leg_payment_times,\n sample_discount_curve_paths_fn=_sample_discount_curve_path_fn,\n num_samples=num_samples,\n time_step=time_step,\n dtype=dtype))\n\n # Add an axis corresponding to `dim`\n fixed_leg_pv = tf.expand_dims(\n fixed_leg_coupon * fixed_leg_daycount_fractions,\n axis=-1) * payoff_bond_price\n\n # Sum fixed coupon payments within each swap.\n # Here, axis=-2 is the payments axis - i.e. 
summing over all payments; and\n # the last axis is the `dim` axis, as explained in comment above\n # `fixed_leg_pv` (Note that for HJM the dim of this axis is 1 always).\n fixed_leg_pv = tf.math.reduce_sum(fixed_leg_pv, axis=-2)\n float_leg_pv = 1.0 - payoff_bond_price[..., -1, :]\n payoff_swap = payoff_discount_factors[..., -1, :] * (\n float_leg_pv - fixed_leg_pv)\n payoff_swap = tf.where(is_payer_swaption, payoff_swap, -1.0 * payoff_swap)\n payoff_swaption = tf.math.maximum(payoff_swap, 0.0)\n option_value = tf.reshape(\n tf.math.reduce_mean(payoff_swaption, axis=0), output_shape)\n\n return notional * option_value\n",
"# Lint as: python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for swaptions using HJM model.\"\"\"\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nimport tf_quant_finance as tff\n\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import\n\n\nclass HJMSwaptionTest(tf.test.TestCase):\n\n def test_correctness_1d(self):\n \"\"\"Tests model with constant parameters in 1 dimension.\"\"\"\n dtype = tf.float64\n error_tol = 1e-3\n\n # 1y x 1y swaption with quarterly payments.\n expiries = np.array([1.0])\n fixed_leg_payment_times = np.array([1.25, 1.5, 1.75, 2.0])\n fixed_leg_daycount_fractions = 0.25 * np.ones_like(fixed_leg_payment_times)\n fixed_leg_coupon = 0.011 * np.ones_like(fixed_leg_payment_times)\n zero_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)\n mean_reversion = [0.03]\n volatility = [0.02]\n\n price = tff.models.hjm.swaption_price(\n expiries=expiries,\n fixed_leg_payment_times=fixed_leg_payment_times,\n fixed_leg_daycount_fractions=fixed_leg_daycount_fractions,\n fixed_leg_coupon=fixed_leg_coupon,\n reference_rate_fn=zero_rate_fn,\n notional=100.,\n num_hjm_factors=1,\n mean_reversion=mean_reversion,\n volatility=volatility,\n num_samples=500000,\n time_step=0.1,\n random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,\n seed=[1, 2])\n\n self.assertEqual(price.dtype, dtype)\n self.assertAllEqual(price.shape, [1, 1])\n price = self.evaluate(price)\n self.assertAllClose(\n price, [[0.7163243383624043]], rtol=error_tol, atol=error_tol)\n\n def test_receiver_1d(self):\n \"\"\"Test model with constant parameters in 1 dimension.\"\"\"\n dtype = tf.float64\n error_tol = 1e-2\n\n # 1y x 1y swaption with quarterly payments.\n expiries = np.array([1.0])\n fixed_leg_payment_times = np.array([1.25, 1.5, 1.75, 2.0])\n fixed_leg_daycount_fractions = 0.25 * np.ones_like(fixed_leg_payment_times)\n fixed_leg_coupon = 0.011 * np.ones_like(fixed_leg_payment_times)\n zero_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)\n mean_reversion = [0.03]\n volatility = [0.02]\n\n price = tff.models.hjm.swaption_price(\n expiries=expiries,\n fixed_leg_payment_times=fixed_leg_payment_times,\n fixed_leg_daycount_fractions=fixed_leg_daycount_fractions,\n fixed_leg_coupon=fixed_leg_coupon,\n reference_rate_fn=zero_rate_fn,\n notional=100.,\n num_hjm_factors=1,\n mean_reversion=mean_reversion,\n volatility=volatility,\n is_payer_swaption=False,\n num_samples=500000,\n time_step=0.1,\n random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,\n seed=[1, 2],\n dtype=dtype)\n\n self.assertEqual(price.dtype, dtype)\n self.assertAllEqual(price.shape, [1, 1])\n price = self.evaluate(price)\n self.assertAllClose(\n price, [[0.813482544626056]], rtol=error_tol, atol=error_tol)\n\n def test_time_dep_1d(self):\n \"\"\"Tests model with time-dependent parameters in 1 dimension.\"\"\"\n dtype = tf.float64\n error_tol = 1e-3\n\n # 1y x 1y swaption with quarterly payments.\n expiries = np.array([1.0])\n 
fixed_leg_payment_times = np.array([1.25, 1.5, 1.75, 2.0])\n fixed_leg_daycount_fractions = 0.25 * np.ones_like(fixed_leg_payment_times)\n fixed_leg_coupon = 0.011 * np.ones_like(fixed_leg_payment_times)\n zero_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)\n mean_reversion = [0.03]\n\n zero_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)\n vol_piecewise_constant_fn = tff.math.piecewise.PiecewiseConstantFunc(\n jump_locations=[0.5], values=[0.01, 0.02], dtype=dtype)\n\n def piecewise_1d_volatility_fn(t, r_t):\n vol = vol_piecewise_constant_fn([t])\n return tf.fill(dims=[r_t.shape[0], 1], value=vol)\n\n price = tff.models.hjm.swaption_price(\n expiries=expiries,\n fixed_leg_payment_times=fixed_leg_payment_times,\n fixed_leg_daycount_fractions=fixed_leg_daycount_fractions,\n fixed_leg_coupon=fixed_leg_coupon,\n reference_rate_fn=zero_rate_fn,\n notional=100.,\n num_hjm_factors=1,\n mean_reversion=mean_reversion,\n volatility=piecewise_1d_volatility_fn,\n num_samples=1000000,\n time_step=0.1,\n random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,\n seed=[1, 2],\n dtype=dtype)\n\n self.assertEqual(price.dtype, dtype)\n self.assertAllEqual(price.shape, [1, 1])\n price = self.evaluate(price)\n self.assertAllClose(\n price, [[0.5593057004094042]], rtol=error_tol, atol=error_tol)\n\n def test_1d_batch_1d(self):\n \"\"\"Tests 1-d batch.\"\"\"\n dtype = tf.float64\n error_tol = 1e-3\n\n # 1y x 1y swaption with quarterly payments.\n expiries = np.array([1.0, 1.0])\n fixed_leg_payment_times = np.array([[1.25, 1.5, 1.75, 2.0],\n [1.25, 1.5, 1.75, 2.0]])\n fixed_leg_daycount_fractions = 0.25 * np.ones_like(fixed_leg_payment_times)\n fixed_leg_coupon = 0.011 * np.ones_like(fixed_leg_payment_times)\n zero_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)\n mean_reversion = [0.03]\n volatility = [0.02]\n\n zero_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)\n price = tff.models.hjm.swaption_price(\n expiries=expiries,\n fixed_leg_payment_times=fixed_leg_payment_times,\n fixed_leg_daycount_fractions=fixed_leg_daycount_fractions,\n fixed_leg_coupon=fixed_leg_coupon,\n reference_rate_fn=zero_rate_fn,\n notional=100.,\n num_hjm_factors=1,\n mean_reversion=mean_reversion,\n volatility=volatility,\n num_samples=500000,\n time_step=0.1,\n random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,\n seed=[1, 2],\n dtype=dtype)\n\n self.assertEqual(price.dtype, dtype)\n self.assertAllEqual(price.shape, [2, 1])\n price = self.evaluate(price)\n self.assertAllClose(\n price, [[0.7163243383624043], [0.7163243383624043]],\n rtol=error_tol,\n atol=error_tol)\n\n def test_1d_batch_1d_notional(self):\n \"\"\"Tests 1-d batch with different notionals.\"\"\"\n dtype = tf.float64\n error_tol = 1e-3\n\n # 1y x 1y swaption with quarterly payments.\n expiries = np.array([1.0, 1.0])\n fixed_leg_payment_times = np.array([[1.25, 1.5, 1.75, 2.0],\n [1.25, 1.5, 1.75, 2.0]])\n fixed_leg_daycount_fractions = 0.25 * np.ones_like(fixed_leg_payment_times)\n fixed_leg_coupon = 0.011 * np.ones_like(fixed_leg_payment_times)\n zero_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)\n mean_reversion = [0.03]\n volatility = [0.02]\n\n price = tff.models.hjm.swaption_price(\n expiries=expiries,\n fixed_leg_payment_times=fixed_leg_payment_times,\n fixed_leg_daycount_fractions=fixed_leg_daycount_fractions,\n fixed_leg_coupon=fixed_leg_coupon,\n reference_rate_fn=zero_rate_fn,\n notional=[100., 200.],\n num_hjm_factors=1,\n mean_reversion=mean_reversion,\n volatility=volatility,\n num_samples=500000,\n 
time_step=0.1,\n random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,\n seed=[1, 2],\n dtype=dtype)\n\n self.assertEqual(price.dtype, dtype)\n self.assertAllEqual(price.shape, [2, 1])\n price = self.evaluate(price)\n self.assertAllClose(\n price, [[0.7163243383624043], [2 * 0.7163243383624043]],\n rtol=error_tol,\n atol=error_tol)\n\n def test_2d_batch_1d(self):\n \"\"\"Tests 2-d batch.\"\"\"\n dtype = tf.float64\n error_tol = 1e-3\n\n zero_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)\n expiries_2d = np.array([[1.0, 1.0], [1.0, 1.0]])\n fixed_leg_payment_times_2d = np.array([[[1.25, 1.5, 1.75, 2.0],\n [1.25, 1.5, 1.75, 2.0]],\n [[1.25, 1.5, 1.75, 2.0],\n [1.25, 1.5, 1.75, 2.0]]])\n fixed_leg_daycount_fractions_2d = 0.25 * np.ones_like(\n fixed_leg_payment_times_2d)\n fixed_leg_coupon_2d = 0.011 * np.ones_like(fixed_leg_payment_times_2d)\n mean_reversion = [0.03]\n volatility = [0.02]\n\n price = tff.models.hjm.swaption_price(\n expiries=expiries_2d,\n fixed_leg_payment_times=fixed_leg_payment_times_2d,\n fixed_leg_daycount_fractions=fixed_leg_daycount_fractions_2d,\n fixed_leg_coupon=fixed_leg_coupon_2d,\n reference_rate_fn=zero_rate_fn,\n notional=100.,\n num_hjm_factors=1,\n mean_reversion=mean_reversion,\n volatility=volatility,\n num_samples=500000,\n time_step=0.1,\n random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,\n seed=[1, 2],\n dtype=dtype)\n\n self.assertEqual(price.dtype, dtype)\n self.assertAllEqual(price.shape, [2, 2, 1])\n price = self.evaluate(price)\n expected = [\n 0.7163243383624043, 0.7163243383624043, 0.7163243383624043,\n 0.7163243383624043\n ]\n self.assertAllClose(\n price, tf.reshape(expected, (2, 2, 1)), rtol=error_tol, atol=error_tol)\n\n def test_correctness_2_factor(self):\n \"\"\"Tests model with constant parameters in 2 dimensions.\"\"\"\n # 1y x 1y swaption with quarterly payments.\n dtype = tf.float64\n error_tol = 1e-3\n\n expiries = np.array([1.0])\n fixed_leg_payment_times = np.array([1.25, 1.5, 1.75, 2.0])\n fixed_leg_daycount_fractions = 0.25 * np.ones_like(fixed_leg_payment_times)\n fixed_leg_coupon = 0.011 * np.ones_like(fixed_leg_payment_times)\n mean_reversion = [0.03, 0.06]\n volatility = [0.02, 0.01]\n zero_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)\n\n price = tff.models.hjm.swaption_price(\n expiries=expiries,\n fixed_leg_payment_times=fixed_leg_payment_times,\n fixed_leg_daycount_fractions=fixed_leg_daycount_fractions,\n fixed_leg_coupon=fixed_leg_coupon,\n reference_rate_fn=zero_rate_fn,\n notional=100.,\n num_hjm_factors=2,\n mean_reversion=mean_reversion,\n volatility=volatility,\n num_samples=500000,\n time_step=0.1,\n random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,\n seed=[1, 2],\n dtype=dtype)\n\n self.assertEqual(price.dtype, dtype)\n self.assertAllEqual(price.shape, [1, 1])\n price = self.evaluate(price)\n self.assertAllClose(price, [[0.802226]], rtol=error_tol, atol=error_tol)\n\n def test_correctness_2_factor_hull_white_consistency(self):\n \"\"\"Test that under certain conditions HJM matches analytic HW results.\n\n For the two factor model, when both mean reversions are equivalent, then\n the HJM model matches that of a HW one-factor model with the same mean\n reversion, and effective volatility:\n\n eff_vol = sqrt(vol1^2 + vol2^2 + 2 rho vol1 * vol2)\n\n where rho is the cross correlation between the two factors. 
In this\n specific test, we assume rho = 0.0.\n \"\"\"\n dtype = tf.float64\n error_tol = 1e-3\n\n expiries = np.array([1.0])\n fixed_leg_payment_times = np.array([1.25, 1.5, 1.75, 2.0])\n fixed_leg_daycount_fractions = 0.25 * np.ones_like(fixed_leg_payment_times)\n fixed_leg_coupon = 0.011 * np.ones_like(fixed_leg_payment_times)\n zero_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)\n\n mu = 0.03\n vol1 = 0.02\n vol2 = 0.01\n eff_vol = np.sqrt(vol1**2 + vol2**2)\n\n hjm_price = tff.models.hjm.swaption_price(\n expiries=expiries,\n fixed_leg_payment_times=fixed_leg_payment_times,\n fixed_leg_daycount_fractions=fixed_leg_daycount_fractions,\n fixed_leg_coupon=fixed_leg_coupon,\n reference_rate_fn=zero_rate_fn,\n notional=100.,\n num_hjm_factors=2,\n mean_reversion=[mu, mu],\n volatility=[vol1, vol2],\n num_samples=500000,\n time_step=0.1,\n random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,\n seed=[1, 2],\n dtype=dtype)\n hjm_price = self.evaluate(hjm_price)\n\n hw_price = tff.models.hull_white.swaption_price(\n expiries=expiries,\n floating_leg_start_times=[0], # Unused\n floating_leg_end_times=[0], # Unused\n floating_leg_daycount_fractions=[0], # Unused\n fixed_leg_payment_times=fixed_leg_payment_times,\n fixed_leg_daycount_fractions=fixed_leg_daycount_fractions,\n fixed_leg_coupon=fixed_leg_coupon,\n reference_rate_fn=zero_rate_fn,\n notional=100.,\n dim=1,\n mean_reversion=[mu],\n volatility=[eff_vol],\n use_analytic_pricing=True,\n dtype=dtype)\n hw_price = self.evaluate(hw_price)\n\n self.assertNear(hjm_price, hw_price, error_tol)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.compat.v2.math.reduce_mean",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.broadcast_to",
"tensorflow.compat.v2.where",
"tensorflow.compat.v2.math.reduce_sum",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.expand_dims",
"tensorflow.compat.v2.math.maximum"
],
[
"tensorflow.compat.v2.fill",
"tensorflow.compat.v2.ones_like",
"numpy.ones_like",
"numpy.sqrt",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.reshape",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mageofboy/pylot | [
"d1295a42f0edd79670dc64053824a3e075d433e2",
"d1295a42f0edd79670dc64053824a3e075d433e2"
] | [
"scripts/gen_crops_from_obj_tracker_logs.py",
"pylot/perception/fusion/fusion_verification_operator.py"
] | [
"\"\"\"\nTakes in a folder of observations (center-[timestep].png images,\nmot-[timestep].txt tracker logs) and creates and saves crops of the bounding\nboxes. Useful for training the feature extractor model for DeepSORT tracker.\n\nExample usage:\npython gen_crops_from_obj_tracker_logs.py --data sample_data --out crops\n\"\"\"\n\nimport cv2\nimport glob\nimport json\nimport numpy as np\nimport os\n\nfrom absl import app\nfrom absl import flags\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_bool(\"bbox_json\", False,\n \"If True, generate crops from bbox json log files \\\n instead of mot text log files.\")\nflags.DEFINE_string(\"data\", \"data\", \"Path to data folder.\")\nflags.DEFINE_string(\"out\", \"crops\", \"Path to dir for output data.\")\n\n\ndef merge_bbox_json_files(dir_path, result_file=\"combined_logs.txt\"):\n \"\"\"\n Merges individual bbox json files into one txt file of detections.\n These files contain lists of detections, with each detection being\n a 4-item list with format: [class label, detailed label, id, bbox].\n The bbox has format [[xmin, ymin], [xmax, ymax]].\n\n Args:\n dir_path: Path to directory that holds bboxes-[timestep].json files.\n result_file: Name of output file containing merged log files.\n \"\"\"\n merged_mot_text = []\n bbox_json_logs = sorted(glob.glob(dir_path + \"/bboxes*\"),\n key=lambda line: int(line.split(\"bboxes-\")[1][:-5])) # sort by timestep\n print(\"Found {} bbox json files.\".format(len(bbox_json_logs)))\n for log in bbox_json_logs:\n timestamp = log.split(\"bboxes-\")[1][:-5]\n with open(log, \"r\") as f:\n data = json.load(f)\n for detection in data:\n general_label, detailed_label, obj_id, bbox_coords = detection\n obj_id = \"+\".join([detailed_label, str(obj_id)])\n x, y = bbox_coords[0]\n w, h = bbox_coords[1][0] - x, bbox_coords[1][1] - y\n mot_text_line = \",\".join([timestamp, obj_id, str(x), str(y), str(w), str(h)])\n merged_mot_text.append(mot_text_line)\n with open(result_file, \"w\") as f:\n f.write('\\n'.join(merged_mot_text))\n\ndef merge_mot_txt_files(dir_path, result_file=\"combined_logs.txt\"):\n \"\"\"\n Merges individual mot-format txt files into one txt file of detections.\n\n Args:\n dir_path: Path to directory that holds mot-[timestep].txt files.\n result_file: Name of output file containing merged log files.\n \"\"\"\n relevant_files = sorted(\n glob.glob(dir_path + \"/*.txt\"),\n key=lambda line: int(line.split(\"mot-\")[1][:-4])) # sort by timestamp\n print(\"Found {} mot txt files.\".format(len(relevant_files)))\n with open(result_file, \"w\") as combined_file:\n combined_text = []\n for f in relevant_files:\n with open(f, \"r\") as sub_file:\n combined_text.extend(sub_file.readlines())\n combined_file.writelines(combined_text)\n\n\ndef get_crops(mot_detections_file, imgs_path, out_dir, area_tol=1500):\n \"\"\"\n Uses detections/bboxes from mot_detections_file to crop bboxes from\n corresponding images at imgs_path. 
Only saves crops with area > area_tol.\n Writes new log file that is filtered to only contain the saved crops.\n \"\"\"\n with open(mot_detections_file, \"r\") as f:\n mot_data = f.readlines()\n kept_crops_infos = []\n for line in mot_data:\n info = line.split(\",\")\n timestamp, obj_id, x, y, w, h = info[0], info[1], int(info[2]), int(\n info[3]), int(info[4]), int(info[5])\n img = cv2.imread(imgs_path + \"/center-{}.png\".format(timestamp))\n crop = img[y:y + h, x:x + w, :]\n if h * w >= area_tol:\n cv2.imwrite(out_dir + \"/crop-{}-{}.png\".format(timestamp, obj_id),\n crop)\n kept_crops_infos.append(line)\n print(\"Keeping {}/{} crops with area >= {}\".format(len(kept_crops_infos),\n len(mot_data),\n area_tol))\n with open(out_dir + \"/filtered_crops_logs.txt\", \"w\") as f:\n f.writelines(kept_crops_infos)\n\n\ndef convert_crops_to_pytorch_imagefolder_structure(crops_dir):\n \"\"\"\n Converts crops to training and test set (~90/10 split).\n All crops for a certain object id are grouped into 1 directory.\n (i.e., crops/train/object_id/all_crops_of_this_object_id.png)\n \"\"\"\n files = glob.glob(crops_dir + \"/*.png\")\n obj_id_to_crops = {}\n for f in files:\n obj_id = f.split(\".png\")[0].split(\"-\")[-1]\n if obj_id not in obj_id_to_crops:\n obj_id_to_crops[obj_id] = {f}\n else:\n obj_id_to_crops[obj_id].add(f)\n os.mkdir(crops_dir + \"/train\")\n os.mkdir(crops_dir + \"/test\")\n for obj_id in obj_id_to_crops:\n os.mkdir(crops_dir + \"/train/\" + obj_id)\n os.mkdir(crops_dir + \"/test/\" + obj_id)\n for f in obj_id_to_crops[obj_id]:\n img_name = f.split(\"/\")[-1]\n if np.random.randint(0, 10):\n os.rename(f, crops_dir + \"/train/\" + obj_id + \"/\" + img_name)\n else:\n os.rename(f, crops_dir + \"/test/\" + obj_id + \"/\" + img_name)\n\n\ndef main(_):\n log_file_path = FLAGS.data + \"/combined_logs.txt\"\n if FLAGS.bbox_json:\n merge_bbox_json_files(FLAGS.data, result_file=log_file_path)\n else:\n merge_mot_txt_files(FLAGS.data, result_file=log_file_path)\n get_crops(log_file_path, FLAGS.data, FLAGS.out)\n convert_crops_to_pytorch_imagefolder_structure(FLAGS.out)\n\n\nif __name__ == \"__main__\":\n app.run(main)\n",
"from collections import deque\n\nimport erdos\n\nimport numpy as np\n\n\nclass FusionVerificationOperator(erdos.Operator):\n def __init__(self, ground_obstacles_stream, fusion_stream):\n ground_obstacles_stream.add_callback(self.on_obstacles_update)\n fusion_stream.add_callback(self.on_fusion_update)\n self._logger = erdos.utils.setup_logging(self.config.name,\n self.config.log_file_name)\n self.vehicles = deque()\n\n @staticmethod\n def connect(ground_obstacles_stream, fusion_stream):\n return []\n\n def on_obstacles_update(self, msg):\n vehicle_positions = []\n for obstacle in msg.obstacles:\n if obstacle.is_vehicle():\n position = np.array([\n obstacle.transform.location.x,\n obstacle.transform.location.y\n ])\n vehicle_positions.append(position)\n\n self.vehicles.append((msg.timestamp, vehicle_positions))\n\n def on_fusion_update(self, msg):\n while self.vehicles[0][0] < msg.timestamp:\n self.vehicles.popleft()\n\n truths = self.vehicles[0][1]\n min_errors = []\n for prediction in msg.obstacle_positions:\n min_error = float(\"inf\")\n for truth in truths:\n error = np.linalg.norm(prediction - truth)\n min_error = min(error, min_error)\n min_errors.append(min_error)\n\n self._logger.info(\n \"Fusion: min vehicle position errors: {}\".format(min_errors))\n"
] | [
[
"numpy.random.randint"
],
[
"numpy.array",
"numpy.linalg.norm"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TheSeriousProgrammer/Keras_QuickNet_SSD | [
"30c3c7ac8a2c05cc60fb4635a3f954c45e46108a"
] | [
"utils/priors.py"
] | [
"from typing import List\r\nimport itertools\r\nimport collections\r\nimport tensorflow as tf\r\nimport numpy as np\r\nfrom utils.misc import *\r\n\r\nSSDBoxSizes = collections.namedtuple('SSDBoxSizes', ['min', 'max'])\r\n\r\nSSDSpec = collections.namedtuple('SSDSpec', ['feature_map_size', 'shrinkage', 'box_sizes', 'aspect_ratios'])\r\n\r\ndef generate_ssd_priors(specs: List[SSDSpec], image_size, clamp=True):\r\n \"\"\"Generate SSD Prior Boxes.\r\n It returns the center, height and width of the priors. The values are relative to the image size\r\n Args:\r\n specs: SSDSpecs about the shapes of sizes of prior boxes. i.e.\r\n specs = [\r\n SSDSpec(38, 8, SSDBoxSizes(30, 60), [2]),\r\n SSDSpec(19, 16, SSDBoxSizes(60, 111), [2, 3]),\r\n SSDSpec(10, 32, SSDBoxSizes(111, 162), [2, 3]),\r\n SSDSpec(5, 64, SSDBoxSizes(162, 213), [2, 3]),\r\n SSDSpec(3, 100, SSDBoxSizes(213, 264), [2]),\r\n SSDSpec(1, 300, SSDBoxSizes(264, 315), [2])\r\n ]\r\n image_size: image size.\r\n clamp: if true, clamp the values to make fall between [0.0, 1.0]\r\n Returns:\r\n priors (num_priors, 4): The prior boxes represented as [[center_x, center_y, w, h]]. All the values\r\n are relative to the image size.\r\n \"\"\"\r\n \r\n priors = []\r\n for spec in specs:\r\n scale = image_size / spec.shrinkage\r\n for j, i in itertools.product(range(spec.feature_map_size), repeat=2):\r\n x_center = (i + 0.5) / scale\r\n y_center = (j + 0.5) / scale\r\n\r\n # small sized square box\r\n size = spec.box_sizes.min\r\n h = w = size / image_size\r\n priors.append([\r\n x_center,\r\n y_center,\r\n w,\r\n h\r\n ])\r\n\r\n # big sized square box\r\n size = np.sqrt(spec.box_sizes.max * spec.box_sizes.min)\r\n h = w = size / image_size\r\n priors.append([\r\n x_center,\r\n y_center,\r\n w,\r\n h\r\n ])\r\n\r\n # change h/w ratio of the small sized box\r\n size = spec.box_sizes.min\r\n h = w = size / image_size\r\n for ratio in spec.aspect_ratios:\r\n ratio = np.sqrt(ratio)\r\n priors.append([\r\n x_center,\r\n y_center,\r\n w * ratio,\r\n h / ratio\r\n ])\r\n priors.append([\r\n x_center,\r\n y_center,\r\n w / ratio,\r\n h * ratio\r\n ])\r\n\r\n priors = np.array(priors, dtype=np.float32)\r\n if clamp:\r\n np.clip(priors, 0.0, 1.0, out=priors)\r\n return tf.convert_to_tensor(priors)\r\n\r\[email protected]\r\ndef assign_priors(gt_boxes, gt_labels, corner_form_priors,\r\n iou_threshold=0.45):\r\n \"\"\"Assign ground truth boxes and targets to priors.\r\n Args:\r\n gt_boxes (num_targets, 4): ground truth boxes.\r\n gt_labels (num_targets): labels of targets.\r\n priors (num_priors, 4): corner form priors\r\n Returns:\r\n boxes (num_priors, 4): real values for priors.\r\n labels (num_priors): labels for priors.\r\n \"\"\"\r\n # size: num_priors x num_targets\r\n ious = iou_of(tf.expand_dims(gt_boxes, axis=0), tf.expand_dims(corner_form_priors, axis=1))\r\n\r\n # size: num_priors\r\n best_target_per_prior = tf.math.reduce_max(ious, axis=1)\r\n best_target_per_prior_index = tf.math.argmax(ious, axis=1)\r\n # size: num_targets\r\n best_prior_per_target = tf.math.reduce_max(ious, axis=0)\r\n best_prior_per_target_index = tf.math.argmax(ious, axis=0)\r\n\r\n targets = tf.range(tf.shape(best_prior_per_target_index)[0], dtype='int64')\r\n \r\n best_target_per_prior_index = tf.tensor_scatter_nd_update(best_target_per_prior_index, tf.expand_dims(best_prior_per_target_index, 1), targets)\r\n # 2.0 is used to make sure every target has a prior assigned\r\n best_target_per_prior = tf.tensor_scatter_nd_update(best_target_per_prior, 
tf.expand_dims(best_prior_per_target_index, 1), tf.ones_like(best_prior_per_target_index, dtype=tf.float32)*2.0)\r\n # size: num_priors\r\n labels = tf.gather(gt_labels, best_target_per_prior_index)\r\n\r\n labels = tf.where(tf.less(best_target_per_prior, iou_threshold), tf.constant(0, dtype='int64'), labels)\r\n\r\n # labels[best_target_per_prior < iou_threshold] = 0 # the backgournd id\r\n boxes = tf.gather(gt_boxes, best_target_per_prior_index)\r\n return boxes, labels\r\n\r\nclass MatchPrior(object):\r\n def __init__(self, center_form_priors, center_variance, size_variance, iou_threshold):\r\n self.center_form_priors = center_form_priors\r\n self.corner_form_priors = center_form_to_corner_form(center_form_priors)\r\n self.center_variance = center_variance\r\n self.size_variance = size_variance\r\n self.iou_threshold = iou_threshold\r\n\r\n def __call__(self, gt_boxes, gt_labels):\r\n if type(gt_boxes) is np.ndarray:\r\n gt_boxes = tf.convert_to_tensor(gt_boxes)\r\n if type(gt_labels) is np.ndarray:\r\n gt_labels = tf.convert_to_tensor(gt_labels)\r\n boxes, labels = assign_priors(gt_boxes, gt_labels, self.corner_form_priors, self.iou_threshold)\r\n boxes = corner_form_to_center_form(boxes)\r\n locations = convert_boxes_to_locations(boxes, self.center_form_priors, self.center_variance, self.size_variance)\r\n return locations, labels"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.math.argmax",
"tensorflow.constant",
"numpy.sqrt",
"numpy.clip",
"tensorflow.math.reduce_max",
"tensorflow.less",
"tensorflow.shape",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"tensorflow.gather",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
lshengjian/rlstudy | [
"263bf68c94867551002ff0b02441c67b02ebe8ef"
] | [
"rlstudy/a2c/policies.py"
] | [
"import numpy as np\nimport tensorflow as tf\n#from gym import Discrete\nfrom rlstudy.common.utils import conv, fc, conv_to_fc, batch_to_seq, seq_to_batch\nfrom rlstudy.common.distributions import make_pdtype\n#from rlstudy.common.input import observation_input\nfrom gym.spaces import Discrete, Box\n\ndef nature_cnn(unscaled_images, **conv_kwargs):\n \"\"\"\n CNN from Nature paper.\n \"\"\"\n scaled_images = tf.cast(unscaled_images, tf.float32) / 255.\n activ = tf.nn.relu\n h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2),\n **conv_kwargs))\n h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs))\n h3 = activ(conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs))\n h3 = conv_to_fc(h3)\n return activ(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2)))\n\nclass CnnPolicy(object):\n def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, reuse=False, **conv_kwargs): #pylint: disable=W0613\n self.pdtype = make_pdtype(ac_space)\n X, processed_x = observation_input(ob_space, nbatch)\n # X:0~255 processed_x:0~1.0\n with tf.variable_scope(\"model\", reuse=reuse):\n h = nature_cnn(processed_x, **conv_kwargs)\n vf = fc(h, 'v', 1)[:,0]\n self.pd, self.pi = self.pdtype.pdfromlatent(h, init_scale=0.01)\n\n a0 = self.pd.sample()\n neglogp0 = self.pd.neglogp(a0)\n self.initial_state = None\n\n def step(ob, *_args, **_kwargs):\n a, v, neglogp = sess.run([a0, vf, neglogp0], {X:ob})\n return a, v, self.initial_state, neglogp\n\n def value(ob, *_args, **_kwargs):\n return sess.run(vf, {X:ob})\n\n self.X = X\n self.vf = vf\n self.step = step\n self.value = value\n\n\ndef observation_input(ob_space, batch_size=None, name='Ob'):\n '''\n Build observation input with encoding depending on the \n observation space type\n Params:\n \n ob_space: observation space (should be one of gym.spaces)\n batch_size: batch size for input (default is None, so that resulting input placeholder can take tensors with any batch size)\n name: tensorflow variable name for input placeholder\n\n returns: tuple (input_placeholder, processed_input_tensor)\n '''\n if isinstance(ob_space, Discrete):\n input_x = tf.placeholder(shape=(batch_size,), dtype=tf.int32, name=name)\n processed_x = tf.to_float(tf.one_hot(input_x, ob_space.n))\n return input_x, processed_x\n\n elif isinstance(ob_space, Box):\n input_shape = (batch_size,) + ob_space.shape\n input_x = tf.placeholder(shape=input_shape, dtype=ob_space.dtype, name=name)\n processed_x = tf.to_float(input_x)\n return input_x, processed_x\n\n else:\n raise NotImplementedError"
] | [
[
"numpy.sqrt",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.one_hot",
"tensorflow.to_float",
"tensorflow.variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
boehm-e/self-attentive-parser | [
"24a50b529d38cc182082e4e72bbf79d1b24ec1da"
] | [
"src/benepar/integrations/spacy_plugin.py"
] | [
"import numpy as np\n\nfrom .downloader import load_trained_model\nfrom ..parse_base import BaseParser, BaseInputExample\nfrom .spacy_extensions import ConstituentData, NonConstituentException\n\nimport torch\n\n\nclass PartialConstituentData:\n def __init__(self):\n self.starts = [np.array([], dtype=int)]\n self.ends = [np.array([], dtype=int)]\n self.labels = [np.array([], dtype=int)]\n\n def finalize(self, doc, label_vocab):\n self.starts = np.hstack(self.starts)\n self.ends = np.hstack(self.ends)\n self.labels = np.hstack(self.labels)\n\n # TODO(nikita): Python for loops aren't very fast\n loc_to_constituent = np.full(len(doc), -1, dtype=int)\n prev = None\n for position in range(self.starts.shape[0]):\n if self.starts[position] != prev:\n prev = self.starts[position]\n loc_to_constituent[self.starts[position]] = position\n return ConstituentData(\n self.starts, self.ends, self.labels, loc_to_constituent, label_vocab\n ).serialize()\n\n\nclass SentenceWrapper(BaseInputExample):\n TEXT_NORMALIZATION_MAPPING = {\n \"`\": \"'\",\n \"«\": '\"',\n \"»\": '\"',\n \"‘\": \"'\",\n \"’\": \"'\",\n \"“\": '\"',\n \"”\": '\"',\n \"„\": '\"',\n \"‹\": \"'\",\n \"›\": \"'\",\n \"—\": \"--\", # em dash\n }\n\n def __init__(self, spacy_sent):\n self.sent = spacy_sent\n\n @property\n def words(self):\n return [\n self.TEXT_NORMALIZATION_MAPPING.get(token.text, token.text)\n for token in self.sent\n ]\n\n @property\n def space_after(self):\n return [bool(token.whitespace_) for token in self.sent]\n\n @property\n def tree(self):\n return None\n\n def leaves(self):\n return self.words\n\n def pos(self):\n return [(word, \"UNK\") for word in self.words]\n\n\nclass BeneparComponent:\n \"\"\"\n Berkeley Neural Parser (benepar) component for spaCy.\n\n Sample usage:\n >>> nlp = spacy.load('en_core_web_md')\n >>> if spacy.__version__.startswith('2'):\n nlp.add_pipe(BeneparComponent(\"benepar_en3\"))\n else:\n nlp.add_pipe(\"benepar\", config={\"model\": \"benepar_en3\"})\n >>> doc = nlp(\"The quick brown fox jumps over the lazy dog.\")\n >>> sent = list(doc.sents)[0]\n >>> print(sent._.parse_string)\n\n This component is only responsible for constituency parsing and (for some\n trained models) part-of-speech tagging. It should be preceded in the\n pipeline by other components that can, at minimum, perform tokenization and\n sentence segmentation.\n \"\"\"\n\n name = \"benepar\"\n\n def __init__(\n self,\n name,\n subbatch_max_tokens=500,\n disable_tagger=False,\n batch_size=\"ignored\",\n ):\n \"\"\"Load a trained parser model.\n\n Args:\n name (str): Model name, or path to pytorch saved model\n subbatch_max_tokens (int): Maximum number of tokens to process in\n each batch\n disable_tagger (bool, default False): Unless disabled, the parser\n will set predicted part-of-speech tags for the document,\n overwriting any existing tags provided by spaCy models or\n previous pipeline steps. 
This option has no effect for parser\n models that do not have a part-of-speech tagger built in.\n batch_size: deprecated and ignored; use subbatch_max_tokens instead\n \"\"\"\n self._parser = load_trained_model(name)\n if torch.cuda.is_available():\n self._parser.cuda()\n\n self.subbatch_max_tokens = subbatch_max_tokens\n self.disable_tagger = disable_tagger\n\n self._label_vocab = self._parser.config[\"label_vocab\"]\n label_vocab_size = max(self._label_vocab.values()) + 1\n self._label_from_index = [()] * label_vocab_size\n for label, i in self._label_vocab.items():\n if label:\n self._label_from_index[i] = tuple(label.split(\"::\"))\n else:\n self._label_from_index[i] = ()\n self._label_from_index = tuple(self._label_from_index)\n\n if not self.disable_tagger:\n tag_vocab = self._parser.config[\"tag_vocab\"]\n tag_vocab_size = max(tag_vocab.values()) + 1\n self._tag_from_index = [()] * tag_vocab_size\n for tag, i in tag_vocab.items():\n self._tag_from_index[i] = tag\n self._tag_from_index = tuple(self._tag_from_index)\n else:\n self._tag_from_index = None\n\n def __call__(self, doc):\n \"\"\"Update the input document with predicted constituency parses.\"\"\"\n # TODO(https://github.com/nikitakit/self-attentive-parser/issues/16): handle\n # tokens that consist entirely of whitespace.\n constituent_data = PartialConstituentData()\n wrapped_sents = [SentenceWrapper(sent) for sent in doc.sents]\n for sent, parse in zip(\n doc.sents,\n self._parser.parse(\n wrapped_sents,\n return_compressed=True,\n subbatch_max_tokens=self.subbatch_max_tokens,\n ),\n ):\n constituent_data.starts.append(parse.starts + sent.start)\n constituent_data.ends.append(parse.ends + sent.start)\n constituent_data.labels.append(parse.labels)\n\n if parse.tags is not None and not self.disable_tagger:\n for i, tag_id in enumerate(parse.tags):\n sent[i].tag_ = self._tag_from_index[tag_id]\n\n doc._._constituent_data = constituent_data.finalize(doc, self._label_from_index)\n return doc\n\n\ndef create_benepar_component(\n nlp,\n name,\n model: str,\n subbatch_max_tokens: int,\n disable_tagger: bool,\n):\n return BeneparComponent(\n model,\n subbatch_max_tokens=subbatch_max_tokens,\n disable_tagger=disable_tagger,\n )\n\n\ndef register_benepar_component_factory():\n # Starting with spaCy 3.0, nlp.add_pipe no longer directly accepts\n # BeneparComponent instances. We must instead register a component factory.\n import spacy\n\n if spacy.__version__.startswith(\"2\"):\n return\n\n from spacy.language import Language\n\n Language.factory(\n \"benepar\",\n default_config={\n \"subbatch_max_tokens\": 500,\n \"disable_tagger\": False,\n },\n func=create_benepar_component,\n )\n\n\ntry:\n register_benepar_component_factory()\nexcept ImportError:\n pass\n"
] | [
[
"numpy.hstack",
"numpy.array",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
himopantsu/shaxybot | [
"5396933cca7fbd26ac45a7a67af43f9476451323"
] | [
"Document2.py"
] | [
"import discord\r\nimport glob\r\nfrom discord.ext import commands,tasks\r\nimport gspread\r\nimport random # おみくじで使用\r\nfrom oauth2client.service_account import ServiceAccountCredentials\r\nimport numpy as np\r\nimport pandas as pd\r\nimport datetime\r\nimport os\r\nimport urllib.request, urllib.error\r\nimport requests\r\nimport matplotlib.pyplot as plt\r\nfrom bs4 import BeautifulSoup\r\nfrom datetime import timedelta\r\n\r\n\r\nscope = ['https://spreadsheets.google.com/feeds',\r\n\t\t\t'https://www.googleapis.com/auth/drive']\r\n\r\nsheet_token = os.environ['SHEET_TOKEN']\r\nbot_token = os.environ['DISCORD_BOT_TOKEN']\r\n\r\nclient = discord.Client() # 接続に使用するオブジェクト\r\n\r\ncredentials = ServiceAccountCredentials.from_json_keyfile_name('okashi-55fd53c0b60c.json', scope)\r\ngc = gspread.authorize(credentials)\r\nSPREADSHEET_KEY = sheet_token\r\nworkbook = gc.open_by_key(SPREADSHEET_KEY)\r\nworksheet = workbook.worksheet(\"!参加\")\r\n\r\ndef dayedit(day):\r\n\tindex = day.find(\"'\")\r\n\treturn day[index:-2]\r\n\t\r\ndef idedit(id):\r\n\treturn id[3:-1]\r\n\t\r\ndef set_cell(user):\r\n\ttoday = datetime.date.today()\r\n\ttoday = today.strftime(\"%Y/%m/%d\")\r\n\tarray = np.zeros((14,2))\r\n\trow_cell = 0\r\n\tcol_cell = 0\r\n\tdf = pd.DataFrame(worksheet.get_all_values())\r\n\t\r\n\tdf1 = df.iloc[:2,5:19].T\r\n\tdf1.columns = [\"_\",\"day\"]\r\n\trow_cell = df1[df1[\"day\"] == today].index[0] + 1\r\n\t\r\n\tprint(row_cell)\r\n\t\t\t\r\n\tdf2 = df.iloc[:,19:22]\r\n\tdf2.columns=[\"count\",\"_\",\"discord\"]\r\n\tcol_cell = df2[df2[\"discord\"] == \"<@!\"+str(user)+\">\"]\r\n\tcount = 0\r\n\tif col_cell.empty:\r\n\t\tcol_cell = 0\r\n\telse:\r\n\t\tcount = col_cell.values\r\n\t\tcount = int(count[0,0]) + 1\r\n\t\tprint(count)\r\n\t\tcol_cell = col_cell.index[0] + 1\r\n\tprint(col_cell)\r\n\treturn row_cell,col_cell,count\r\n\r\ndef uranai(url):\r\n\thtml = urllib.request.urlopen(url)\r\n\tsoup = BeautifulSoup(html)\r\n\tdf2 = pd.DataFrame(soup.find_all(\"a\"))\r\n\tbbbb = str(df2[0][17])\r\n\tbbb = bbbb.split(\"「\")\r\n\tbbb = bbb[1].split(\"」\")\r\n\tbbb = bbb[0]\r\n\r\n\tdf = pd.DataFrame(soup.find_all(\"td\"))\r\n\ttest = str(df[0][1])\r\n\tn = test.split(\"=\")\r\n\r\n\tdf4 = pd.DataFrame(soup.find_all(\"p\"))\r\n\ttest2 = str(df4[0][4])\r\n\tmm = test2.split(\"=\")\r\n\tmmm = mm[0].split(\">\")[1].split(\"<\")[0].split(\"。\")\r\n\r\n\tccc = str(soup.find_all(\"meta\")[7]).split(\"=\")\r\n\tddd = ccc[1][1:-10]\r\n\t\r\n\tlist = []\r\n\tlist.append(n[3].split(\" \")[0][1:-1])\r\n\tlist.append(n[6].split(\" \")[0][1:-1])\r\n\tlist.append(n[9].split(\" \")[0][1:-1])\r\n\tlist.append(n[12].split(\" \")[0][1:-1])\r\n\tlist.append(bbb)\r\n\treturn list,ddd\r\n\r\ndef yari():\r\n\timagepicture = glob.glob(\"maplembuki/1_sensi/yari/*.png\")\r\n\tresult = random.choices(imagepicture, k=1)\r\n\treturn result\r\n\r\[email protected]\r\nasync def on_ready():\r\n\t\"\"\"起動時に通知してくれる処理\"\"\"\r\n\tprint('ログインしました')\r\n\tprint(client.user.name) # ボットの名前\r\n\tprint(client.user.id) # ボットのID\r\n\tprint(discord.__version__) # discord.pyのバージョン\r\n\tprint('------')\r\n\r\[email protected]\r\nasync def on_member_join(member):\r\n\tCHANNEL_ID = 576797690906673154\r\n\tsetumei = 641052750699954187\r\n\tjikosyoukai = 577531306632806415\r\n\tchannel = client.get_channel(CHANNEL_ID)\r\n\tawait channel.send(f\"{member.mention} さんこんにちはしゃくし~です\")\r\n\tawait channel.send(f\"まずは<#{setumei}>の確認と<#{jikosyoukai}>の記入をお願いします\")\r\n\tawait channel.send(f\"これからよろしくお願いします!\")\r\n\t\r\[email protected]\r\nasync def 
on_message(message):\r\n\t\"\"\"メッセージを処理\"\"\"\r\n\tif message.author.bot: # ボットのメッセージをハネる\r\n\t\treturn\r\n\t\r\n\telif message.content == \"!参加\":\r\n\t# チャンネルへメッセージを送信\r\n\t\tcell_1,cell_2,count = set_cell(message.author.id)\r\n\t\tif cell_2 == 0:\r\n\t\t\tawait message.channel.send(f\"{message.author.mention}さん シートにIDがありません\") # f文字列(フォーマット済み文字列リテラル)\r\n\t\t\t\r\n\t\telse:\r\n\t\t\tworksheet.update_cell(cell_2,cell_1,\"〇\")\r\n\t\t\tawait message.channel.send(f\"{message.author.mention}さん 参加確認しました\\n今シーズンの参加回数は累計{count}回です\") # f文字列(フォーマット済み文字列リテラル)\r\n\r\n\telif message.content == \"!星空\":\r\n\t\tif message.author.id == 573911598008107009:\r\n\t\t\tcell_1,cell_2,count = set_cell(506660639964659768)\r\n\t\t\tif cell_2 == 0:\r\n\t\t\t\tawait message.channel.send(f\"星空さん シートにIDがありません\") # f文字列(フォーマット済み文字列リテラル)\r\n\t\t\telse:\r\n\t\t\t\tworksheet.update_cell(cell_2,cell_1,\"〇\")\r\n\t\t\t\tawait message.channel.send(f\"星空さん 参加確認しました\\n今シーズンの参加回数は累計{count}回です\") # f文字列(フォーマット済み文字列リテラル)\r\n\t\telse:await message.channel.send(f\"それはニートちゃんしか使えないよ\")\r\n\t\r\n\telif message.content == \"!きゃすん\":\r\n\t\tembed = discord.Embed(title=\"個通相手募集~\", description=f\"{message.author.mention}さんが個通相手を募集しています!\",color=0xFF6EC7)\r\n\t\tembed.set_thumbnail(url=message.author.avatar_url)\r\n\t\tawait message.channel.send(embed=embed)\r\n\t\r\n\telif message.content == \"!ビビデバビデブー\":\r\n\t\tif message.author.id == 303215008802930699:\r\n\t\t\tday = datetime.date.today() + timedelta(days=(7-datetime.date.today().weekday()))\r\n\t\t\tyoubi = np.array([\"月\",\"火\",\"水\",\"木\",\"金\",\"土\",\"日\",\"月\",\"火\",\"水\",\"木\",\"金\",\"土\",\"日\"])\r\n\t\t\tawait message.channel.send(f\"@everyone 来シーズンの出欠席\\nチェックお願いします\")\r\n\t\t\tawait message.channel.send(f\"日付の下の:relaxed::o::x::question:を押して貰えれば\\nチェック完了です:ok_hand::skin-tone-1::sparkles:\")\r\n\t\t\tawait message.channel.send(f\":relaxed: ▷優先的に参加にします\\n:o:▷参加可能の日\\n:x:▷参加不可の日\\n:question:▷どちらか未定の日\")\r\n\t\t\tawait message.channel.send(f\":o:の人が20人いない場合は:question:の人も呼び出す事があるので出られない場合は無理せず\")\r\n\t\t\tawait message.channel.send(f\"#要塞戦出席表 に出れないと書いて貰えれば待機してくれる人がいるので、お願いします🤲\")\r\n\t\t\tawait message.channel.send(f\"ちなみに、このシステムはほぼ手動なので後から:x:に変更しても気付かない場合があるのでその場合も\\n #要塞戦出席表 に書いてもらえると助かります:strawberry:\")\r\n\t\t\tawait message.channel.send(f\"全部❌でも怒られないので\")\r\n\t\t\tawait message.channel.send(f\"リアクションおしてくれると助かります:macs: \")\r\n\t\t\tawait message.channel.send(f\"残りの今シーズンも頑張りましょう:daynogal:\")\r\n\t\t\tfor i in range(14):\r\n\t\t\t\tq = await message.channel.send(f\"{(day+timedelta(days=i)).month}/{(day+timedelta(days=i)).day}({youbi[i]})\")\r\n\t\t\t\t[await q.add_reaction(i) for i in ('😊','⭕','❌','❓')]\r\n\r\n\t\telse:await message.channel.send(f\"それはまあこしか使えないよ\")\r\n\t\t\r\n\telif message.content == \"!やるじゃん\":\r\n\t\tawait message.channel.send(f\"ありがとう\")\r\n\r\n\telif message.content == \"!えふぃる\":\r\n\t\tawait message.channel.send(f\"ごみしかついてなーい\")\r\n\t\t\r\n\telif message.content == \"!Esprit\":\r\n\t\tawait message.channel.send(f\"抜けたほうがいいですよ\")\r\n\t\t\r\n\telif message.content == \"!えっち\":\r\n\t\tawait message.channel.send(f'きゃー!{message.author.mention}さんのえっち!!', file=discord.File(\"4ba65a1c.jpg\"))\r\n\t\t\r\n\telif message.content == \"!くるみ\":\r\n\t\tawait message.channel.send(f'zeulon、私たちはもう終わりよ', file=discord.File(\"kurumi.png\"))\r\n\t\t\r\n\telif message.content == \"!ドッグラン\":\r\n\t\tawait message.channel.send(file=discord.File(\"dogrun.jpg\"))\r\n\t\t\r\n\telif message.content == \"!ヘリコプター\":\r\n\t\tawait 
message.channel.send(file=discord.File(\"herineet.png\"))\r\n\r\n\telif message.content == \"!まあこ\":\r\n\t\tawait message.channel.send(f\"寝てるよ\")\r\n\t\t\r\n\telif message.content == \"!ハンバーグ\":\r\n\t\tawait message.channel.send(f\"ハンバアアアアアアアアアアアアアアアアアアアアアアアアアアアグ!!!!!!\")\r\n\t\r\n\telif message.content == \"!やってないじゃん\":\r\n\t\tawait message.channel.send(f\"ごめんなさい\")\r\n\t\t\r\n\telif message.content == \"!ゆきやこんこ\":\r\n\t\tawait message.channel.send(f\"⛄雪や⛄\\n\\n❄❅❆❄❅❆❄❅❆❄\\n▉▉▉ ◥◣ ▉▉▉ \\n ▉ ◢◤ ▉ \\n▉▉▉ ◢▉◤ ▉▉▉ \\n❄❅❆❄❅❆❄❅❆❄\\n\\n🚽ケツから🚽\\n\\n💩💩💩💩💩💩💩💩\\n ▉\\n▉▉▉▉◥◣ ▉▉▉\\n▉ ◢◤ ◢◤ ▉\\n ◢◤ ◢▉◤ ▉▉▉\\n💩💩💩💩💩💩💩💩\")\r\n\telif message.content == \"juruli\":\r\n\t\tawait message.channel.send(f\"そのキャラはキャラデリしました\")\r\n\t\t\r\n\telif message.content == \"!ままん\":\r\n\t\tawait message.channel.send(f\"ままぁ\\nあああん\\nあああああん\\nままああああ\\nああん\\nあああああああああああああああああああああああああああああああああああああああああああ\\nあああああああああああああああああああああああああああbb\")\r\n\t\t\r\n\telif message.content == \"!にーと\":\r\n\t\tawait message.channel.send(f\"にーとくさい\")\t\r\n\t\t\r\n\telif message.content == \"!マルガリタ\":\r\n\t\tawait message.channel.send(f\"抜けませんでした\")\r\n\t\r\n\telif message.content == \"!かてぽん\":\r\n\t\tawait message.channel.send(f\"ブルブルブルブルアイ!:v:(՞ਊ՞:v:三:v:՞ਊ՞):v:アイ!:v:(՞ਊ՞:v:三:v:՞ਊ՞):v:ブ・ル・ベ・リ・アイ!!:v:(՞ਊ՞:v:三:v:՞ਊ՞):v:ブルブルブルブルアイ!:v:(՞ਊ՞:v:三:v:՞ਊ՞):v:アイ!:v:(՞ਊ՞:v:三:v:՞ਊ՞):v:ブ・ル・ベ・リ・アイ!!:v:(՞ਊ՞:v:三:v:՞ਊ՞):v:\")\r\n\t\r\n\t\r\n\telif message.content == \"!投票\":\r\n\t# リアクションアイコンを付けたい\r\n\t\tmsg = await message.channel.send(\"あなたは右利きですか?\")\r\n\t\t[await msg.add_reaction(i) for i in ('⭕')] # for文の内包表記\r\n\r\n\telif message.content == \"!おみくじ\":\r\n\t\t# Embedを使ったメッセージ送信 と ランダムで要素を選択\r\n\t\tembed = discord.Embed(title=\"おみくじ\", description=f\"{message.author.mention}さんの今日の運勢は!\",color=0x2ECC69)\r\n\t\tembed.set_thumbnail(url=message.author.avatar_url)\r\n\t\tembed.add_field(name=\"[運勢] \", value=random.choice(('大吉', '吉', '凶', '大凶')), inline=False)\r\n\t\tawait message.channel.send(embed=embed)\r\n\r\n\telif message.content == \"!ダイス\":\r\n\t\tembed = discord.Embed(title=\"ダイス\", description=f\"{message.author.mention}さんの結果\",color=0x2ECC69)\r\n\t\tembed.set_thumbnail(url=message.author.avatar_url)\r\n\t\tembed.add_field(name=\"[結果] \", value=random.randint(0,100), inline=False)\r\n\t\tawait message.channel.send(embed=embed)\r\n\t\t\r\n\telif message.content == \"!ダイレクトメッセージ\":\r\n\t\t# ダイレクトメッセージ送信\r\n\t\tdm = await message.author.create_dm()\r\n\t\tawait dm.send(f\"{message.author.mention}さんにダイレクトメッセージ\")\r\n\r\n\telif message.content == \"!おひつじ座\":\r\n\t\turl = \"https://fortune.yahoo.co.jp/12astro/aries\"\r\n\t\tkekka,ddd = uranai(url)\r\n\t\tembed = discord.Embed(title=\"星座占い\", description=f\"{message.author.mention}さんの今日の運勢は!\",color=0x00FF00)\r\n\t\tembed.set_thumbnail(url=message.author.avatar_url)\r\n\t\tembed.add_field(name=\"総合運\",value=kekka[0],inline=False)\r\n\t\tembed.add_field(name=\"恋愛運\",value=kekka[1],inline=False)\r\n\t\tembed.add_field(name=\"金運\",value=kekka[2],inline=False)\r\n\t\tembed.add_field(name=\"仕事運\",value=kekka[3],inline=False)\r\n\t\tembed.add_field(name=\"コメント\",value=kekka[4],inline=False)\r\n\t\tembed.add_field(name=\"====\",value=ddd,inline=False)\r\n\t\tawait message.channel.send(embed=embed)\r\n\r\n\telif message.content == \"!おうし座\":\r\n\t\turl = \"https://fortune.yahoo.co.jp/12astro/taurus\"\r\n\t\tkekka,ddd = uranai(url)\r\n\t\tembed = discord.Embed(title=\"星座占い\", 
description=f\"{message.author.mention}さんの今日の運勢は!\",color=0x00FF00)\r\n\t\tembed.set_thumbnail(url=message.author.avatar_url)\r\n\t\tembed.add_field(name=\"総合運\",value=kekka[0],inline=False)\r\n\t\tembed.add_field(name=\"恋愛運\",value=kekka[1],inline=False)\r\n\t\tembed.add_field(name=\"金運\",value=kekka[2],inline=False)\r\n\t\tembed.add_field(name=\"仕事運\",value=kekka[3],inline=False)\r\n\t\tembed.add_field(name=\"コメント\",value=kekka[4],inline=False)\r\n\t\tembed.add_field(name=\"====\",value=ddd,inline=False)\r\n\t\tawait message.channel.send(embed=embed)\r\n\r\n\telif message.content == \"!ふたご座\":\r\n\t\turl = \"https://fortune.yahoo.co.jp/12astro/gemini\"\r\n\t\tkekka,ddd = uranai(url)\r\n\t\tembed = discord.Embed(title=\"星座占い\", description=f\"{message.author.mention}さんの今日の運勢は!\",color=0x00FF00)\r\n\t\tembed.set_thumbnail(url=message.author.avatar_url)\r\n\t\tembed.add_field(name=\"総合運\",value=kekka[0],inline=False)\r\n\t\tembed.add_field(name=\"恋愛運\",value=kekka[1],inline=False)\r\n\t\tembed.add_field(name=\"金運\",value=kekka[2],inline=False)\r\n\t\tembed.add_field(name=\"仕事運\",value=kekka[3],inline=False)\r\n\t\tembed.add_field(name=\"コメント\",value=kekka[4],inline=False)\r\n\t\tembed.add_field(name=\"====\",value=ddd,inline=False)\r\n\t\tawait message.channel.send(embed=embed)\r\n\r\n\telif message.content == \"!かに座\":\r\n\t\turl = \"https://fortune.yahoo.co.jp/12astro/cancer\"\r\n\t\tkekka,ddd = uranai(url)\r\n\t\tembed = discord.Embed(title=\"星座占い\", description=f\"{message.author.mention}さんの今日の運勢は!\",color=0x00FF00)\r\n\t\tembed.set_thumbnail(url=message.author.avatar_url)\r\n\t\tembed.add_field(name=\"総合運\",value=kekka[0],inline=False)\r\n\t\tembed.add_field(name=\"恋愛運\",value=kekka[1],inline=False)\r\n\t\tembed.add_field(name=\"金運\",value=kekka[2],inline=False)\r\n\t\tembed.add_field(name=\"仕事運\",value=kekka[3],inline=False)\r\n\t\tembed.add_field(name=\"コメント\",value=kekka[4],inline=False)\r\n\t\tembed.add_field(name=\"====\",value=ddd,inline=False)\r\n\t\tawait message.channel.send(embed=embed)\r\n\r\n\telif message.content == \"!しし座\":\r\n\t\turl = \"https://fortune.yahoo.co.jp/12astro/leo\"\r\n\t\tkekka,ddd = uranai(url)\r\n\t\tembed = discord.Embed(title=\"星座占い\", description=f\"{message.author.mention}さんの今日の運勢は!\",color=0x00FF00)\r\n\t\tembed.set_thumbnail(url=message.author.avatar_url)\r\n\t\tembed.add_field(name=\"総合運\",value=kekka[0],inline=False)\r\n\t\tembed.add_field(name=\"恋愛運\",value=kekka[1],inline=False)\r\n\t\tembed.add_field(name=\"金運\",value=kekka[2],inline=False)\r\n\t\tembed.add_field(name=\"仕事運\",value=kekka[3],inline=False)\r\n\t\tembed.add_field(name=\"コメント\",value=kekka[4],inline=False)\r\n\t\tembed.add_field(name=\"====\",value=ddd,inline=False)\r\n\t\tawait message.channel.send(embed=embed)\r\n\r\n\telif message.content == \"!おとめ座\":\r\n\t\turl = \"https://fortune.yahoo.co.jp/12astro/virgo\"\r\n\t\tkekka,ddd = uranai(url)\r\n\t\tembed = discord.Embed(title=\"星座占い\", description=f\"{message.author.mention}さんの今日の運勢は!\",color=0x00FF00)\r\n\t\tembed.set_thumbnail(url=message.author.avatar_url)\r\n\t\tembed.add_field(name=\"総合運\",value=kekka[0],inline=False)\r\n\t\tembed.add_field(name=\"恋愛運\",value=kekka[1],inline=False)\r\n\t\tembed.add_field(name=\"金運\",value=kekka[2],inline=False)\r\n\t\tembed.add_field(name=\"仕事運\",value=kekka[3],inline=False)\r\n\t\tembed.add_field(name=\"コメント\",value=kekka[4],inline=False)\r\n\t\tembed.add_field(name=\"====\",value=ddd,inline=False)\r\n\t\tawait message.channel.send(embed=embed)\r\n\r\n\telif message.content == 
\"!てんびん座\":\r\n\t\turl = \"https://fortune.yahoo.co.jp/12astro/libra\"\r\n\t\tkekka,ddd = uranai(url)\r\n\t\tembed = discord.Embed(title=\"星座占い\", description=f\"{message.author.mention}さんの今日の運勢は!\",color=0x00FF00)\r\n\t\tembed.set_thumbnail(url=message.author.avatar_url)\r\n\t\tembed.add_field(name=\"総合運\",value=kekka[0],inline=False)\r\n\t\tembed.add_field(name=\"恋愛運\",value=kekka[1],inline=False)\r\n\t\tembed.add_field(name=\"金運\",value=kekka[2],inline=False)\r\n\t\tembed.add_field(name=\"仕事運\",value=kekka[3],inline=False)\r\n\t\tembed.add_field(name=\"コメント\",value=kekka[4],inline=False)\r\n\t\tembed.add_field(name=\"====\",value=ddd,inline=False)\r\n\t\tawait message.channel.send(embed=embed)\r\n\r\n\telif message.content == \"!さそり座\":\r\n\t\turl = \"https://fortune.yahoo.co.jp/12astro/scorpio\"\r\n\t\tkekka,ddd = uranai(url)\r\n\t\tembed = discord.Embed(title=\"星座占い\", description=f\"{message.author.mention}さんの今日の運勢は!\",color=0x00FF00)\r\n\t\tembed.set_thumbnail(url=message.author.avatar_url)\r\n\t\tembed.add_field(name=\"総合運\",value=kekka[0],inline=False)\r\n\t\tembed.add_field(name=\"恋愛運\",value=kekka[1],inline=False)\r\n\t\tembed.add_field(name=\"金運\",value=kekka[2],inline=False)\r\n\t\tembed.add_field(name=\"仕事運\",value=kekka[3],inline=False)\r\n\t\tembed.add_field(name=\"コメント\",value=kekka[4],inline=False)\r\n\t\tembed.add_field(name=\"====\",value=ddd,inline=False)\r\n\t\tawait message.channel.send(embed=embed)\r\n\r\n\telif message.content == \"!いて座\":\r\n\t\turl = \"https://fortune.yahoo.co.jp/12astro/sagittarius\"\r\n\t\tkekka,ddd = uranai(url)\r\n\t\tembed = discord.Embed(title=\"星座占い\", description=f\"{message.author.mention}さんの今日の運勢は!\",color=0x00FF00)\r\n\t\tembed.set_thumbnail(url=message.author.avatar_url)\r\n\t\tembed.add_field(name=\"総合運\",value=kekka[0],inline=False)\r\n\t\tembed.add_field(name=\"恋愛運\",value=kekka[1],inline=False)\r\n\t\tembed.add_field(name=\"金運\",value=kekka[2],inline=False)\r\n\t\tembed.add_field(name=\"仕事運\",value=kekka[3],inline=False)\r\n\t\tembed.add_field(name=\"コメント\",value=kekka[4],inline=False)\r\n\t\tembed.add_field(name=\"====\",value=ddd,inline=False)\r\n\t\tawait message.channel.send(embed=embed)\r\n\r\n\telif message.content == \"!やぎ座\":\r\n\t\turl = \"https://fortune.yahoo.co.jp/12astro/capricorn\"\r\n\t\tkekka,ddd = uranai(url)\r\n\t\tembed = discord.Embed(title=\"星座占い\", description=f\"{message.author.mention}さんの今日の運勢は!\",color=0x00FF00)\r\n\t\tembed.set_thumbnail(url=message.author.avatar_url)\r\n\t\tembed.add_field(name=\"総合運\",value=kekka[0],inline=False)\r\n\t\tembed.add_field(name=\"恋愛運\",value=kekka[1],inline=False)\r\n\t\tembed.add_field(name=\"金運\",value=kekka[2],inline=False)\r\n\t\tembed.add_field(name=\"仕事運\",value=kekka[3],inline=False)\r\n\t\tembed.add_field(name=\"コメント\",value=kekka[4],inline=False)\r\n\t\tembed.add_field(name=\"====\",value=ddd,inline=False)\r\n\t\tawait message.channel.send(embed=embed)\r\n\r\n\telif message.content == \"!みずがめ座\":\r\n\t\turl = \"https://fortune.yahoo.co.jp/12astro/aquarius\"\r\n\t\tkekka,ddd = uranai(url)\r\n\t\tembed = discord.Embed(title=\"星座占い\", 
description=f\"{message.author.mention}さんの今日の運勢は!\",color=0x00FF00)\r\n\t\tembed.set_thumbnail(url=message.author.avatar_url)\r\n\t\tembed.add_field(name=\"総合運\",value=kekka[0],inline=False)\r\n\t\tembed.add_field(name=\"恋愛運\",value=kekka[1],inline=False)\r\n\t\tembed.add_field(name=\"金運\",value=kekka[2],inline=False)\r\n\t\tembed.add_field(name=\"仕事運\",value=kekka[3],inline=False)\r\n\t\tembed.add_field(name=\"コメント\",value=kekka[4],inline=False)\r\n\t\tembed.add_field(name=\"====\",value=ddd,inline=False)\r\n\t\tawait message.channel.send(embed=embed)\r\n\r\n\telif message.content == \"!うお座\":\r\n\t\turl = \"https://fortune.yahoo.co.jp/12astro/pisces\"\r\n\t\tkekka,ddd = uranai(url)\r\n\t\tembed = discord.Embed(title=\"星座占い\", description=f\"{message.author.mention}さんの今日の運勢は!\",color=0x00FF00)\r\n\t\tembed.set_thumbnail(url=message.author.avatar_url)\r\n\t\tembed.add_field(name=\"総合運\",value=kekka[0],inline=False)\r\n\t\tembed.add_field(name=\"恋愛運\",value=kekka[1],inline=False)\r\n\t\tembed.add_field(name=\"金運\",value=kekka[2],inline=False)\r\n\t\tembed.add_field(name=\"仕事運\",value=kekka[3],inline=False)\r\n\t\tembed.add_field(name=\"コメント\",value=kekka[4],inline=False)\r\n\t\tembed.add_field(name=\"====\",value=ddd,inline=False)\r\n\t\tawait message.channel.send(embed=embed)\r\n\r\[email protected](minutes=15)\r\nasync def loop():\r\n\tscope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']\r\n\tsheet_token = os.environ['SHEET_TOKEN']\r\n\tbot_token = os.environ['DISCORD_BOT_TOKEN']\r\n\tclient = discord.Client() # 接続に使用するオブジェクト\r\n\tcredentials = ServiceAccountCredentials.from_json_keyfile_name('okashi-55fd53c0b60c.json', scope)\r\n\tgc = gspread.authorize(credentials)\r\n\tSPREADSHEET_KEY = sheet_token\r\n\tworkbook = gc.open_by_key(SPREADSHEET_KEY)\r\n\tworksheet = workbook.worksheet(\"!参加\")\r\n\r\nloop.start()\r\n# botの接続と起動\r\n# (botアカウントのアクセストークンを入れてください)\r\nclient.run(bot_token)\r\n"
] | [
[
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
c4dt/Garfield | [
"952c124490ba524e81ef45d5d7565ec408697bfc"
] | [
"pytorch_impl/libs/garfieldpp/worker.py"
] | [
"# coding: utf-8\n###\n # @file worker.py\n # @author Arsany Guirguis <[email protected]>\n #\n # @section LICENSE\n #\n # Copyright (c) 2020 Arsany Guirguis.\n #\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n #\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n #\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n #\n # @section DESCRIPTION\n #\n # Worker class.\n###\n\n#!/usr/bin/env python\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport garfieldpp.tools as tools\nfrom garfieldpp.datasets import DatasetManager\nimport torch.distributed.rpc as rpc\nimport torch.optim as optim\nfrom torch.distributed.rpc import RRef, rpc_async, remote\nfrom time import sleep, time\nimport sys\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass Worker:\n \"\"\" Byzantine-resilient worker \"\"\"\n def __init__(self, rank, world_size, num_workers, batch_size, model, dataset, loss, train_size=None):\n \"\"\" Constructor of worker Object\n Args\n rank unique ID of this worker node in the deployment\n world_size total number of nodes in the deployment\n num_workers total number of workers in the deployment\n batch_size size of the batch to be used for training\n model the name of the NN model to be used FIXME: not used?\n dataset the name of the dataset to be used for training\n loss the name of the loss function to be applied\n train_size number of training samples to partition between workers (if None, use all training set)\n \"\"\"\n if torch.cuda.device_count() > 0:\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu:0\")\n logger.debug(f\"Using CPU at rank {rank}\")\n self.rank = rank\n self.batch_size = batch_size\n self.loss = tools.select_loss(loss)\n manager = DatasetManager(dataset, batch_size, num_workers, world_size, rank, train_size)\n self.train_set = manager.get_train_set() #train_set is a list of pairs: (data, target)\n self.num_train_samples = len(self.train_set)\n tools.worker_instance = self\n\n def compute_gradients(self, iter_num, model):\n \"\"\" compute gradients using the submitted model and a local batch size\n Args\n iter_num the number of current iteration; this determines the local batch to be used for training\n model the model state using which training should happen\n \"\"\"\n with torch.autograd.profiler.profile(enabled=False) as prof:\n #First, fetch the correct batch from the training set, using iter_num\n model = model.to(self.device)\n model.train()\n data, target = self.train_set[iter_num%self.num_train_samples]\n data, target = data.to(self.device), target.to(self.device)\n 
output = model(data)\n loss = self.loss(output, target)\n loss.backward()\n #Now, we need to extract the full gradient from the model parameters\n grad = [torch.reshape(param.grad, (-1,)) for param in model.parameters()]\n grad_cat = torch.cat(grad).to(\"cpu\")\n# print(prof.key_averages().table(sort_by=\"self_cpu_time_total\"))\n return self.rank, grad_cat, loss.item()\n"
] | [
[
"torch.cat",
"torch.reshape",
"torch.device",
"torch.cuda.device_count",
"torch.autograd.profiler.profile"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SokolovYaroslav/tranX | [
"73713c261a184c78ea7804f235c0ffb37385a3c4",
"73713c261a184c78ea7804f235c0ffb37385a3c4"
] | [
"model/transformer_utils.py",
"model/nn_utils.py"
] | [
"# coding=utf-8\nfrom __future__ import print_function\n\nimport copy\nimport math\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass EncoderDecoder(nn.Module):\n \"\"\"\n A standard Encoder-Decoder architecture. Base for this and many\n other models.\n \"\"\"\n\n def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):\n super(EncoderDecoder, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n self.src_embed = src_embed\n self.tgt_embed = tgt_embed\n self.generator = generator\n\n def forward(self, src, tgt, src_mask, tgt_mask):\n \"\"\"Take in and process masked src and target sequences.\"\"\"\n return self.decode(self.encode(src, src_mask), src_mask, tgt, tgt_mask)\n\n def encode(self, src, src_mask):\n return self.encoder(self.src_embed(src), src_mask)\n\n def decode(self, memory, src_mask, tgt, tgt_mask):\n return self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask)\n\n\nclass Generator(nn.Module):\n \"\"\"Define standard linear + softmax generation step.\"\"\"\n\n def __init__(self, d_model, vocab):\n super(Generator, self).__init__()\n self.projection = nn.Linear(d_model, vocab)\n\n def forward(self, x):\n return F.log_softmax(self.projection(x), dim=-1)\n\n\ndef clones(module, num_layers):\n \"\"\"Produce N identical layers.\"\"\"\n return nn.ModuleList([copy.deepcopy(module) for _ in range(num_layers)])\n\n\nclass Encoder(nn.Module):\n \"\"\"Core encoder is a stack of N layers\"\"\"\n\n def __init__(self, layer, num_layers):\n super(Encoder, self).__init__()\n self.layers = clones(layer, num_layers)\n self.norm = nn.LayerNorm(layer.size)\n\n def forward(self, x, mask):\n \"\"\"Pass the input (and mask) through each layer in turn.\"\"\"\n for layer in self.layers:\n x = layer(x, mask)\n return self.norm(x)\n\n\nclass SublayerConnection(nn.Module):\n \"\"\"\n A residual connection followed by a layer norm.\n Note for code simplicity the norm is first as opposed to last.\n \"\"\"\n\n def __init__(self, size, dropout):\n super(SublayerConnection, self).__init__()\n self.norm = nn.LayerNorm(size)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x, sublayer):\n \"\"\"Apply residual connection to any sublayer with the same size.\"\"\"\n return x + self.dropout(sublayer(self.norm(x)))\n\n\nclass EncoderLayer(nn.Module):\n \"\"\"Encoder is made up of self-attn and feed forward (defined below)\"\"\"\n\n def __init__(self, size, self_attn, feed_forward, dropout):\n super(EncoderLayer, self).__init__()\n self.self_attn = self_attn\n self.feed_forward = feed_forward\n self.sublayer = clones(SublayerConnection(size, dropout), 2)\n self.size = size\n\n def forward(self, x, mask):\n \"\"\"Follow Figure 1 (left) for connections.\"\"\"\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))\n return self.sublayer[1](x, self.feed_forward)\n\n\nclass Decoder(nn.Module):\n \"\"\"Generic N layer decoder with masking.\"\"\"\n\n def __init__(self, layer, N):\n super(Decoder, self).__init__()\n self.layers = clones(layer, N)\n self.norm = nn.LayerNorm(layer.size)\n\n def forward(self, x, memory, parent_indxs, src_mask, tgt_mask):\n for layer in self.layers:\n x = layer(x, memory, parent_indxs, src_mask, tgt_mask)\n return self.norm(x)\n\n\nclass DecoderLayer(nn.Module):\n \"\"\"Decoder is made of self-attn, src-attn, and feed forward (defined below)\"\"\"\n\n def __init__(self, size, self_attn, src_attn, feed_forward, dropout):\n super(DecoderLayer, self).__init__()\n self.size = size\n 
self.self_attn = self_attn\n self.src_attn = src_attn\n self.feed_forward = feed_forward\n self.sublayer = clones(SublayerConnection(size, dropout), 3)\n\n def forward(self, x, memory, parent_indxs, src_mask, tgt_mask):\n \"\"\"Follow Figure 1 (right) for connections.\"\"\"\n m = memory\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, parent_indxs, tgt_mask))\n x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))\n return self.sublayer[2](x, self.feed_forward)\n\n\ndef subsequent_mask(size):\n \"\"\"Mask out subsequent positions.\"\"\"\n attn_shape = (1, size, size)\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype(\"uint8\")\n return torch.from_numpy(subsequent_mask) == 0\n\n\ndef attention(query, key, value, mask=None, dropout=None):\n \"\"\"Compute 'Scaled Dot Product Attention'\"\"\"\n d_k = query.size(-1)\n scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)\n if mask is not None:\n scores = scores.masked_fill(mask == 0, -1e9)\n p_attn = F.softmax(scores, dim=-1)\n if dropout is not None:\n p_attn = dropout(p_attn)\n ret = torch.matmul(p_attn, value), p_attn\n return ret\n\n\nclass MultiHeadedAttention(nn.Module):\n def __init__(self, h, d_model, dropout=0.1):\n \"\"\"Take in model size and number of heads.\"\"\"\n super(MultiHeadedAttention, self).__init__()\n assert d_model % h == 0\n # We assume d_v always equals d_k\n self.d_k = d_model // h\n self.h = h\n self.linears = clones(nn.Linear(d_model, d_model), 4)\n self.attn = None\n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, query, key, value, mask=None):\n \"\"\"Implements Figure 2\"\"\"\n if mask is not None:\n # Same mask applied to all h heads.\n mask = mask.unsqueeze(1)\n n_batches = query.size(0)\n\n # 1) Do all the linear projections in batch from d_model => h x d_k\n query, key, value = [\n l(x).view(n_batches, -1, self.h, self.d_k).transpose(1, 2)\n for l, x in zip(self.linears, (query, key, value))\n ]\n\n # 2) Apply attention on all the projected vectors in batch.\n x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)\n\n # 3) \"Concat\" using a view and apply a final linear.\n x = x.transpose(1, 2).contiguous().view(n_batches, -1, self.h * self.d_k)\n return self.linears[-1](x)\n\n\nclass StrictMultiHeadedAttention(nn.Module):\n def __init__(self, h, h_strict, d_model, dropout=0.1):\n \"\"\"Take in model size and number of heads.\"\"\"\n super(StrictMultiHeadedAttention, self).__init__()\n assert d_model % h == 0\n # We assume d_v always equals d_k\n self.d_k = d_model // h\n self.h = h\n self.h_strict = h_strict\n\n self.q_linear = nn.Linear(d_model, d_model)\n self.k_linear = nn.Linear(d_model, d_model)\n self.v_linear = nn.Linear(d_model, (self.h + self.h_strict) * self.d_k)\n self.out_linear = nn.Linear((self.h + self.h_strict) * self.d_k, d_model)\n\n self.attn = None\n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, query, key, value, strict_value_indices, mask=None):\n \"\"\"Implements Figure 2\"\"\"\n if mask is not None:\n # Same mask applied to all h heads.\n mask = mask.unsqueeze(1)\n n_batches = query.size(0)\n\n # 1) Do all the linear projections in batch from d_model => h x d_k\n query, key = [\n l(x).view(n_batches, -1, self.h, self.d_k).transpose(1, 2)\n for l, x in zip((self.q_linear, self.k_linear), (query, key))\n ]\n value = self.v_linear(value).view(n_batches, -1, self.h + self.h_strict, self.d_k).transpose(1, 2)\n value, value_strict = value[:, : self.h], value[:, self.h :]\n\n # 2) Apply 
attention on all the projected vectors in batch.\n x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)\n\n # 3) \"Concat\" using a view and apply a final linear.\n x = x.transpose(1, 2).contiguous().view(n_batches, -1, self.h * self.d_k)\n to_cat = [\n value_strict[np.repeat(np.arange(value_strict.shape[0])[:, None], value_strict.shape[1], axis=1), i, ind]\n for i, ind in enumerate(strict_value_indices)\n ]\n x = torch.cat([x] + to_cat, dim=-1)\n return self.out_linear(x)\n\n\nclass PositionwiseFeedForward(nn.Module):\n \"\"\"Implements FFN equation.\"\"\"\n\n def __init__(self, d_model, d_ff, dropout=0.1):\n super(PositionwiseFeedForward, self).__init__()\n self.w_1 = nn.Linear(d_model, d_ff)\n self.w_2 = nn.Linear(d_ff, d_model)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n return self.w_2(self.dropout(F.relu(self.w_1(x))))\n\n\nclass Embeddings(nn.Module):\n def __init__(self, d_model, vocab):\n super(Embeddings, self).__init__()\n self.lut = nn.Embedding(vocab, d_model)\n self.d_model = d_model\n\n def forward(self, x):\n return self.lut(x) * math.sqrt(self.d_model)\n\n\nclass PositionalEncoding(nn.Module):\n \"\"\"Implement the PE function.\"\"\"\n\n def __init__(self, d_model, dropout, max_len=5000):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n # Compute the positional encodings once in log space.\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, out=torch.DoubleTensor()).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2, out=torch.DoubleTensor()) * -(math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0)\n self.register_buffer(\"pe\", pe)\n\n def forward(self, x):\n x = x + self.pe[:, : x.size(1)].clone().detach()\n return self.dropout(x)\n",
"# coding=utf-8\n\nimport torch\nimport torch.nn.functional as F\nimport torch.nn.init as init\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport numpy as np\n\nfrom six.moves import xrange\n\n\ndef dot_prod_attention(h_t, src_encoding, src_encoding_att_linear, mask=None):\n \"\"\"\n :param h_t: (batch_size, hidden_size)\n :param src_encoding: (batch_size, src_sent_len, hidden_size * 2)\n :param src_encoding_att_linear: (batch_size, src_sent_len, hidden_size)\n :param mask: (batch_size, src_sent_len)\n \"\"\"\n # (batch_size, src_sent_len)\n att_weight = torch.bmm(src_encoding_att_linear, h_t.unsqueeze(2)).squeeze(2)\n if mask is not None:\n att_weight.data.masked_fill_(mask, -float('inf'))\n att_weight = F.softmax(att_weight, dim=-1)\n\n att_view = (att_weight.size(0), 1, att_weight.size(1))\n # (batch_size, hidden_size)\n ctx_vec = torch.bmm(att_weight.view(*att_view), src_encoding).squeeze(1)\n\n return ctx_vec, att_weight\n\n\ndef length_array_to_mask_tensor(length_array, cuda=False, valid_entry_has_mask_one=False):\n max_len = max(length_array)\n batch_size = len(length_array)\n\n mask = np.zeros((batch_size, max_len), dtype=np.uint8)\n for i, seq_len in enumerate(length_array):\n if valid_entry_has_mask_one:\n mask[i][:seq_len] = 1\n else:\n mask[i][seq_len:] = 1\n\n mask = torch.ByteTensor(mask)\n return mask.cuda() if cuda else mask\n\n\ndef input_transpose(sents, pad_token):\n \"\"\"\n transform the input List[sequence] of size (batch_size, max_sent_len)\n into a list of size (max_sent_len, batch_size), with proper padding\n \"\"\"\n max_len = max(len(s) for s in sents)\n batch_size = len(sents)\n\n sents_t = []\n for i in xrange(max_len):\n sents_t.append([sents[k][i] if len(sents[k]) > i else pad_token for k in xrange(batch_size)])\n\n return sents_t\n\n\ndef word2id(sents, vocab):\n if type(sents[0]) == list:\n return [[vocab[w] for w in s] for s in sents]\n else:\n return [vocab[w] for w in sents]\n\n\ndef id2word(sents, vocab):\n if type(sents[0]) == list:\n return [[vocab.id2word[w] for w in s] for s in sents]\n else:\n return [vocab.id2word[w] for w in sents]\n\n\ndef to_input_variable(sequences, vocab, cuda=False, training=True, append_boundary_sym=False):\n \"\"\"\n given a list of sequences,\n return a tensor of shape (max_sent_len, batch_size)\n \"\"\"\n if append_boundary_sym:\n sequences = [['<s>'] + seq + ['</s>'] for seq in sequences]\n\n word_ids = word2id(sequences, vocab)\n sents_t = input_transpose(word_ids, vocab['<pad>'])\n\n sents_var = torch.tensor(sents_t, requires_grad=False, dtype=torch.long)\n if cuda:\n sents_var = sents_var.cuda()\n\n return sents_var\n\n\ndef variable_constr(x, v, cuda=False):\n return Variable(torch.cuda.x(v)) if cuda else Variable(torch.x(v))\n\n\ndef batch_iter(examples, batch_size, shuffle=False):\n index_arr = np.arange(len(examples))\n if shuffle:\n np.random.shuffle(index_arr)\n\n batch_num = int(np.ceil(len(examples) / float(batch_size)))\n for batch_id in xrange(batch_num):\n batch_ids = index_arr[batch_size * batch_id: batch_size * (batch_id + 1)]\n batch_examples = [examples[i] for i in batch_ids]\n\n yield batch_examples\n\n\ndef isnan(data):\n data = data.cpu().numpy()\n return np.isnan(data).any() or np.isinf(data).any()\n\n\ndef log_sum_exp(inputs, dim=None, keepdim=False):\n \"\"\"Numerically stable logsumexp.\n source: https://github.com/pytorch/pytorch/issues/2591\n\n Args:\n inputs: A Variable with any shape.\n dim: An integer.\n keepdim: A boolean.\n\n Returns:\n 
Equivalent of log(sum(exp(inputs), dim=dim, keepdim=keepdim)).\n \"\"\"\n # For a 1-D array x (any array along a single dimension),\n # log sum exp(x) = s + log sum exp(x - s)\n # with s = max(x) being a common choice.\n\n if dim is None:\n inputs = inputs.view(-1)\n dim = 0\n s, _ = torch.max(inputs, dim=dim, keepdim=True)\n outputs = s + (inputs - s).exp().sum(dim=dim, keepdim=True).log()\n if not keepdim:\n outputs = outputs.squeeze(dim)\n return outputs\n\n\ndef uniform_init(lower, upper, params):\n for p in params:\n p.data.uniform_(lower, upper)\n\n\ndef glorot_init(params):\n for p in params:\n if len(p.data.size()) > 1:\n init.xavier_normal_(p.data)\n\n\ndef identity(x):\n return x\n\n\nclass LabelSmoothing(nn.Module):\n \"\"\"Implement label smoothing.\n\n Reference: the annotated transformer\n \"\"\"\n\n def __init__(self, smoothing, tgt_vocab_size, ignore_indices=None):\n if ignore_indices is None: ignore_indices = []\n\n super(LabelSmoothing, self).__init__()\n\n self.criterion = nn.KLDivLoss(size_average=False, reduce=False)\n smoothing_value = smoothing / float(tgt_vocab_size - 1 - len(ignore_indices))\n one_hot = torch.zeros((tgt_vocab_size,)).fill_(smoothing_value)\n for idx in ignore_indices:\n one_hot[idx] = 0.\n\n self.confidence = 1.0 - smoothing\n self.register_buffer('one_hot', one_hot.unsqueeze(0))\n\n def forward(self, model_prob, target):\n # (batch_size, *, tgt_vocab_size)\n dim = list(model_prob.size())[:-1] + [1]\n true_dist = Variable(self.one_hot, requires_grad=False).repeat(*dim)\n true_dist.scatter_(-1, target.unsqueeze(-1), self.confidence)\n # true_dist = model_prob.data.clone()\n # true_dist.fill_(self.smoothing / (model_prob.size(1) - 1)) # FIXME: no label smoothing for <pad> <s> and </s>\n # true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)\n\n return self.criterion(model_prob, true_dist).sum(dim=-1)\n\n\nclass FeedForward(nn.Module):\n \"\"\"Feed forward neural network adapted from AllenNLP\"\"\"\n\n def __init__(self, input_dim, num_layers, hidden_dims, activations, dropout):\n super(FeedForward, self).__init__()\n\n if not isinstance(hidden_dims, list):\n hidden_dims = [hidden_dims] * num_layers # type: ignore\n if not isinstance(activations, list):\n activations = [activations] * num_layers # type: ignore\n if not isinstance(dropout, list):\n dropout = [dropout] * num_layers # type: ignore\n\n self.activations = activations\n input_dims = [input_dim] + hidden_dims[:-1]\n linear_layers = []\n for layer_input_dim, layer_output_dim in zip(input_dims, hidden_dims):\n linear_layers.append(nn.Linear(layer_input_dim, layer_output_dim))\n\n self.linear_layers = nn.ModuleList(linear_layers)\n dropout_layers = [nn.Dropout(p=value) for value in dropout]\n self.dropout = nn.ModuleList(dropout_layers)\n self.output_dim = hidden_dims[-1]\n self.input_dim = input_dim\n\n def forward(self, x):\n output = x\n for layer, activation, dropout in zip(self.linear_layers, self.activations, self.dropout):\n output = dropout(activation(layer(output)))\n return output\n"
] | [
[
"torch.nn.Dropout",
"torch.nn.functional.softmax",
"torch.cat",
"torch.zeros",
"torch.sin",
"numpy.arange",
"torch.from_numpy",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"numpy.ones",
"torch.nn.Linear",
"torch.matmul",
"torch.DoubleTensor",
"torch.cos"
],
[
"torch.ByteTensor",
"torch.nn.functional.softmax",
"torch.nn.KLDivLoss",
"torch.nn.Dropout",
"torch.max",
"torch.zeros",
"numpy.isnan",
"torch.nn.ModuleList",
"torch.cuda.x",
"torch.nn.init.xavier_normal_",
"numpy.random.shuffle",
"torch.tensor",
"torch.x",
"torch.nn.Linear",
"numpy.zeros",
"numpy.isinf",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Xiatian-Zhu/FEAT | [
"48331e00dec8b1aa20f6cd7c397cef16f06ea2f6",
"47bdc7c1672e00b027c67469d0291e7502918950"
] | [
"model/models/featv2.py",
"model/models/bilstm.py"
] | [
"import torch\nimport torch.nn as nn\nimport numpy as np\nimport torch.nn.functional as F\n\nfrom model.models import FewShotModel\n\nclass ScaledDotProductAttention(nn.Module):\n ''' Scaled Dot-Product Attention '''\n\n def __init__(self, temperature, attn_dropout=0.1):\n super().__init__()\n self.temperature = temperature\n self.dropout = nn.Dropout(attn_dropout)\n self.softmax = nn.Softmax(dim=2)\n\n def forward(self, q, k, v):\n\n attn = torch.bmm(q, k.transpose(1, 2))\n # print(f'o==> A attn: {attn.shape}')\n attn = attn / self.temperature\n # print(f'o==> B attn: {attn.shape}')\n log_attn = F.log_softmax(attn, 2)\n # print(f'o==> C attn: {log_attn.shape}')\n attn = self.softmax(attn)\n # print(f'o==> D attn: {attn.shape}')\n attn = self.dropout(attn)\n output = torch.bmm(attn, v)\n # print(f'o==> E V: {v.shape}')\n # print(f'o==> F output: {output.shape}')\n return output, attn, log_attn\n\nclass MultiHeadAttention(nn.Module):\n ''' Multi-Head Attention module '''\n\n def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):\n super().__init__()\n self.n_head = n_head\n self.d_k = d_k\n self.d_v = d_v\n\n self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)\n self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)\n self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)\n nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))\n nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))\n nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))\n\n self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))\n self.layer_norm = nn.LayerNorm(d_model)\n\n self.fc = nn.Linear(n_head * d_v, d_model)\n nn.init.xavier_normal_(self.fc.weight)\n self.dropout = nn.Dropout(dropout)\n \n def forward(self, q, k, v):\n d_k, d_v, n_head = self.d_k, self.d_v, self.n_head\n sz_b, len_q, _ = q.size()\n sz_b, len_k, _ = k.size()\n sz_b, len_v, _ = v.size()\n\n residual = q\n q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)\n k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)\n v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)\n \n q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk\n k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk\n v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv\n\n output, attn, log_attn = self.attention(q, k, v)\n\n output = output.view(n_head, sz_b, len_q, d_v)\n output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)\n\n output = self.dropout(self.fc(output))\n output = self.layer_norm(output + residual)\n\n return output\n \nclass FEATV2(FewShotModel):\n def __init__(self, args):\n super().__init__(args)\n if args.backbone_class == 'ConvNet':\n hdim = 64\n elif args.backbone_class == 'Res12':\n hdim = 640\n elif args.backbone_class == 'Res18':\n hdim = 512\n elif args.backbone_class == 'WRN':\n hdim = 640\n else:\n raise ValueError('')\n \n self.slf_attn = MultiHeadAttention(1, hdim, hdim, hdim, dropout=0.5) \n self.slf_attn_reg = MultiHeadAttention(1, hdim, hdim, hdim, dropout=0.5) \n \n def _forward(self, instance_embs, support_idx, query_idx):\n emb_dim = instance_embs.size(-1)\n\n # organize support/query data\n support = instance_embs[support_idx.contiguous().view(-1)].contiguous().view(*(support_idx.shape + (-1,)))\n query = instance_embs[query_idx.contiguous().view(-1)].contiguous().view( *(query_idx.shape + (-1,)))\n \n # get mean of the support\n proto = support.mean(dim=1) # 
Ntask x NK x d\n num_batch = proto.shape[0]\n num_proto = proto.shape[1]\n num_query = np.prod(query_idx.shape[-2:])\n \n # query: (num_batch, num_query, num_proto, num_emb)\n # proto: (num_batch, num_proto, num_emb)\n proto = self.slf_attn(proto, proto, proto) \n if self.args.use_euclidean:\n query = query.view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)\n proto = proto.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()\n proto = proto.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)\n\n logits = - torch.sum((proto - query) ** 2, 2) / self.args.temperature\n else:\n proto = F.normalize(proto, dim=-1) # normalize for cosine distance\n query = query.view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)\n\n logits = torch.bmm(query, proto.permute([0,2,1])) / self.args.temperature\n logits = logits.view(-1, num_proto)\n \n # for regularization\n if self.training:\n aux_task = torch.cat([support.view(1, self.args.shot, self.args.way, emb_dim), \n query.view(1, self.args.query, self.args.way, emb_dim)], 1) # T x (K+Kq) x N x d\n # print(f'aux A: {aux_task.shape}')\n num_query = np.prod(aux_task.shape[1:3])\n aux_task = aux_task.permute([0, 2, 1, 3])\n # print(f'aux B: {aux_task.shape}')\n aux_task = aux_task.contiguous().view(-1, self.args.shot + self.args.query, emb_dim)\n # print(f'aux C: {aux_task.shape}')\n # apply the transformation over the Aug Task\n # aux_emb = self.slf_attn(aux_task, aux_task, aux_task) # T x N x (K+Kq) x d\n aux_emb = self.slf_attn_reg(aux_task, aux_task, aux_task) # T x N x (K+Kq) x d\n # print(f'aux D: {aux_emb.shape}')\n # compute class mean\n aux_emb = aux_emb.view(num_batch, self.args.way, self.args.shot + self.args.query, emb_dim)\n # print(f'aux E: {aux_emb.shape}')\n aux_center = torch.mean(aux_emb, 2) # T x N x d\n # print(f'aux F: {aux_center.shape}')\n \n # import pdb\n # pdb.set_trace()\n\n if self.args.use_euclidean:\n aux_task = aux_task.permute([1,0,2]).contiguous().view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)\n aux_center = aux_center.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()\n aux_center = aux_center.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)\n \n logits_reg = - torch.sum((aux_center - aux_task) ** 2, 2) / self.args.temperature2\n else:\n aux_center = F.normalize(aux_center, dim=-1) # normalize for cosine distance\n aux_task = aux_task.permute([1,0,2]).contiguous().view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)\n \n logits_reg = torch.bmm(aux_task, aux_center.permute([0,2,1])) / self.args.temperature2\n logits_reg = logits_reg.view(-1, num_proto) \n \n return logits, logits_reg \n else:\n return logits, None \n",
"import torch\nimport torch.nn as nn\nimport numpy as np\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom model.models import FewShotModel\n\nclass BidirectionalLSTM(nn.Module):\n def __init__(self, layer_sizes, vector_dim):\n super(BidirectionalLSTM, self).__init__()\n \"\"\"\n Initializes a multi layer bidirectional LSTM\n :param layer_sizes: A list containing the neuron numbers per layer \n e.g. [100, 100, 100] returns a 3 layer, 100\n :param batch_size: The experiments batch size\n \"\"\"\n self.hidden_size = layer_sizes[0]\n self.vector_dim = vector_dim\n self.num_layers = len(layer_sizes)\n\n self.lstm = nn.LSTM(input_size=self.vector_dim,\n num_layers=self.num_layers,\n hidden_size=self.hidden_size,\n bidirectional=True)\n\n def forward(self, inputs, batch_size):\n \"\"\"\n Runs the bidirectional LSTM, produces outputs and saves both forward and backward states as well as gradients.\n :param x: The inputs should be a list of shape [sequence_length, batch_size, 64]\n :return: Returns the LSTM outputs, as well as the forward and backward hidden states.\n \"\"\"\n c0 = Variable(torch.rand(self.lstm.num_layers*2, batch_size, self.lstm.hidden_size),\n requires_grad=False)\n h0 = Variable(torch.rand(self.lstm.num_layers*2, batch_size, self.lstm.hidden_size),\n requires_grad=False)\n if torch.cuda.is_available():\n c0 = c0.cuda()\n h0 = h0.cuda()\n output, (hn, cn) = self.lstm(inputs, (h0, c0))\n # residual addition\n output = output + inputs\n return output # , hn, cn\n\n \nclass BILSTM(FewShotModel):\n def __init__(self, args):\n super().__init__(args)\n if args.backbone_class == 'ConvNet':\n hdim = 64\n elif args.backbone_class == 'Res12':\n hdim = 640\n elif args.backbone_class == 'Res18':\n hdim = 512\n elif args.backbone_class == 'WRN':\n hdim = 640 \n else:\n raise ValueError('')\n \n self.bilstm = BidirectionalLSTM(layer_sizes=[hdim // 2], \n vector_dim = hdim) \n \n def _forward(self, instance_embs, support_idx, query_idx):\n emb_dim = instance_embs.size(-1)\n\n # organize support/query data\n support = instance_embs[support_idx.contiguous().view(-1)].contiguous().view(*(support_idx.shape + (-1,)))\n query = instance_embs[query_idx.contiguous().view(-1)].contiguous().view( *(query_idx.shape + (-1,)))\n \n # get mean of the support\n proto = support.mean(dim=1) # Ntask x NK x d\n num_batch = proto.shape[0]\n num_proto = proto.shape[1]\n num_query = np.prod(query_idx.shape[-2:])\n \n # query: (num_batch, num_query, num_proto, num_emb)\n # proto: (num_batch, num_proto, num_emb)\n proto = self.bilstm(proto.permute([1, 0, 2]), num_batch) \n proto = proto.permute([1, 0, 2])\n if self.args.use_euclidean:\n query = query.view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)\n proto = proto.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()\n proto = proto.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)\n\n logits = - torch.sum((proto - query) ** 2, 2) / self.args.temperature\n else:\n proto = F.normalize(proto, dim=-1) # normalize for cosine distance\n query = query.view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)\n\n logits = torch.bmm(query, proto.permute([0,2,1])) / self.args.temperature\n logits = logits.view(-1, num_proto)\n \n # for regularization\n if self.training:\n aux_task = torch.cat([support.view(1, self.args.shot, self.args.way, emb_dim), \n query.view(1, self.args.query, self.args.way, emb_dim)], 1) # T x (K+Kq) x N x d\n num_query = np.prod(aux_task.shape[1:3])\n aux_task = aux_task.permute([0, 2, 
1, 3])\n aux_task = aux_task.contiguous().view(-1, self.args.shot + self.args.query, emb_dim)\n # apply the transformation over the Aug Task\n aux_emb = self.bilstm(aux_task.permute([1, 0, 2]), num_batch * self.args.way) # T x N x (K+Kq) x d\n aux_emb = aux_emb.permute([1, 0, 2])\n # compute class mean\n aux_emb = aux_emb.view(num_batch, self.args.way, self.args.shot + self.args.query, emb_dim)\n aux_center = torch.mean(aux_emb, 2) # T x N x d\n \n if self.args.use_euclidean:\n aux_task = aux_task.contiguous().view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)\n aux_center = aux_center.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()\n aux_center = aux_center.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)\n \n logits_reg = - torch.sum((aux_center - aux_task) ** 2, 2) / self.args.temperature2\n else:\n aux_center = F.normalize(aux_center, dim=-1) # normalize for cosine distance\n aux_task = aux_task.contiguous().view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)\n \n logits_reg = torch.bmm(aux_task, aux_center.permute([0,2,1])) / self.args.temperature2\n logits_reg = logits_reg.view(-1, num_proto) \n \n return logits, logits_reg \n else:\n return logits \n"
] | [
[
"torch.nn.functional.normalize",
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.mean",
"numpy.sqrt",
"torch.nn.functional.log_softmax",
"numpy.power",
"torch.nn.init.xavier_normal_",
"torch.sum",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.bmm",
"numpy.prod"
],
[
"torch.nn.functional.normalize",
"torch.mean",
"torch.nn.LSTM",
"torch.sum",
"torch.rand",
"numpy.prod",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Knight13/Exploring-Deep-Neural-Decision-Trees | [
"27f33fc7542c46cf3b793f7c1a74d76eb04b47a2"
] | [
"Breast Cancer/DT.py"
] | [
"import numpy as np\nimport random\nimport cancer_data\nfrom sklearn import tree\nimport time\nfrom sklearn.model_selection import train_test_split\n\nx = cancer_data.feature\ny = cancer_data.label\n\nseed = random.seed(1990)\n\nX_train, X_test, y_train, y_test = train_test_split(x, y, train_size=0.70, random_state=seed)\n\nclf = tree.DecisionTreeClassifier()\n\nstart_time = time.time()\n\nclf = clf.fit(X_train, y_train)\n \ny_pred = clf.predict(X_test)\n\nprint('error rate %.5f' %(np.mean(1 - np.mean(np.argmax(y_pred, axis=1) == np.argmax(y_test, axis=1)))))\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n"
] | [
[
"sklearn.tree.DecisionTreeClassifier",
"numpy.argmax",
"sklearn.model_selection.train_test_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wangjiehui11235/panther | [
"cf1ca2f0c7107c5cdacf2f7ff4002d43427d9b07",
"cf1ca2f0c7107c5cdacf2f7ff4002d43427d9b07",
"cf1ca2f0c7107c5cdacf2f7ff4002d43427d9b07"
] | [
"utilities/calc_tools.py",
"alphax/factor_alpha101.py",
"client_integrated.py"
] | [
"#!/usr/bin/env python\n# coding=utf-8\nimport numpy as np\nimport pandas as pd\n\n\nclass CalcTools(object):\n @classmethod\n def is_zero(cls, data_frame):\n return np.where(data_frame > -0.000001,\n np.where(data_frame < 0.000001, True, False)\n , False)\n\n def change_single(params):\n fundamentals_sets_year = params['fundamentals_sets_year']\n sub_columns = params['sub_columns']\n\n def year_update(df):\n df.loc[df.index, sub_columns] = df[sub_columns] - df[sub_columns].shift(-1).fillna(0)\n return df\n\n stock_list = list(set(fundamentals_sets_year['code']))\n new_fundamentals_sets = pd.DataFrame()\n i = 0\n for stock in stock_list:\n i += 1\n if i % 100 == 0:\n print(params['year'], ':', i, '/', len(stock_list))\n new_fundamentals_sets = new_fundamentals_sets.append(year_update(\n fundamentals_sets_year[\n fundamentals_sets_year['code'] == stock]))\n return new_fundamentals_sets\n\n def change_single_by_symbol(params):\n fundamentals_sets = params['fundamentals_sets_symbol']\n sub_columns = params['sub_columns']\n\n def year_update(df):\n df.loc[df.index, sub_columns] = df[sub_columns] - df[sub_columns].shift(-1).fillna(0)\n return df\n\n new_fundamentals_sets = pd.DataFrame()\n year_list = list(set(fundamentals_sets['year']))\n year_list.sort(reverse=True)\n stock_list = list(set(fundamentals_sets['code']))\n i = 0\n for stock in stock_list:\n i += 1\n if i % 100 == 0:\n print('cpu', params['cpu'], ':', i, '/', len(stock_list))\n for year in year_list:\n fundamentals_sets_stock = fundamentals_sets[fundamentals_sets['code'] == stock]\n new_fundamentals_sets = new_fundamentals_sets.append(year_update(\n fundamentals_sets_stock[\n fundamentals_sets_stock['year'] == year]))\n return new_fundamentals_sets\n",
"# -*- coding: utf-8 -*-\n\nimport pdb\nimport numba\nimport six\nimport pandas as pd\nimport numpy as np\nimport inspect\nimport datetime\nfrom sklearn import preprocessing\nfrom numpy import log\nfrom utilities.singleton import Singleton\n\n# rolling corr of two pandas dataframes\ndef rolling_corr(x, y, win):\n corr_df = pd.DataFrame(data=np.NaN, index=x.index, columns=x.columns)\n for begin, end in zip(x.index[:-win + 1], x.index[win - 1:]):\n corr_df.loc[end] = x.loc[begin:end].corrwith(y.loc[begin:end])\n return corr_df\n\n\n# rolling cov of two pandas dataframes\ndef rolling_cov(x, y, win):\n cov_df = pd.DataFrame(data=np.NaN, index=x.index, columns=x.columns)\n for begin, end in zip(x.index[:-win + 1], x.index[win - 1:]):\n x_std = x.loc[begin:end].std()\n y_std = y.loc[begin:end].std()\n cov_df.loc[end] = x.loc[begin:end].corrwith(y.loc[begin:end]) * x_std * y_std\n return cov_df\n\n\n# rolling rank of a pandas dataframe\ndef rolling_rank(df, win):\n rank_df = pd.DataFrame(data=np.NaN, index=df.index, columns=df.columns)\n for begin, end in zip(df.index[:-win + 1], df.index[win - 1:]):\n rank_df.loc[end] = df.loc[begin:end].rank(axis=0, pct=True).iloc[-1]\n return rank_df\n\n\n# rolling dot of a pandas dataframe\ndef rolling_dot(df, x, win):\n dot_df = pd.DataFrame(data=np.NaN, index=df.index, columns=df.columns)\n for begin, end in zip(df.index[:-win + 1], df.index[win - 1:]):\n # dot_df.loc[end] = x.dot(df.loc[begin:end])\n dot_df.loc[end] = np.dot(x, df.loc[begin:end].values)\n return dot_df\n\n\n# rolling regression residual\ndef rolling_regresi(y, x, win):\n resi_df = pd.DataFrame(data=np.NaN, index=y.index, columns=y.columns)\n for begin, end in zip(y.index[:-win + 1], y.index[win - 1:]):\n yy = y.loc[begin:end]\n xx = x.loc[begin:end]\n resi_df.loc[end] = sm.OLS(yy, sm.add_constant(xx)).fit().resid.loc[end]\n return resi_df\n\n\n# columns covariance of two dataframes\ndef df_covariance(x, y):\n y = y[x.columns]\n corr_se = x.corrwith(y)\n x_cov, y_cov = np.diag(np.cov(x.T)), np.diag(np.cov(y.T))\n cov_se = (corr_se * np.sqrt(x_cov) * np.sqrt(y_cov))\n return cov_se\n\n\n# return a series of decay linear sum value of last win rows of dataframe df.\ndef decay_linear(df, win):\n weights = list(range(1, win + 1))\n weights = [x * 1. / np.sum(weights) for x in weights]\n dot_df = rolling_dot(df.iloc[-win:], weights, win)\n return dot_df.iloc[-1]\n\n\n# return a series of decay linear sum value of last win rows of dataframe df.\ndef decay_linear(df, win):\n weights = list(range(1, win + 1))\n weights = [x * 1. / np.sum(weights) for x in weights]\n dot_df = rolling_dot(df.iloc[-win:], weights, win)\n return dot_df.iloc[-1]\n\n\n# return a dataframe of rolling decay linear sum value of dataframe df.\ndef rolling_decay(df, win):\n weights = list(range(1, win + 1))\n weights = [x * 1. 
/ np.sum(weights) for x in weights]\n dot_df = rolling_dot(df, weights, win)\n return dot_df\n\n\n# return winsorized series\ndef se_winsorize(se, method='sigma', limits=(3.0, 3.0), drop=False):\n se = se.copy(deep=True)\n if method == 'quantile':\n down, up = se.quantile([limits[0], 1.0 - limits[1]])\n elif method == 'sigma':\n std, mean = se.std(), se.mean()\n down, up = mean - limits[0]*std, mean + limits[1]*std\n\n if drop:\n se[se<down] = np.NaN\n se[se>up] = np.NaN\n else:\n se[se<down] = down\n se[se>up] = up\n return se\n\n\n# return standardized series\ndef se_standardize(se):\n try:\n res = (se - se.mean()) / se.std()\n except:\n res = pd.Series(data=np.NaN, index=se.index)\n return res\n\n\n# return indneutralized series\ndef se_indneutralize(se, indu_dict):\n date = se.name[0] if type(se.name) is tuple else se.name\n indu = indu_dict[date]\n\n try:\n se = se_winsorize(se, method='quantile', limits=(0.05, 0.05))\n se = se_winsorize(se, method='sigma', limits=(3.0, 3.0))\n se = se.dropna()\n if se.empty:\n return se\n codes = se.index.intersection(indu.index)\n se = se.loc[codes]\n indu = indu.loc[codes]\n\n x = np.linalg.lstsq(indu.values, np.matrix(se).T)[0]\n y = se - indu.dot(x)[0]\n except:\n print(date, ': neutralize error!')\n return y\n\n\n# return indneutralized pandas dataframe\ndef df_indneutralize(df, indu_dict):\n neu_dict = {}\n for bar_id in df.index:\n se = df.loc[bar_id]\n neu_dict[bar_id] = se_indneutralize(se, indu_dict)\n\n neu_df = pd.DataFrame(neu_dict).T\n return neu_df\n\n\n\[email protected]_metaclass(Singleton)\nclass FactorAlpha101(object):\n def __init__(self):\n __str__ = 'factor_alpha101'\n self.name = 'Alpha101'\n self.factor_type1 = 'Features'\n self.factor_type2 = 'Features'\n self.description = 'price and volumns features'\n \n def alpha101_2(self, data, param1=2, param2=6, dependencies=['turnover_vol', 'close_price', 'open_price'], \n max_window=10):\n # -1 * correlation(rank(delta(LOG(VOLUME), 2)), rank(((CLOSE - OPEN) / OPEN)), 6)\n # 价格和成交量都是一阶diff,所以可以都用复权价\n \n rank_price = ((data['close_price'] - data['open_price']) / data['open_price']).rank(axis=1, pct=True)\n rank_volume = (np.log(data['turnover_vol'])).diff(periods=param1).rank(axis=1, pct=True)\n \n corr_win = param2\n id_begin = rank_price.index[-corr_win]\n alpha = rank_price.loc[id_begin:].corrwith(rank_volume.loc[id_begin:])\n return -1.0 * alpha\n \n def alpha101_3(self, data, param1=10, param2=-1, dependencies=['open_price', 'turnover_vol'], max_window=11):\n # -1 * correlation(rank(OPEN), rank(VOLUME), 10)\n\n rank_open_df = data['open_price'].rank(axis=1, pct=True)\n rank_volume_df = data['turnover_vol'].rank(axis=1, pct=True)\n\n corr_win = param1\n id_begin = rank_open_df.index[-corr_win]\n alpha = rank_open_df.loc[id_begin:].corrwith(rank_volume_df.loc[id_begin:])\n return float(param2) * alpha\n \n def alpha101_4(self, data, param1=9, param2=-1, dependencies=['lowest_price'], max_window=10):\n # -1 * ts_rank(rank(LOW), 9)\n\n rank_low = data['lowest_price'].rank(axis=1, pct=True)\n ts_win = param1\n id_begin = rank_low.index[-ts_win]\n alpha = rank_low.loc[id_begin:].rank(axis=0, pct=True).iloc[-1]\n return float(param2) * alpha\n \n \n def alpha101_5(self, data, param1=10, dependencies=['open_price', \n 'close_price', 'vwap'], max_window=10):\n # rank((OPEN - (sum(VWAP, 10) / 10))) * (-1 * abs(rank((CLOSE - VWAP))))\n\n mean_win = param1\n open_vwap = data['open_price'] - data['vwap'].rolling(window=mean_win).mean()\n rank_open = open_vwap.rank(axis=1, pct=True)\n 
abs_rank = (data['close_price'] - data['vwap']).rank(axis=1, pct=True).abs() * (-1.0)\n alpha = (rank_open * abs_rank).iloc[-1]\n return alpha\n \n def alpha101_6(self, data, param1=10, param2=-1, dependencies=['open_price', 'turnover_vol'], max_window=11):\n # -1 * correlation(OPEN, VOLUME, 10)\n # correlation of window history price and volume, use adjusted data here.\n\n corr_win = param1\n id_begin = data['open_price'].index[-corr_win]\n alpha = data['open_price'].loc[id_begin:].corrwith(data['turnover_vol'].loc[id_begin:])\n return float(param2) * alpha\n \n def alpha101_11(self, data, param1=3, param2=3, \n dependencies=['close_price', 'vwap', 'turnover_vol'], max_window=5):\n # (rank(ts_max((VWAP - CLOSE), 3)) + rank(ts_min((VWAP - CLOSE), 3))) * rank(delta(VOLUME, 3))\n\n ts_max = (data['vwap'] - data['close_price']).rolling(window=param1).max()\n ts_min = (data['vwap'] - data['close_price']).rolling(window=param1).min()\n delta_volume = data['turnover_vol'].diff(periods=param2)\n\n rank_ts_max = ts_max.rank(axis=1, pct=True)\n rank_ts_min = ts_min.rank(axis=1, pct=True)\n rank_vol = delta_volume.rank(axis=1, pct=True)\n alpha = ((rank_ts_max + rank_ts_min) * rank_vol).iloc[-1]\n return alpha\n \n def alpha101_12(self, data, param1=1, param2=-1, dependencies=['close_price', 'turnover_vol'], max_window=2):\n # sign(delta(VOLUME, 1)) * (-1 * delta(CLOSE, 1))\n\n alpha = np.sign(data['turnover_vol'].diff(periods=param1)) * data['close_price'].diff(periods=param1)\n alpha = alpha.iloc[-1] * float(param2)\n return alpha\n \n def alpha101_13(self, data, param1=5, param2=-1, dependencies=['close_price', 'turnover_vol'], max_window=6):\n # -1 * rank(covariance(rank(CLOSE), rank(VOLUME), 5))\n\n rank_close_df = data['close_price'].rank(axis=1, pct=True)\n rank_volume_df = data['turnover_vol'].rank(axis=1, pct=True)\n\n corr_win = param1\n id_begin = rank_close_df.index[-corr_win]\n alpha = df_covariance(rank_close_df.loc[id_begin:], rank_volume_df.loc[id_begin:])\n return float(param2) * alpha.rank(pct=True)\n \n def alpha101_14(self, data, param1=10, param2=3, param3=-1, \n dependencies=['open_price', 'turnover_vol', 'returns'], max_window=10):\n # (-1 * rank(delta(RETURNS, 3))) * correlation(OPEN, VOLUME, 10)\n\n corr_win = param1\n id_begin = data['open_price'].index[-corr_win]\n corr_se = data['open_price'].loc[id_begin:].corrwith(data['turnover_vol'].loc[id_begin:])\n rank_ret_se = (param3) * data['returns'].diff(periods=param2).rank(axis=1, pct=True).iloc[-1]\n alpha = rank_ret_se * corr_se\n return alpha\n \n def alpha101_15(self, data, param1=3, param2=3, param3=-1,\n dependencies=['highest_price', 'turnover_vol'], max_window=6):\n # -1 * sum(rank(correlation(rank(HIGH), rank(VOLUME), 3)), 3)\n\n rank_high_df = data['highest_price'].rank(axis=1, pct=True)\n rank_volume_df = data['turnover_vol'].rank(axis=1, pct=True)\n\n corr_win = param1\n corr_df = rolling_corr(rank_high_df, rank_volume_df, win=corr_win)\n sum_win = param2\n id_begin = corr_df.index[-sum_win]\n alpha = corr_df.loc[id_begin:].rank(axis=1, pct=True).sum()\n return float(param3) * alpha\n \n def alpha101_16(self, data, param1=5, param2=-1,\n dependencies=['highest_price', 'turnover_vol'], max_window=6):\n # -1 * rank(covariance(rank(HIGH), rank(VOLUME), 5))\n\n rank_high_df = data['highest_price'].rank(axis=1, pct=True)\n rank_volume_df = data['turnover_vol'].rank(axis=1, pct=True)\n\n corr_win = param1\n id_begin = rank_high_df.index[-corr_win]\n alpha = df_covariance(rank_high_df.loc[id_begin:], 
rank_volume_df.loc[id_begin:])\n return float(param2) * alpha.rank(pct=True)\n \n def alpha101_18(self, data, param1=10, param2=5, param3=-1,\n dependencies=['open_price', 'close_price'], max_window=10):\n # -1 * rank((stddev(abs((CLOSE - OPEN)), 5) + (CLOSE - OPEN)) + correlation(CLOSE, OPEN, 10))\n\n corr_win = param1\n id_begin = data['open_price'].index[-corr_win]\n corr_se = data['open_price'].loc[id_begin:].corrwith(data['close_price'].loc[id_begin:])\n\n price = data['close_price'] - data['open_price']\n price_se = (price.abs().rolling(window=param2).std() + price).iloc[-1]\n\n alpha = float(param3) * (price_se + corr_se).rank(pct=True)\n return alpha\n \n def alpha101_19(self, data, param1=7, param2=70, param3=1, param4=-1,\n dependencies=['close_price', 'returns'], max_window=70):\n # -1 * sign(((CLOSE - delay(CLOSE, 7)) + delta(CLOSE, 7))) * (1 + rank(1 + sum(RETURNS, 250)))\n\n sign_se = np.sign(data['close_price'].diff(param1)).iloc[-1]\n # rank_se = (1.0 + data['returns'].rolling(window=250).sum()).iloc[-1].rank(pct=True) + 1.0\n ret_win = param2\n rank_se = (float(param3) + data['returns'].rolling(window=ret_win).sum()).iloc[-1].rank(pct=True)\n alpha = float(param4) * sign_se * rank_se\n return alpha\n \n def alpha101_22(self, data, param1=5, param2=20, param3=-1, \n dependencies=['close_price', 'highest_price', 'turnover_vol'], max_window=20):\n # -1 * (delta(correlation(HIGH, VOLUME, 5), 5) * rank(stddev(CLOSE, 20)))\n\n corr_df = rolling_corr(data['highest_price'], data['turnover_vol'], win=param1)\n delta_corr_se = corr_df.diff(periods=param1).iloc[-1]\n rank_std_se = data['close_price'].rolling(window=param2).std().rank(axis=1, pct=True).iloc[-1]\n alpha = float(param3) * delta_corr_se * rank_std_se\n return alpha\n \n \n def alpha101_23(self, data, param1=20, param2=-1, param3=2, param4=0.25,\n dependencies=['highest_price', 'highest_price'], max_window=20):\n # ((sum(HIGH, 20) / 20) < HIGH) ? (-1 * delta(HIGH, 2)) : 0\n\n # # original factor calc method\n # mark = data['high'].rolling(window=20).mean() < data['high']\n # delta_high = -1.0 * data['high_raw'].diff(2)\n # delta_high[mark==False] = 0.0\n # alpha = delta_high.iloc[-1]\n\n # adjusted factor calc method\n mark = data['highest_price'].rolling(window=param1).mean() < data['highest_price']\n delta_high = float(param2) * data['highest_price'].diff(param3)\n delta_high[mark==False] = delta_high[mark==False] * param4\n alpha = delta_high.iloc[-1]\n return alpha\n \n def alpha101_24(self, data, param1=40, param2=20, param3=6, param4=-1,\n dependencies=['close_price'], max_window=70):\n # (((delta((sum(CLOSE, 100) / 100), 100) / delay(CLOSE, 100)) < 0.05) ||\n # ((delta((sum(CLOSE, 100) / 100), 100) / delay(CLOSE, 100))== 0.05)) ?\n # (-1 * (CLOSE - ts_min(CLOSE, 100))) : (-1 * delta(CLOSE, 3))\n\n # # rearranged\n # mask = (delta((sum(CLOSE, 100) / 100), 100) / delay(CLOSE, 100))\n # mask > 0.05 ? 
(-1 * delta(CLOSE, 3)) : (-1 * (CLOSE - ts_min(CLOSE, 100)))\n\n # # original factor calc method\n # delta_close = data['close'].rolling(window=100).mean().diff(periods=100)\n # delay_close = data['close'].shift(periods=100)\n # mask = delta_close / delay_close\n # mask_se = mask.iloc[-1] > 0.05\n\n # true_se = -1.0 * data['close_raw'].diff(periods=3).iloc[-1]\n # false_se = -1.0 * (data['close_raw'] - data['close_raw'].rolling(window=100).min()).iloc[-1]\n # true_se = true_se.reindex(mask_se.index)\n # false_index = mask_se[mask_se==False].index\n # true_se.loc[false_index] = false_se.loc[false_index]\n\n # # adjusted factor calc method\n delta_close = data['close_price'].rolling(window=param1).mean().diff(periods=param2)\n delay_close = data['close_price'].shift(periods=param2)\n mask = delta_close / delay_close\n mask_se = mask.iloc[-1] > mask.iloc[-1].median()\n true_se = float(param4) * data['close_price'].diff(periods=param3).iloc[-1]\n false_se = float(param4) * (data['close_price'] - data['close_price'].rolling(\n window=param1).min()).iloc[-1]\n true_se = true_se.reindex(mask_se.index)\n false_index = mask_se[mask_se==False].index\n true_se.loc[false_index] = false_se.loc[false_index]\n return true_se\n \n def alpha101_26(self, data, param1=10, param2=10, param3=5, param4=-1,\n dependencies=['highest_price', 'turnover_vol'], max_window=30):\n # -1 * ts_max(correlation(ts_rank(VOLUME, 5), ts_rank(HIGH, 5), 5), 3)\n\n ts_rank_vol = rolling_rank(data['turnover_vol'], win=param1)\n ts_rank_high = rolling_rank(data['highest_price'], win=param1)\n\n corr_df = rolling_corr(ts_rank_vol, ts_rank_high, win=param2)\n alpha = float(param4) * corr_df.rolling(window=param3).max().iloc[-1]\n return alpha\n \n def alpha101_27(self, data, param1=10, param2=2, param3=-1,\n dependencies=['vwap', 'turnover_vol'], max_window=12):\n # (0.5 < rank((sum(correlation(rank(VOLUME), rank(VWAP), 6), 2) / 2.0))) ? 
(-1 * 1) : 1\n\n rank_vol = data['turnover_vol'].rank(axis=1, pct=True)\n rank_vwap = data['vwap'].rank(axis=1, pct=True)\n\n # # original factor calc method\n # corr_df = rolling_corr(rank_vol, rank_vwap, win=10)\n # corr_mean = corr_df.rolling(window=2).mean()\n # alpha = corr_mean.rank(axis=1, pct=True).iloc[-1]\n # alpha = -1.0 * np.sign((alpha - 0.5))\n\n # adjusted factor calc method\n # sum(correlation(rank(VOLUME), rank(VWAP), 6), 2) / 2.0\n corr_df = rolling_corr(rank_vol, rank_vwap, win=param1)\n corr_mean = corr_df.rolling(window=param2).mean()\n alpha = float(param3)* corr_mean.iloc[-1]\n return alpha\n \n def alpha101_29(self, data, param1=5, param2=4, param3=3,param4=-1,param5=6,param7=20,\n dependencies=['close_price', 'returns'], max_window=30):\n # # original formula\n # min(product(rank(sum(ts_min(rank(-1 * rank(delta(CLOSE, 5))), 2), 1)), 1), 5) +\n # ts_rank(delay((-1 * RETURNS), 6), 5)\n\n # # adjusted formula\n # min(product(rank(sum(ts_min(rank(-1 * rank(delta(CLOSE, 5))), 4), 3)), 3), 5) +\n # ts_rank(delay((-1 * RETURNS), 6), 20)\n\n df = (float(param4) * data['close_price'].diff(periods=param1).rank(axis=1, pct=True)).rank(axis=1, pct=True)\n df = np.log(df.rolling(window=param3).min().rolling(window=param3).sum()).rank(axis=1, pct=True)\n df = df.rolling(window=param3).apply(lambda x: np.prod(x)).rolling(window=param1).min()\n\n delay_ret = (float(param4) * data['returns']).shift(periods=param5)\n rank_win = param7\n id_begin = data['returns'].index[-rank_win]\n ts_rank_ret = delay_ret.loc[id_begin:].rank(axis=0, pct=True)\n\n alpha = df.iloc[-1] + ts_rank_ret.iloc[-1]\n return alpha\n '''\n def alpha101_32(self, data, param1=7, param2=40, param3=5, param4=20,\n dependencies=['close_price', 'vwap'], max_window=50):\n # # original formula\n # scale((sum(CLOSE, 7) / 7) - CLOSE) + 20 * scale(correlation(VWAP, delay(CLOSE, 5), 230))\n\n # # adjusted formula\n # scale((sum(CLOSE, 7) / 7) - CLOSE) + 20 * scale(correlation(VWAP, delay(CLOSE, 5), 40))\n\n close_se = (data['close_price'].rolling(window=param1).mean() - data['close_price']).iloc[-1]\n scale_close_se = close_se / close_se.abs().sum()\n corr_win = param2\n id_begin = data['close_price'].index[-corr_win]\n corr_se = data['close_price'].shift(periods=param3).loc[id_begin:].corrwith(data['vwap'].loc[id_begin:])\n scale_corr_se = corr_se / corr_se.abs().sum()\n\n alpha = scale_close_se + param4 * scale_corr_se\n return alpha\n '''\n \n def alpha101_36(self, data, param1=15, param2=6, param3=10, param4=20, param5=50,\n param6=2.21, param7=0.7, param8=0.73, param9=0.6, param10=-1,\n dependencies=['close_price', 'open_price', 'close_price', \n 'vwap', 'turnover_vol', 'returns'], max_window=60):\n # # original formula\n # 2.21 * rank(correlation((CLOSE - OPEN), delay(VOLUME, 1), 15)) +\n # 0.7 * rank(OPEN - CLOSE) + 0.73 * rank(ts_rank(delay((-1 * RETURNS), 6), 5)) +\n # rank(abs(correlation(VWAP, ADV20, 6))) + 0.6 * rank((sum(CLOSE, 200) / 200 - OPEN) * (CLOSE - OPEN))\n\n # rank(correlation((CLOSE - OPEN), delay(VOLUME, 1), 15))\n corr_win = param1\n id_begin = data['close_price'].index[-corr_win]\n corr_se = data['turnover_vol'].shift(periods=1\n ).loc[id_begin:].corrwith((data['close_price'] - data['open_price']).loc[id_begin:])\n part1 = corr_se.rank(pct=True)\n\n # rank(OPEN - CLOSE)\n part2 = (data['open_price'] - data['close_price']).iloc[-1].rank(pct=True)\n\n # rank(ts_rank(delay((-1 * RETURNS), 6), 5))\n ts_rank_win = param1 # change from orignal 5 to 15\n id_begin = data['returns'].index[-ts_rank_win]\n 
ts_rank_df = (float(param10) * data['returns']).shift(periods=param2).loc[id_begin:].rank(axis=0, pct=True)\n part3 = ts_rank_df.iloc[-1].rank(pct=True)\n\n # rank(abs(correlation(VWAP, ADV20, 6)))\n corr_win = param3 # change from orignal 6 to 10\n id_begin = data['vwap'].index[-corr_win]\n adv20 = data['turnover_vol'].rolling(window=param4).mean()\n corr_se = data['vwap'].loc[id_begin:].corrwith(adv20.loc[id_begin:])\n part4 = corr_se.abs().rank(pct=True)\n\n # rank((sum(CLOSE, 200) / 200 - OPEN) * (CLOSE - OPEN))\n sum_win = param5 # change from orignal 200 to 50\n sum_close = data['close_price'].rolling(window=sum_win).mean() - data['open_price']\n close_open = data['close_price'] - data['open_price']\n part5 = (sum_close * close_open).iloc[-1].rank(pct=True)\n\n alpha = param6 * part1 + param7 * part2 + param8 * part3 + part4 + param9 * part5\n return alpha\n \n def alpha101_40(self, data, param1=10, param2=10, param3=-1,\n dependencies=['highest_price', 'turnover_vol'], max_window=12):\n # (-1 * rank(stddev(HIGH, 10))) * correlation(HIGH, VOLUME, 10)\n\n part1 = float(param3) * data['highest_price'].rolling(window=param1).std().iloc[-1].rank(pct=True)\n corr_win = param2\n id_begin = data['highest_price'].index[-corr_win]\n part2 = data['highest_price'].loc[id_begin:].corrwith(data['turnover_vol'].loc[id_begin:])\n\n alpha = part1 * part2\n return alpha\n \n def alpha101_44(self, data, param1=5, param2=-1,\n dependencies=['highest_price', 'turnover_value'], max_window=11):\n # -1 * correlation(HIGH, rank(VOLUME), 5)\n\n high_df = data['highest_price']\n rank_volume_df = data['turnover_value'].rank(axis=1, pct=True)\n\n corr_win = param1\n id_begin = high_df.index[-corr_win]\n alpha = high_df.loc[id_begin:].corrwith(rank_volume_df.loc[id_begin:])\n return float(param2) * alpha\n \n def alpha101_45(self, data, param1=5, param2=20, param3=6, param4=6, param5=5, param6=20, param7=-1,\n dependencies=['close_price', 'turnover_vol'], max_window=30):\n # -1 * rank(sum(delay(CLOSE, 5), 20) / 20) * correlation(CLOSE, VOLUME, 2) * rank(correlation(sum(CLOSE, 5), sum(CLOSE, 20), 2))\n\n # rank(sum(delay(CLOSE, 5), 20) / 20)\n part1 = data['close_price'].shift(periods=param1).rolling(window=param2).mean().iloc[-1].rank(pct=True)\n\n # correlation(CLOSE, VOLUME, 2)\n corr_win = param3 # change from orignal 2 to 6\n id_begin = data['close_price'].index[-corr_win]\n part2 = data['close_price'].loc[id_begin:].corrwith(data['turnover_vol'].loc[id_begin:])\n\n # rank(correlation(sum(CLOSE, 5), sum(CLOSE, 20), 2))\n corr_win = param4 # change from orignal 2 to 6\n id_begin = data['close_price'].index[-corr_win]\n close_sum5 = data['close_price'].rolling(window=param5).sum()\n close_sum20 = data['close_price'].rolling(window=param6).sum()\n part3 = (close_sum5.loc[id_begin:].corrwith(close_sum20.loc[id_begin:])).rank(pct=True)\n\n alpha = float(param7) * part1 * part2 * part3\n return alpha\n \n def alpha101_50(self, data, param1=5, param2=5, param3=-1,\n dependencies=['vwap', 'turnover_vol'], max_window=10):\n # -1 * ts_max(rank(correlation(rank(VOLUME), rank(VWAP), 5)), 5)\n\n rank_vwap_df = data['vwap'].rank(axis=1, pct=True)\n rank_volume_df = data['turnover_vol'].rank(axis=1, pct=True)\n\n corr_win = param1\n corr_df = rolling_corr(rank_vwap_df, rank_volume_df, win=corr_win)\n ts_max_win = param2\n id_begin = corr_df.index[-ts_max_win]\n alpha = corr_df.loc[id_begin:].rank(axis=1, pct=True).max()\n return float(param3) * alpha\n \n def alpha101_52(self, data, param1=8, param2=8, param3=80, param4=8, 
param5=8,\n dependencies=['lowest_price', 'returns', 'turnover_vol'], max_window=10):\n # (-ts_min(LOW, 5) + delay(ts_min(LOW, 5), 5)) *\n # rank(((sum(RETURNS, 240) - sum(RETURNS, 20)) / 220)) * ts_rank(VOLUME, 5)\n\n # (-ts_min(LOW, 5) + delay(ts_min(LOW, 5), 5))\n ts_max_win = param1\n id_begin = data['lowest_price'].index[-ts_max_win]\n part1 = data['lowest_price'].shift(periods=param2\n ).loc[id_begin:].min() - data['lowest_price'].loc[id_begin:].min()\n\n # rank(((sum(RETURNS, 240) - sum(RETURNS, 20)) / 220))\n long_win, short_win = param3, param4 # change from original 240,20 to 80,8\n ret_se = data['returns'].iloc[-long_win:].sum() - data['returns'].iloc[-short_win:].sum()\n part2 = (ret_se / (1.0 * (long_win - short_win))).rank(pct=True)\n\n # ts_rank(VOLUME, 5)\n ts_rank_win = param5\n part3 = data['turnover_vol'].iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]\n\n alpha = part1 * part2 * part3\n return alpha\n \n def alpha101_53(self, data, param1=2, param2=9, param3=0.001,\n dependencies=['close_price', 'lowest_price', 'highest_price'], max_window=12):\n # -1 * delta((((CLOSE - LOW) - (HIGH - CLOSE)) / (CLOSE - LOW)), 9)\n # rearranged formula\n # -1 * delta(((2 * CLOSE - LOW - HIGH) / (CLOSE - LOW)), 9)\n\n price_df = (data['close_price'] * float(param1) - data['lowest_price'] - data['highest_price']) / (\n (data['close_price'] - data['lowest_price']) + param3)\n alpha = price_df.diff(periods=param2).iloc[-1]\n return alpha\n \n def alpha101_54(self, data, param1=0.001, param2=5, param3=4, param5=-1,\n dependencies=['close_price', 'lowest_price', 'highest_price', 'open_price'], max_window=5):\n # (-1 * ((LOW - CLOSE) * (OPEN^5))) / ((LOW - HIGH) * (CLOSE^5))\n\n numerator = (data['lowest_price'] - data['close_price'] + param1) * (data['open_price'] ** param2)\n denominator = (data['lowest_price'] - data['highest_price'] + param1) * (data['close_price'] ** param2)\n\n # use mean average factor of ma_win bars\n ma_win = param3\n alpha = (float(param5)* numerator / denominator).iloc[-ma_win:].mean()\n alpha[alpha==float(param5)] = np.NaN\n return alpha\n \n def alpha101_55(self, data, param1=12, param2=12, param3=6, param4=-1,\n dependencies=['close_price','lowest_price', 'highest_price', \n 'turnover_value'], max_window=20):\n # -1 * correlation(rank((CLOSE - ts_min(LOW, 12)) / (ts_max(HIGH, 12) - ts_min(LOW, 12))), rank(VOLUME), 6)\n # CLOSE - ts_min(LOW, 12)) / (ts_max(HIGH, 12) - ts_min(LOW, 12)): 此项价格相除无量纲,所以\n # 用复权价格计算; 后续的volume使用value代替\n\n ts_min_low = data['lowest_price'].rolling(window=param1).min()\n ts_max_high = data['highest_price'].rolling(window=param2).max()\n\n price_df = (data['close_price'] - ts_min_low) / (ts_max_high - ts_min_low)\n rank_price = price_df.rank(axis=1, pct=True)\n rank_volume = data['turnover_value'].rank(axis=1, pct=True)\n\n corr_win = param3\n corr_df = rolling_corr(rank_price, rank_volume, win=corr_win)\n return float(param4) * corr_df.iloc[-1]\n \n def alpha101_57(self, data, param1=2, param2=30, param3=4, param4=-1,\n dependencies=['close_price', 'vwap'], max_window=35):\n # -1 * (CLOSE - VWAP) / decay_linear(rank(ts_argmax(CLOSE, 30)), 2)\n\n # (CLOSE - VWAP)\n ma_win = param1\n numerator = (data['close_price'] - data['vwap']).iloc[-ma_win:].mean()\n\n # decay_linear(rank(ts_argmax(CLOSE, 30)), 2)\n rank_df = data['close_price'].rolling(window=param2).apply(lambda x: x.argmax()).rank(axis=1, pct=True)\n denominator = decay_linear(rank_df, win=param3) # change win from original 2 to 4\n\n alpha = (float(param4) * numerator / 
denominator)\n return alpha\n \n def alpha101_58(self, data, param1=9, param2=8, param3=7, param4=-1,\n dependencies=['vwap', 'turnover_vol','indu'], max_window=25):\n # -1 * ts_rank(decay_linear(correlation(indneutralize(VWAP, indclass), VOLUME, 3.92795), 7.89291), 5.50322)\n \n # indneutralize(VWAP, indclass)\n neu_df = df_indneutralize(data['vwap'], data['indu'])\n \n # # original formula\n # corr_win, decay_win, ts_rank_win = 9, 8, 7\n # decay_df = rolling_decay(rolling_corr(neu_df, data['volume_raw'], win=corr_win), win=decay_win)\n # ts_rank_se = decay_df.iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]\n # alpha = -1.0 * ts_rank_se\n \n # adjusted formula --- use a new method instead of ts rank.\n corr_win, decay_win, ts_mean_win = param1, param2, param3\n decay_df = rolling_decay(rolling_corr(neu_df, data['turnover_vol'], win=corr_win), win=decay_win)\n data_se = decay_df.iloc[-1] - decay_df.iloc[-ts_mean_win:].mean(axis=0)\n alpha = float(param4)* data_se\n \n return alpha\n\n def alpha101_59(self, data, param1=0.7, param2=0.3, param3=9, param4=12, param5=10, param6=-1,\n dependencies=['vwap', 'close_price', 'turnover_vol','indu'], max_window=30):\n # -1 * ts_rank(decay_linear(correlation(indneutralize(((VWAP * 0.728317) + (VWAP * (1 - 0.728317))),\n # indclass), VOLUME, 4.25197), 16.2289), 8.19648)\n # Caution: the original formula is almost same with alpha101_58 (\n # ((VWAP * 0.728317) + (VWAP * (1 - 0.728317))) == VWAP), so we take an adjusted formula here.\n # adjusted formula\n # -1 * ts_rank(decay_linear(correlation(indneutralize(((VWAP * 0.728317) + (CLOSE * (1 - 0.728317))),\n # indclass), VOLUME, 4.25197), 16.2289), 8.19648)\n\n # indneutralize(VWAP, indclass)\n neu_df = df_indneutralize(data['vwap'] * param1 + data['close_price'] * param2, data['indu'])\n\n # # original formula\n # corr_win, decay_win, ts_rank_win = 9, 12, 10\n # decay_df = rolling_decay(rolling_corr(neu_df, data['volume_raw'], win=corr_win), win=decay_win)\n # ts_rank_se = decay_df.iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]\n # alpha = -1.0 * ts_rank_se\n\n # adjusted formula --- use a new method instead of ts rank.\n corr_win, decay_win, ts_mean_win = param3, param4, param5\n decay_df = rolling_decay(rolling_corr(neu_df, data['turnover_vol'], win=corr_win), win=decay_win)\n data_se = decay_df.iloc[-1] - decay_df.iloc[-ts_mean_win:].mean(axis=0)\n alpha = float(param6) * data_se\n\n return alpha\n \n def alpha101_62(self, data, param1=20, param2=10, param3=10, param4=2, param5=-1,\n dependencies=['turnover_vol', 'vwap', 'open_price', 'highest_price', \n 'lowest_price'], max_window=40):\n # -1.0 * (rank(correlation(VWAP, sum(ADV20, 22.4101), 9.91009)) <\n # rank(((rank(OPEN) * 2) < (rank(((HIGH + LOW) / 2)) + rank(HIGH)))))\n\n # adjusted formula: between two parts, use - instead of <; between two parts\n # in the second condition, use - instead of < too;\n # -1.0 * (rank(correlation(VWAP, sum(ADV20, 22.4101), 9.91009)) -\n # rank(((rank(OPEN) * 2) - (rank(((HIGH + LOW) / 2)) + rank(HIGH)))))\n\n # rank(correlation(VWAP, sum(ADV20, 22.4101), 9.91009))\n adv_win, sum_adv_win, corr_win = param1, param2, param3\n sum_adv = data['turnover_vol'].rolling(window=adv_win).mean().rolling(window=sum_adv_win).mean()\n part1 = data['vwap'].iloc[-corr_win:].corrwith(sum_adv.iloc[-corr_win:]).rank(pct=True)\n\n # rank(((rank(OPEN) * 2) - (rank(((HIGH + LOW) / 2)) + rank(HIGH))))\n rank_open = data['open_price'].rank(axis=1, pct=True)\n rank_high_low = ((data['highest_price'] + data['lowest_price']) / 
float(param4)).rank(axis=1, pct=True)\n rank_high = data['highest_price'].rank(axis=1, pct=True)\n part2 = (rank_open - rank_high_low - rank_high).rank(axis=1, pct=True).iloc[-1]\n\n alpha = float(param5) * (part1 - part2)\n return alpha\n \n def alpha101_66(self, data, param1=0.001, param2=2, param3=12, param4=7, param5=-1,\n dependencies=['vwap', 'lowest_price', 'open_price', 'highest_price'], max_window=20):\n # -1 * (rank(decay_linear(delta(VWAP, 3.51013), 7.23052)) +\n # ts_rank(decay_linear((((LOW * 0.96633) + (LOW * (1 - 0.96633))) - VWAP) / (OPEN - ((HIGH + LOW) / 2)), 11.4157), 6.72611))\n\n # rank(decay_linear(delta(VWAP, 3.51013), 7.23052))\n part1 = decay_linear(data['vwap'].diff(periods=4), win=8).rank(pct=True)\n\n # ts_rank(decay_linear((((LOW * 0.96633) + (LOW * (1 - 0.96633))) - VWAP) / (OPEN - ((HIGH + LOW) / 2)), 11.4157), 6.72611)\n # rearranged\n # ts_rank(decay_linear((LOW - VWAP) / (OPEN - ((HIGH + LOW) / 2)), 11.4157), 6.72611)\n price_df = (data['lowest_price'] - data['lowest_price'] + param1\n ) / (data['open_price'] - (data['highest_price'] + data['lowest_price']) / float(param2) + param1)\n price_df = (data['lowest_price'] - data['vwap']) / (\n data['open_price'] - (data['highest_price'] + data['lowest_price']) / float(param2))\n decay_win, ts_win = param3, param4\n part2 = rolling_decay(price_df, win=decay_win).iloc[-ts_win:].rank(axis=0, pct=True).iloc[-1]\n\n alpha = float(param5) * (part1 + part2)\n return alpha\n \n def alpha101_67(self, data, param1=20, param2=10, param3=5, param4=8, param5=-1,\n dependencies=['highest_price', 'vwap', 'turnover_vol','indu'], max_window=30):\n # -1.0 * rank(HIGH - ts_min(HIGH, 2.14593))^\n # rank(correlation(indneutralize(VWAP, indclass), indneutralize(ADV20, indclass), 6.02936))\n\n # rank(HIGH - ts_min(HIGH, 2.14593))\n # use adjusted formula: mean(rank(HIGH - ts_min(HIGH, 10)), 10)\n high_df = data['highest_price'] - data['highest_price'].rolling(window=param1).min()\n part1 = high_df.rank(axis=1, pct=True).iloc[-param2:].mean()\n\n # rank(correlation(indneutralize(VWAP, indclass), indneutralize(ADV20, indclass), 6.02936))\n neu_vwap = df_indneutralize(data['vwap'], data['indu'])\n neu_adv = df_indneutralize(data['turnover_vol'].rolling(window=param3).mean(), data['indu'])\n corr_win = param4\n part2 = neu_vwap.iloc[-corr_win:].corrwith(neu_adv.iloc[-corr_win:]).rank(pct=True)\n\n alpha = float(param5) * part1 ** part2\n return alpha\n \n def alpha101_69(self, data, param1=3, param2=5, param3=8, param4=-1,\n dependencies=['vwap', 'turnover_vol','indu'], max_window=15):\n # -1 * rank(ts_max(delta(indneutralize(VWAP, indclass), 2.72412), 4.79344))^\n # ts_rank(correlation(((CLOSE * 0.490655) + (VWAP * (1 - 0.490655))), ADV20, 4.92416), 9.0615)\n\n neu_vwap = df_indneutralize(data['vwap'], data['indu'])\n neu_adv = df_indneutralize(data['turnover_vol'].rolling(window=5).mean(), data['indu'])\n\n # rank(ts_max(delta(indneutralize(VWAP, indclass), 2.72412), 4.79344))\n diff_win, ts_max_win = param1, param2\n ts_max_df = neu_vwap.diff(periods=diff_win).rolling(window=ts_max_win).max()\n part1 = ts_max_df.iloc[-1].rank(pct=True)\n\n # rank(correlation(indneutralize(VWAP, indclass), indneutralize(ADV20, indclass), 6.02936))\n corr_win = param3\n part2 = neu_vwap.iloc[-corr_win:].corrwith(neu_adv.iloc[-corr_win:]).rank(pct=True)\n\n alpha = float(param4) * (part1 ** part2)\n return alpha\n \n def alpha101_72(self, data, param1=5, param2=1.0e6, param3=9, param4=10, param5=2,\n param6=8, param7=20, param8=7,param9=3,\n 
dependencies=['turnover_vol', 'lowest_price', 'highest_price', 'vwap'], max_window=30):\n # rank(decay_linear(correlation(((HIGH + LOW) / 2), ADV40, 8.93345), 10.1519)) /\n # rank(decay_linear(correlation(ts_rank(VWAP, 3.72469), ts_rank(VOLUME, 18.5188), 6.86671), 2.95011))\n\n # rank(decay_linear(correlation(((HIGH + LOW) / 2), ADV40, 8.93345), 10.1519))\n ma_vol_win = param1\n avg_vol = data['turnover_vol'].rolling(window=ma_vol_win).mean() / param2\n corr_win, decay_win = param3, param4\n part1 = decay_linear(rolling_corr((data['highest_price'] + data['lowest_price'])/float(param5), \n avg_vol, win=corr_win), win=decay_win).rank(pct=True)\n\n # rank(decay_linear(correlation(ts_rank(VWAP, 3.72469), ts_rank(VOLUME, 18.5188), 6.86671), 2.95011))\n ts_rank_vwap = rolling_rank(data['vwap'], win=param6)\n ts_rank_vol = rolling_rank(data['turnover_vol'], win=param7)\n corr_win, decay_win = param8, param9\n part2 = decay_linear(rolling_corr(ts_rank_vwap, ts_rank_vol, win=corr_win), win=decay_win).rank(pct=True)\n\n alpha = part1 / part2\n return alpha\n \n def alpha101_73(self, data, param1=5, param2=3, param3=0.147155, param4=0.147155,\n param5=2, param6=4, param7=17,param8=-1,param9=-1,\n dependencies=['vwap', 'lowest_price', 'open_price'], max_window=25):\n # -1 * max(rank(decay_linear(delta(VWAP, 4.72775), 2.91864)),\n # ts_rank(decay_linear((delta((OPEN * 0.147155 + LOW * (1 - 0.147155)), 2.03608) /\n # (OPEN * 0.147155 + LOW * (1 - 0.147155))) * -1, 3.33829), 16.7411))\n\n # rank(decay_linear(delta(VWAP, 4.72775), 2.91864))\n diff_win, decay_win = param1, param2\n part1 = decay_linear(data['vwap'].diff(periods=diff_win), win=decay_win).rank(pct=True)\n\n # (OPEN * 0.147155 + LOW * (1 - 0.147155))\n price = data['open_price'] * param3 + data['lowest_price'] * (1 - param4)\n # ts_rank(decay_linear((delta((OPEN * 0.147155 + LOW * (1 - 0.147155)), 2.03608) /\n # (OPEN * 0.147155 + LOW * (1 - 0.147155))) * -1, 3.33829), 16.7411)\n diff_win, decay_win, ts_rank_win = param5, param6, param7\n decay_df = rolling_decay(float(param8) * price.diff(periods=diff_win) / price, win=decay_win)\n part2 = decay_df.iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]\n\n alpha = (param9) * pd.DataFrame({'part1': part1, 'part2': part2}).max(axis=1)\n return alpha\n \n def alpha101_74(self, data, param1=10, param2=16, param3=15, param4=0.0261661, param5=12, param6=-1,\n dependencies=['turnover_vol', 'close_price', 'highest_price', 'vwap'], max_window=40):\n # -1 * (rank(correlation(CLOSE, sum(ADV30, 37.4843), 15.1365)) <\n # rank(correlation(rank(((HIGH * 0.0261661) + (VWAP * (1 - 0.0261661)))), rank(VOLUME), 11.4791)))\n # rearranged formula: between two parts, use - instead of <\n # -1 * (rank(correlation(CLOSE, sum(ADV30, 37.4843), 15.1365)) -\n # rank(correlation(rank(((HIGH * 0.0261661) + (VWAP * (1 - 0.0261661)))), rank(VOLUME), 11.4791)))\n\n # rank(correlation(CLOSE, sum(ADV30, 37.4843), 15.1365))\n mean_win, sum_win = param1, param2 # change from 30, 37.48 to 10, 16\n adv_sum = data['turnover_vol'].rolling(window=mean_win).mean().rolling(window=sum_win).sum()\n corr_win = param3 # change from orignal 15.13 to 15\n part1 = data['close_price'].iloc[-corr_win:].corrwith(adv_sum.iloc[-corr_win:]).rank(pct=True)\n\n # rank(correlation(rank(HIGH * 0.0261661 + VWAP * (1 - 0.0261661)), rank(VOLUME), 11.4791))\n rank_price = (data['highest_price'] * param4 + data['vwap'] * (1 - param4)).rank(axis=1, pct=True)\n rank_vol = data['turnover_vol'].rank(axis=1, pct=True)\n corr_win = param5 # change from orignal 
11.4791 to 12\n part2 = rank_price.iloc[-corr_win:].corrwith(rank_vol.iloc[-corr_win:]).rank(pct=True)\n\n alpha = float(param6) * (part1 - part2)\n return alpha\n\n def alpha101_75(self, data, param1=8, param2=12, param3=12,\n dependencies=['vwap', 'turnover_vol', 'lowest_price', 'turnover_vol'], max_window=30):\n # rank(correlation(VWAP, VOLUME, 4.24304)) < rank(correlation(rank(LOW), rank(ADV50), 12.4413))\n # rearranged formula: between two parts, use - instead of <\n # rank(correlation(VWAP, VOLUME, 4.24304)) - rank(correlation(rank(LOW), rank(ADV50), 12.4413))\n\n # rank(correlation(VWAP, VOLUME, 4.24304))\n corr_win = param1 # change from orignal 4.243 to 8\n part1 = data['vwap'].iloc[-corr_win:].corrwith(data['turnover_vol'].iloc[-corr_win:]).rank(pct=True)\n\n # rank(correlation(rank(LOW), rank(ADV50), 12.4413))\n mean_win = param2 # change from orignal 50 to 12\n rank_price = data['lowest_price'].rank(axis=1, pct=True)\n rank_adv = data['turnover_vol'].rolling(window=mean_win).mean().rank(axis=1, pct=True)\n corr_win = param3 # change from orignal 12.4413 to 12\n part2 = rank_price.iloc[-corr_win:].corrwith(rank_adv.iloc[-corr_win:]).rank(pct=True)\n\n alpha = part1 - part2\n return alpha\n \n def alpha101_76(self, data, param1=5, param2=1, param3=5, param4=8, param5=20, param6=5,\n param7=20, param8=-1, param9=0.5,\n dependencies=['close_price', 'vwap', 'lowest_price', 'turnover_vol','indu'], \n max_window=50):\n # -1 * max(rank(decay_linear(delta(VWAP, 1.24383), 11.8259)),\n # ts_rank(decay_linear(ts_rank(correlation(indneutralize(LOW, indclass), ADV81, 8.14941), 19.569), 17.1543), 19.383))\n\n neu_low = df_indneutralize(data['lowest_price'], data['indu'])\n adv = data['turnover_vol'].rolling(window=param1).mean()\n\n # rank(decay_linear(delta(VWAP, 1.24383), 11.8259))\n diff_win, decay_win = param2, param3\n decay_df = rolling_decay(data['vwap'].diff(periods=diff_win), win=decay_win)\n part1 = decay_df.iloc[-1].rank(pct=True)\n\n # ts_rank(decay_linear(ts_rank(correlation(indneutralize(LOW, indclass), ADV81, 8.14941), 19.569), 17.1543), 19.383)\n corr_win, ts_rank_win1, decay_win, ts_rank_win2 = param4, param5, param6, param7\n corr_df = rolling_corr(neu_low, adv, win=corr_win)\n decay_df = rolling_decay(rolling_rank(corr_df, win=ts_rank_win1), win=decay_win)\n part2 = decay_df.iloc[-ts_rank_win2:].rank(axis=0, pct=True).iloc[-1]\n\n res_df = pd.DataFrame({'part1': part1, 'part2': part2})\n # alpha = -1.0 * res_df.max(axis=1)\n # # use adjusted formula\n alpha = float(param8) * (res_df.max(axis=1) - param9 * res_df.min(axis=1))\n return alpha\n \n def alpha101_80(self, data, param1=0.85, param2=0.15, param3=5, param4=4, param5=5,\n param6=6, param7=6, param8=-1,\n dependencies=['open_price', 'highest_price', 'turnover_vol', 'highest_price','indu'], max_window=20):\n # -1 * (rank(sign(delta(indneutralize(((OPEN * 0.868128) + (HIGH * (1 - 0.868128))), indclass), 4.04545)))^\n # ts_rank(correlation(HIGH, ADV10, 5.11456), 5.53756))\n\n neu_price = df_indneutralize(data['open_price'] * param1 + data['highest_price'] * param2, data['indu'])\n adv = data['turnover_vol'].rolling(window=param3).mean()\n\n # rank(sign(delta(indneutralize(((OPEN * 0.868128) + (HIGH * (1 - 0.868128))), indclass), 4.04545)))\n # use decay_linear instead of sign in part1 formula\n # rank(decay_linear(delta(indneutralize(((OPEN * 0.868128) + (HIGH * (1 - 0.868128))), indclass), 4.04545), 5))\n diff_win, decay_win = param4, param5\n part1 = decay_linear(neu_price.diff(periods=diff_win), 
win=decay_win).rank(pct=True)\n\n # ts_rank(correlation(HIGH, ADV10, 5.11456), 5.53756)\n corr_win, ts_rank_win = param6, param7\n corr_df = rolling_corr(data['highest_price'], adv, win=corr_win)\n part2 = corr_df.iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]\n\n alpha = float(param8) * part1 ** part2\n return alpha\n \n def alpha101_81(self, data, param1=10, param2=10, param3=8, param4=10, param5=4,\n param6=8, param7=-1,\n dependencies=['vwap', 'turnover_vol', 'vwap'], max_window=32):\n # -1 * (rank(LOG(product(rank((rank(correlation(VWAP, sum(ADV10, 49.6054), 8.47743))^4)), 14.9655))) <\n # rank(correlation(rank(VWAP), rank(VOLUME), 5.07914)))\n # rearranged formula: between two parts, use - instead of <\n # -1 * (rank(LOG(product(rank((rank(correlation(VWAP, sum(ADV10, 49.6054), 8.47743))^4)), 14.9655))) -\n # rank(correlation(rank(VWAP), rank(VOLUME), 5.07914)))\n\n # rank(LOG(product(rank((rank(correlation(VWAP, sum(ADV10, 49.6054), 8.47743))^4)), 14.9655)))\n mean_win, sum_win = param1, param2 # change from 10, 49.6054 to 10, 10\n adv_sum = data['turnover_vol'].rolling(window=mean_win).mean().rolling(window=sum_win).sum()\n corr_win, prod_win = param3, param4 # change from orignal 8.47743, 14.9655 to 8, 10\n corr_df = rolling_corr(data['vwap'], adv_sum, corr_win)\n prod_se = ((corr_df.rank(axis=1, pct=True)) ** param5).rank(axis=1, pct=True).iloc[-prod_win:].cumprod().iloc[-1]\n part1 = np.log(prod_se).rank(pct=True)\n\n # rank(correlation(rank(VWAP), rank(VOLUME), 5.07914))\n rank_price = data['vwap'].rank(axis=1, pct=True)\n rank_vol = data['turnover_vol'].rank(axis=1, pct=True)\n corr_win = param6 # change from orignal 5.07914 to 8\n part2 = rank_price.iloc[-corr_win:].corrwith(rank_vol.iloc[-corr_win:]).rank(pct=True)\n\n alpha = float(param7) * (part1 - part2)\n return alpha\n\n def alpha101_82(self, data, param1=1, param2=10, param3=16, param4=6, param5=14, param6=-1, param7=0.5,\n dependencies=['open_price', 'turnover_vol','indu'], max_window=40):\n # -1 * min(rank(decay_linear(delta(OPEN, 1.46063), 14.8717)),\n # ts_rank(decay_linear(correlation(indneutralize(VOLUME, indclass), ((OPEN * 0.634196) + (OPEN * (1 - 0.634196))), 17.4842), 6.92131), 13.4283))\n # rearranged formula\n # -1 * min(rank(decay_linear(delta(OPEN, 1.46063), 14.8717)),\n # ts_rank(decay_linear(correlation(indneutralize(VOLUME, indclass), OPEN, 17.4842), 6.92131), 13.4283))\n\n # rank(decay_linear(delta(OPEN, 1.46063), 14.8717))\n diff_win, decay_win = param1, param2\n part1 = decay_linear(data['open_price'].diff(periods=diff_win), win=decay_win).rank(pct=True)\n\n # ts_rank(decay_linear(correlation(indneutralize(VOLUME, indclass), OPEN, 17.4842), 6.92131), 13.4283)\n neu_vol = df_indneutralize(data['turnover_vol'], data['indu'])\n corr_win, decay_win, ts_rank_win = param3, param4, param5\n decay_df = rolling_decay(rolling_corr(neu_vol, data['open_price'], win=corr_win), win=decay_win)\n part2 = decay_df.iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]\n\n alpha101_df = pd.DataFrame({'part1': part1, 'part2': part2})\n\n # # original alpha formula\n # alpha = -1.0 * alpha101_df.min(axis=1)\n # adjusted alpha formula\n alpha = float(param6) * (alpha101_df.min(axis=1) - float(param7) * alpha101_df.max(axis=1))\n return alpha\n \n def alpha101_83(self, data, param1=10, param2=2,\n dependencies=['highest_price', 'lowest_price', \n 'close_price', 'turnover_vol', \n 'vwap'], max_window=20):\n # (rank(delay(((HIGH - LOW) / (sum(CLOSE, 5) / 5)), 2)) * rank(VOLUME)) /\n # (((HIGH - LOW) / (sum(CLOSE, 5) / 
5)) / (VWAP - CLOSE))\n # rearranged formula\n # rank(delay(((HIGH - LOW) / (sum(CLOSE, 5) / 5)), 2)) * rank(VOLUME) * (VWAP - CLOSE) /\n # ((HIGH - LOW) / (sum(CLOSE, 5) / 5))\n \n # rank(delay(((HIGH - LOW) / (sum(CLOSE, 5) / 5)), 2))\n mean_win, delay_win = param1, param2\n price_df = ((data['highest_price'] - data['lowest_price']) / data['close_price'].rolling(window=mean_win).mean())\n part1 = price_df.diff(periods=delay_win).iloc[-1].rank(pct=True)\n \n # rank(VOLUME) * (VWAP - CLOSE)\n part2 = (data['turnover_vol'].rank(axis=1, pct=True) * (data['vwap'] - data['close_price'])).iloc[-1]\n \n # ((HIGH - LOW) / (sum(CLOSE, 5) / 5))\n part3 = price_df.iloc[-1]\n \n alpha = part1 * part2 / part3\n return alpha\n \n def alpha101_84(self, data, param1=15, param2=20, param3=6,\n dependencies=['vwap', 'close_price'], max_window=40):\n # signedpower(ts_rank((VWAP - ts_max(VWAP, 15.3217)), 20.7127), delta(CLOSE, 4.96796))\n\n # ts_rank((VWAP - ts_max(VWAP, 15.3217)), 20.7127)\n max_win, rank_win = param1, param2\n price_df = data['vwap'] - data['vwap'].rolling(window=max_win).max()\n part1 = price_df.iloc[-rank_win:].rank(axis=0, pct=True).iloc[-1]\n \n # delta(CLOSE, 4.96796)\n diff_win = param3\n part2 = data['close_price'].diff(periods=diff_win).iloc[-1]\n part2 = data['close_price'].diff(periods=diff_win).iloc[-1].rank(pct=True)\n \n alpha = np.sign(part1) * (part1.abs() ** part2)\n return alpha\n\n def alpha101_87(self, data, param1=2, param2=3, param3=0.37, param4=0.63, param5=12, param6=5,\n param7=14,\n dependencies=['close_price', 'vwap', 'turnover_vol','indu'], max_window=30):\n # -1 * max(rank(decay_linear(delta(((CLOSE * 0.369701) + (VWAP * (1 - 0.369701))), 1.91233), 2.65461)),\n # ts_rank(decay_linear(abs(correlation(indneutralize(ADV81, indclass), CLOSE, 13.4132)), 4.89768), 14.4535))\n \n # rank(decay_linear(delta(((CLOSE * 0.369701) + (VWAP * (1 - 0.369701))), 1.91233), 2.65461))\n diff_win, decay_win = param1, param2\n price_df = data['close_price'] * param3 + data['vwap'] * param4\n part1 = decay_linear(price_df.diff(periods=diff_win), win=decay_win).rank(pct=True)\n\n # ts_rank(decay_linear(abs(correlation(indneutralize(ADV81, indclass), CLOSE, 13.4132)), 4.89768), 14.4535)\n neu_adv = df_indneutralize(data['turnover_vol'].rolling(window=8).mean(), data['indu'])\n corr_win, decay_win, ts_rank_win = param5, param6, param6\n decay_df = rolling_decay(rolling_corr(neu_adv, data['close_price'], win=corr_win).abs(), win=decay_win)\n part2 = decay_df.iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]\n \n alpha101_df = pd.DataFrame({'part1': part1, 'part2': part2})\n \n # # original alpha formula\n # alpha = -1.0 * alpha101_df.max(axis=1)\n # adjusted alpha formula\n alpha = -1.0 * (alpha101_df.max(axis=1) - 0.5 * alpha101_df.min(axis=1))\n return alpha\n \n def alpha101_88(self, data, param1=8, param2=20, param3=9, param4=20, param5=9, param6=6, param7=20,\n dependencies=['open_price', 'highest_price', \n 'lowest_price', 'close_price', \n 'turnover_vol'], max_window=50):\n # min(rank(decay_linear(((rank(OPEN) + rank(LOW)) - (rank(HIGH) + rank(CLOSE))), 8.06882)),\n # ts_rank(decay_linear(correlation(ts_rank(CLOSE, 8.44728), ts_rank(ADV60, 20.6966), 8.01266), 6.65053), 2.61957))\n\n # rank(decay_linear(((rank(OPEN) + rank(LOW)) - (rank(HIGH) + rank(CLOSE))), 8.06882))\n decay_win = param1\n open_low = data['open_price'].rank(axis=1, pct=True) + data['lowest_price'].rank(axis=1, pct=True)\n high_close = data['highest_price'].rank(axis=1, pct=True) + 
data['close_price'].rank(axis=1, pct=True)\n part1 = decay_linear(open_low - high_close, win=decay_win).rank(pct=True)\n\n # ts_rank(decay_linear(correlation(ts_rank(CLOSE, 8.44728), ts_rank(ADV60, 20.6966), 8.01266), 6.65053), 2.61957)\n adv_win, ts_close_win, ts_adv_win = param2, param3, param4\n adv_df = data['turnover_vol'].rolling(window=adv_win).mean()\n rank_close = rolling_rank(data['close_price'], win=ts_close_win)\n rank_adv = rolling_rank(adv_df, win=ts_adv_win)\n corr_win, decay_win, ts_rank_win = param5, param6, param7 # change from original 8.01266, 6.65053, 2.61957 to 9, 6, 10\n decay_df = rolling_decay(rolling_corr(rank_close, rank_adv, win=corr_win), win=decay_win)\n part2 = decay_df.iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]\n \n # original\n alpha = pd.DataFrame({'part1': part1, 'part2': part2}).min(axis=1)\n # # adjusted formula\n # alpha = pd.DataFrame({'part1': part1, 'part2': part2}).mean(axis=1)\n return alpha\n \n def alpha101_90(self, data, param1=5, param2=4, param3=8, param4=8, param5=6, param6=-1,\n dependencies=['close_price', 'lowest_price', 'turnover_vol','indu'], max_window=20):\n # -1 *(rank((CLOSE - ts_max(CLOSE, 4.66719)))^\n # ts_rank(correlation(indneutralize(ADV40, indclass), LOW, 5.38375), 3.21856))\n \n # rank((CLOSE - ts_max(CLOSE, 4.66719)))\n # add a decay linear\n close_df = data['close_price'] - data['close_price'].rolling(window=param1).max()\n part1 = decay_linear(close_df.rank(axis=1, pct=True), win=param2).rank(pct=True)\n \n # ts_rank(correlation(indneutralize(ADV40, indclass), LOW, 5.38375), 3.21856)\n neu_adv = df_indneutralize(data['turnover_vol'].rolling(window=param3).mean(), data['indu'])\n corr_win, ts_rank_win = param4, param5\n corr_df = rolling_corr(neu_adv, data['lowest_price'], win=corr_win)\n part2 = corr_df.iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]\n \n alpha = float(param6) * part1 ** part2\n return alpha\n \n def alpha101_91(self, data, param1=5, param2=10, param3=10, param4=3, param5=10, \n param6=8, param7=3, param8=-1,\n dependencies=['close_price', 'turnover_vol', \n 'vwap','indu'], max_window=32):\n # -1 * (ts_rank(decay_linear(decay_linear(correlation(indneutralize(CLOSE, indclass), VOLUME, 9.74928), 16.398), 3.83219), 4.8667) -\n # rank(decay_linear(correlation(VWAP, ADV30, 4.01303), 2.6809)))\n\n neu_close = df_indneutralize(data['close_price'], data['indu'])\n adv = data['turnover_vol'].rolling(window=param1).mean()\n\n # ts_rank(decay_linear(decay_linear(correlation(indneutralize(CLOSE, indclass), VOLUME, 9.74928), 16.398), 3.83219), 4.8667)\n corr_win, decay_win1, decay_win2, ts_rank_win = param2, param3, param4, param5\n decay_df = rolling_decay(rolling_decay(rolling_corr(neu_close, data['turnover_vol'], \n win=corr_win), win=decay_win1), win=decay_win2)\n part1 = decay_df.iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]\n\n # rank(decay_linear(correlation(VWAP, ADV30, 4.01303), 2.6809))\n corr_win, decay_win = param6, param7\n part2 = decay_linear(rolling_corr(data['vwap'], adv, win=corr_win), win=decay_win)\n \n alpha = float(param8) * (part1 - part2)\n return alpha\n \n def alpha101_96(self, data, param1=6, param2=4, param3=14, param4=20, param5=8, param6=6,\n param7=8, param8=12, param9=6, param10=13, param11=-1,\n dependencies=['vwap', 'turnover_vol', \n 'close_price'], max_window=50):\n # -1.0 * max(ts_rank(decay_linear(correlation(rank(VWAP), rank(VOLUME), 3.83878), 4.16783), 8.38151),\n # ts_rank(decay_linear(ts_argmax(correlation(ts_rank(CLOSE, 7.45404), ts_rank(ADV60, 4.13242), 
3.65459), 12.6556), 14.0365), 13.4143))\n\n # ts_rank(decay_linear(correlation(rank(VWAP), rank(VOLUME), 3.83878), 4.16783), 8.38151)\n rank_vwap = data['vwap'].rank(axis=1, pct=True)\n rank_vol = data['turnover_vol'].rank(axis=1, pct=True)\n corr_win, decay_win, ts_rank_win = param1, param2, param3\n decay_df = rolling_decay(rolling_corr(rank_vwap, rank_vol, win=corr_win), win=decay_win)\n part1 = decay_df.iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]\n\n # ts_rank(decay_linear(ts_argmax(correlation(ts_rank(CLOSE, 7.45404), ts_rank(ADV60, 4.13242), 3.65459), 12.6556), 14.0365), 13.4143)\n adv_win, ts_close_win, ts_adv_win = param4, param5, param6\n adv_df = data['turnover_vol'].rolling(window=adv_win).mean()\n rank_close = rolling_rank(data['close_price'], win=ts_close_win)\n rank_adv = rolling_rank(adv_df, win=ts_adv_win)\n # change from original 3.65459, 12.6556, 14.0365, 13.4143 to 8, 12, 6, 13\n corr_win, ts_max_win, decay_win, ts_rank_win = param7, param8, param9, param10\n corr_df = rolling_corr(rank_close, rank_adv, win=corr_win)\n ts_argmax_df = corr_df.rolling(window=ts_max_win).apply(lambda x: x.argmax())\n decay_df = rolling_decay(ts_argmax_df, win=decay_win)\n part2 = decay_df.iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]\n \n # original formula\n alpha = float(param11) * pd.DataFrame({'part1': part1, 'part2': part2}).max(axis=1)\n # # adjusted formula\n # alpha = -1 * pd.DataFrame({'part1': part1, 'part2': part2}).mean(axis=1)\n return alpha\n \n def alpha101_97(self, data, param1=4, param2=12, param3=0.7, param4=10, param5=17,\n param6=8, param7=18, param8=5, param9=16, param10=-1,\n dependencies=['lowest_price', 'vwap', 'turnover_vol', 'lowest_price','indu'], max_window=45):\n # -(rank(decay_linear(delta(indneutralize(((LOW * 0.721001) + (VWAP * (1 - 0.721001))), indclass), 3.3705), 20.4523)) -\n # ts_rank(decay_linear(ts_rank(correlation(ts_rank(LOW, 7.87871), ts_rank(ADV60, 17.255), 4.97547), 18.5925), 15.7152), 6.71659))\n \n # rank(decay_linear(delta(indneutralize(((LOW * 0.721001) + (VWAP * (1 - 0.721001))), indclass), 3.3705), 20.4523))\n diff_win, decay_win = param1, param2\n price_df = data['lowest_price'] * param3 + data['vwap'] * (1 - param3)\n part1 = decay_linear(df_indneutralize(price_df, data['indu']).diff(periods=diff_win), win=decay_win).rank(pct=True)\n \n # ts_rank(decay_linear(ts_rank(correlation(ts_rank(LOW, 7.87871), ts_rank(ADV60, 17.255), 4.97547), 18.5925), 15.7152), 6.71659)\n ts_rank_low = rolling_rank(data['lowest_price'], win=param4)\n ts_rank_adv = rolling_rank(data['turnover_vol'].rolling(window=param4).mean(), win=param5)\n corr_win, ts_win1, decay_win, ts_win2 = param6, param7, param8, param9\n decay_df = rolling_decay(rolling_rank(rolling_corr(ts_rank_low, ts_rank_adv, win=corr_win), win=ts_win1), win=decay_win)\n part2 = decay_df.iloc[-ts_win2:].rank(axis=0, pct=True).iloc[-1]\n \n alpha = float(param10) * (part1 - part2)\n return alpha\n \n def alpha101_99(self, data, param1=20, param2=16, param3=16, param4=9, param5=2, param6=7, param7=-1,\n dependencies=['highest_price', 'lowest_price', 'turnover_vol'], max_window=50):\n # -1 * (rank(correlation(sum(((HIGH + LOW) / 2), 19.8975), sum(ADV60, 19.8975), 8.8136)) <\n # rank(correlation(LOW, VOLUME, 6.28259)))\n # rearranged formula: between two parts, use - instead of <\n # -1 * (rank(correlation(sum(((HIGH + LOW) / 2), 19.8975), sum(ADV60, 19.8975), 8.8136)) -\n # rank(correlation(LOW, VOLUME, 6.28259)))\n\n # rank(correlation(sum(((HIGH + LOW) / 2), 19.8975), sum(ADV60, 
19.8975), 8.8136))\n adv_win, sum_price_win, sum_adv_win, corr_win = param1, param2, param3, param4\n sum_price = ((data['highest_price'] + data['lowest_price']) / float(param5)).rolling(window=sum_price_win).mean()\n sum_adv = data['turnover_vol'].rolling(window=adv_win).mean().rolling(window=sum_adv_win).mean()\n part1 = sum_price.iloc[-corr_win:].corrwith(sum_adv.iloc[-corr_win:]).rank(pct=True)\n \n # rank(correlation(LOW, VOLUME, 6.28259))\n corr_win = param6\n part2 = data['lowest_price'].iloc[-corr_win:].corrwith(data['turnover_vol'].iloc[-corr_win:]).rank(pct=True)\n \n alpha = float(param7) * (part1 - part2)\n return alpha\n",
"# -*- coding: utf-8 -*-\nimport argparse\nimport pandas as pd\nimport sqlalchemy as sa\nfrom integrated.calc_engine import CalcEngine\nfrom datetime import datetime\nimport warnings\nimport config\n\nwarnings.filterwarnings(\"ignore\")\ndb_url = '''mysql+mysqlconnector://{0}:{1}@{2}:{3}/{4}'''.format(config.rl_db_user,\n config.rl_db_pwd,\n config.rl_db_host,\n config.rl_db_port,\n config.rl_db_database)\n\n\ndef get_start_date(factor_name):\n destination = sa.create_engine(db_url)\n date = 20140101\n sql = \"\"\"select max(trade_date) as trade_date from factor_integrated_basic where factor_type='{0}';\"\"\".format(\n factor_name)\n trades_sets = pd.read_sql(sql, destination)\n if not trades_sets.empty:\n ts = trades_sets['trade_date'][0]\n if ts is not None:\n date = str(ts).replace('-', '')\n return date\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--start_date', type=int, default=20140101)\n parser.add_argument('--end_date', type=int, default=0)\n parser.add_argument('--factor_name', type=str, default='factor_sentiment') # factor_earning\n parser.add_argument('--update', type=bool, default=False)\n parser.add_argument('--schedule', type=bool, default=False)\n args = parser.parse_args()\n if args.end_date == 0:\n end_date = datetime.now().date().strftime('%Y%m%d')\n else:\n end_date = args.end_date\n calc_engine = CalcEngine('rl', db_url, args.factor_name)\n if args.update:\n calc_engine.local_run(args.start_date, end_date, args.factor_name)\n if args.schedule:\n start_date = get_start_date(args.factor_name)\n calc_engine.local_run(start_date, end_date, args.factor_name)\n"
] | [
[
"numpy.where",
"pandas.DataFrame"
],
[
"numpy.matrix",
"numpy.dot",
"numpy.log",
"numpy.sqrt",
"pandas.Series",
"pandas.DataFrame",
"numpy.sign",
"numpy.cov",
"numpy.prod",
"numpy.sum"
],
[
"pandas.read_sql"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
eigenfoo/Theano-PyMC | [
"d079273a9f8f8991923391eb7a83416bdd1b659e"
] | [
"theano/tensor/basic.py"
] | [
"\"\"\"A `Type` and `Op` classes to work with numpy.ndarrays symbolically.\"\"\"\n\nimport builtins\nimport logging\nimport numbers\nimport warnings\nfrom collections.abc import Sequence\nfrom functools import partial\n\nimport numpy as np\n\nimport theano\nimport theano.scalar.sharedvar\nfrom theano import compile, config, gof, printing\nfrom theano import scalar as scal\n\n# For history\nfrom theano.compile import Rebroadcast, Shape, shape\nfrom theano.gof import Apply, Constant, Op, ParamsType, Variable\nfrom theano.gof.type import Generic\n\n# We use these exceptions as well.\nfrom theano.gradient import DisconnectedType, grad_not_implemented, grad_undefined\nfrom theano.printing import min_informative_str, pprint\nfrom theano.scalar import int32\nfrom theano.tensor import elemwise\n\n# set up the external interface\nfrom theano.tensor.elemwise import CAReduce, DimShuffle, Elemwise, Sum\nfrom theano.tensor.type import TensorType, values_eq_approx_always_true\nfrom theano.tensor.type_other import NoneConst\nfrom theano.tensor.var import TensorConstant, TensorVariable, _tensor_py_operators\n\n\n_logger = logging.getLogger(\"theano.tensor.basic\")\n\n__docformat__ = \"restructuredtext en\"\n\n# Define common subsets of dtypes (as strings).\ncomplex_dtypes = list(map(str, scal.complex_types))\ncontinuous_dtypes = list(map(str, scal.continuous_types))\nfloat_dtypes = list(map(str, scal.float_types))\ninteger_dtypes = list(map(str, scal.integer_types))\ndiscrete_dtypes = list(map(str, scal.discrete_types))\nall_dtypes = list(map(str, scal.all_types))\nint_dtypes = list(map(str, scal.int_types))\nuint_dtypes = list(map(str, scal.uint_types))\n\n\nclass ShapeError(Exception):\n \"\"\"Raised when the shape cannot be computed.\"\"\"\n\n\ndef check_equal_numpy(x, y):\n \"\"\"\n Return True iff x and y are equal.\n\n Checks the dtype and shape if x and y are numpy.ndarray instances.\n\n \"\"\"\n if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):\n return x.dtype == y.dtype and x.shape == y.shape and np.all(abs(x - y) < 1e-10)\n elif isinstance(x, np.random.RandomState) and isinstance(y, np.random.RandomState):\n return builtins.all(\n np.all(a == b) for a, b in zip(x.__getstate__(), y.__getstate__())\n )\n else:\n return x == y\n\n\ncompile.register_checker(check_equal_numpy)\n\n\n__oplist_constructor_list = []\n\"\"\"List of functions to be listed as op constructors in the oplist\n(`gen_oplist`, doc/oplist.txt).\"\"\"\n\n\ndef constructor(f):\n \"\"\"Add `f` to :doc:`oplist`.\n\n Make `f` appear as a constructor in the oplist (`gen_oplist`,\n doc/oplist.txt).\n\n \"\"\"\n __oplist_constructor_list.append(f)\n return f\n\n\ndef __oplist_tag(thing, tag):\n tags = getattr(thing, \"__oplist_tags\", [])\n tags.append(tag)\n thing.__oplist_tags = tags\n\n\ndef as_tensor_variable(x, name=None, ndim=None):\n \"\"\"Convert `x` into the appropriate `TensorType`.\n\n This function is often used by `make_node` methods of `Op` subclasses to\n turn ndarrays, numbers, `Scalar` instances, `Apply` instances and\n `TensorType` instances into valid input list elements.\n\n Parameters\n ----------\n x : Apply instance, Variable instance, numpy.ndarray, or number\n This thing will be transformed into a `Variable` in a sensible way. 
An\n ndarray argument will not be copied, but a list of numbers will be\n copied to make an ndarray.\n name : str or None\n If a new `Variable` instance is created, it will be named with this\n string.\n ndim : None or integer\n Return a Variable with this many dimensions.\n\n Raises\n ------\n TypeError\n If `x` cannot be converted to a TensorType Variable.\n\n \"\"\"\n if (\n isinstance(getattr(x, \"type\", None), TensorType)\n and (name is None or x.name == name)\n and (ndim is None or x.ndim == ndim)\n ):\n return x\n\n if hasattr(x, \"_as_TensorVariable\"):\n return x._as_TensorVariable() # TODO: pass name and ndim arguments\n\n if isinstance(x, gof.Apply):\n # use Apply's default output mechanism\n if (x.op.default_output is None) and (len(x.outputs) != 1):\n raise TypeError(\n \"Multi-output Op encountered. \"\n \"Retry using only one of the outputs directly.\"\n )\n\n x = x.default_output()\n\n if isinstance(x, Variable):\n\n if isinstance(x, Constant):\n return as_tensor_variable(x.data, name=name, ndim=ndim)\n\n if isinstance(x.type, scal.Scalar):\n x = tensor_from_scalar(x)\n\n if not isinstance(x.type, TensorType):\n raise TypeError(\n \"Tensor type field must be a TensorType; found {}.\".format(type(x.type))\n )\n\n if ndim is None:\n return x\n else:\n if x.type.ndim > ndim:\n # strip off leading broadcastable dimensions\n first_non_broadcastable = [\n idx for idx in range(x.ndim) if not x.broadcastable[idx]\n ][0]\n x = x.dimshuffle(list(range(x.ndim))[first_non_broadcastable:])\n if x.ndim > ndim:\n raise ValueError(\n \"Tensor of type {} could not be cast to have {} dimensions\".format(\n x.type, ndim\n )\n )\n return x\n elif x.type.ndim < ndim:\n return shape_padleft(x, n_ones=(ndim - x.type.ndim))\n else:\n return x\n\n elif isinstance(x, Sequence):\n\n def extract_constants(i):\n if isinstance(i, Variable):\n if isinstance(i, Constant):\n return i.data\n else:\n raise TypeError\n else:\n return i\n\n try:\n x = [extract_constants(i) for i in x]\n except TypeError:\n return stack(x)\n\n elif isinstance(x, bool):\n raise TypeError(\n \"Cannot cast True or False as a tensor variable. Please use \"\n \"np.array(True) or np.array(False) if you need these constants. \"\n \"This error might be caused by using the == operator on \"\n \"Variables. v == w does not do what you think it does, \"\n \"use theano.tensor.eq(v, w) instead.\"\n )\n\n return constant(x, name=name, ndim=ndim)\n\n\n# this has a different name, because _as_tensor_variable is the\n# function which ops use to upcast their arguments... 
this\n# internal-use function is a good place to put debugging stuff, better\n# than the global astensor.\n_as_tensor_variable = as_tensor_variable\n\nas_tensor = as_tensor_variable\n\n\ndef constant(x, name=None, ndim=None, dtype=None):\n \"\"\"Return a `TensorConstant` with value `x`.\n\n Raises\n ------\n TypeError\n `x` could not be converted to a numpy.ndarray.\n ValueError\n `x` could not be expanded to have ndim dimensions.\n\n \"\"\"\n if isinstance(x, TensorConstant):\n if (\n (name is None or x.name == name)\n and (ndim is None or x.ndim == ndim)\n and (dtype is None or x.dtype == dtype)\n ):\n return x\n else:\n x = x.data\n\n x_ = scal.convert(x, dtype=dtype)\n\n if ndim is not None:\n if x_.ndim < ndim:\n x_ = np.expand_dims(x_, axis=tuple(range(ndim - x_.ndim)))\n elif x_.ndim > ndim:\n try:\n x_ = np.squeeze(x_, axis=tuple(range(x_.ndim - ndim)))\n except np.AxisError:\n raise ValueError(\n f\"ndarray could not be cast to constant with {int(ndim)} dimensions\"\n )\n\n assert x_.ndim == ndim\n\n ttype = TensorType(dtype=x_.dtype, broadcastable=[s == 1 for s in x_.shape])\n\n try:\n return TensorConstant(ttype, x_, name=name)\n except Exception:\n raise TypeError(f\"Could not convert {x} to TensorType\", type(x))\n\n\ndef _obj_is_wrappable_as_tensor(x):\n try:\n constant(x)\n return True\n except TypeError:\n return False\n\n\nif int(config.tensor.cmp_sloppy) > 1:\n # This config variable is a quick-and-dirty way to get low-precision\n # comparisons. For a more precise setting of these tolerances set\n # them explicitly in your user code by assigning, for example,\n # \"theano.tensor.basic.float32_atol = ...\"\n\n # When config.tensor.cmp_sloppy>1 we are even more sloppy. This is\n # useful to test the GPU as they don't use extended precision and\n # this cause some difference bigger then the normal sloppy.\n float16_atol = 1e-2\n float16_rtol = 5e-2\n\n float32_atol = 5e-4\n float32_rtol = 1e-3\n\n float64_rtol = 1e-4\n float64_atol = 1e-3\nelif int(config.tensor.cmp_sloppy):\n float16_atol = 5e-3\n float16_rtol = 1e-2\n\n float32_atol = 1e-4\n float32_rtol = 1e-3\n\n float64_rtol = 1e-4\n float64_atol = 1e-3\nelse:\n # If you change those value in test don't forget to put them back\n # when the test end. 
Don't forget the case when the test fail.\n float16_atol = 1e-3\n float16_rtol = 1e-3\n\n float32_atol = 1e-5\n float32_rtol = 1e-5\n\n # defaults in numpy.allclose\n # Don't be more strict then numpy rtol\n # It cause useless error.\n float64_rtol = 1.0000000000000001e-05\n float64_atol = 1e-8\n\n\ndef _get_atol_rtol(a, b):\n tiny = (\"float16\",)\n narrow = (\"float32\", \"complex64\")\n if (str(a.dtype) in tiny) or (str(b.dtype) in tiny):\n atol = float16_atol\n rtol = float16_rtol\n elif (str(a.dtype) in narrow) or (str(b.dtype) in narrow):\n atol = float32_atol\n rtol = float32_rtol\n else:\n atol = float64_atol\n rtol = float64_rtol\n return atol, rtol\n\n\ndef _allclose(a, b, rtol=None, atol=None):\n a = np.asarray(a)\n b = np.asarray(b)\n atol_, rtol_ = _get_atol_rtol(a, b)\n if rtol is not None:\n rtol_ = rtol\n if atol is not None:\n atol_ = atol\n\n return np.allclose(a, b, atol=atol_, rtol=rtol_)\n\n\nclass NotScalarConstantError(Exception):\n \"\"\"\n Raised by get_scalar_constant_value if called on something that is\n not a scalar constant.\n \"\"\"\n\n\nclass EmptyConstantError(NotScalarConstantError):\n \"\"\"\n Raised by get_scalar_const_value if called on something that is a\n zero dimensional constant.\n \"\"\"\n\n\ndef numpy_scalar(data):\n \"\"\"Return a scalar stored in a numpy ndarray.\n\n Raises\n ------\n NotScalarConstantError\n If the numpy ndarray is not a scalar.\n\n \"\"\"\n\n # handle case where data is numpy.array([])\n if data.ndim > 0 and (len(data.shape) == 0 or builtins.max(data.shape) == 0):\n assert np.all(np.array([]) == data)\n raise EmptyConstantError()\n try:\n np.complex(data) # works for all numeric scalars\n return data\n except Exception:\n raise NotScalarConstantError(\n \"v.data is non-numeric, non-scalar, or has more than one\" \" unique value\",\n data,\n )\n\n\nget_scalar_constant_value_elemwises = (\n scal.Cast,\n scal.Switch,\n scal.NEQ,\n scal.EQ,\n scal.LT,\n scal.GT,\n scal.LE,\n scal.GE,\n scal.Sub,\n scal.Add,\n scal.Mod,\n scal.Mul,\n scal.IntDiv,\n scal.TrueDiv,\n scal.Minimum,\n scal.Maximum,\n)\n\n\ndef get_scalar_constant_value(\n orig_v, elemwise=True, only_process_constants=False, max_recur=10\n):\n \"\"\"Return the constant scalar(0-D) value underlying variable `v`.\n\n If `v` is the output of dimshuffles, fills, allocs, rebroadcasts,\n cast, OutputGuard, DeepCopyOp, ScalarFromTensor, ScalarOp, Elemwise\n and some pattern with Subtensor, this function digs through them.\n\n If `v` is not some view of constant scalar data, then raise a\n NotScalarConstantError.\n\n Parameters\n ----------\n elemwise : bool\n If False, we won't try to go into elemwise. 
So this call is faster.\n But we still investigate in Second Elemwise (as this is a substitute\n for Alloc)\n only_process_constants : bool\n If True, we only attempt to obtain the value of `orig_v` if it's\n directly constant and don't try to dig through dimshuffles, fills,\n allocs, and other to figure out its value.\n max_recur : int\n The maximum number of recursion.\n\n Notes\n -----\n There may be another function similar to this one in the code,\n but I'm not sure where it is.\n\n \"\"\"\n v = orig_v\n while True:\n if v is None:\n # None is not a scalar (and many uses of this function seem\n # to depend on passing it None)\n raise NotScalarConstantError()\n\n if isinstance(v, (np.integer, int, float)):\n return np.asarray(v)\n\n if isinstance(v, np.ndarray):\n return numpy_scalar(v).copy()\n\n if isinstance(v, Constant):\n if getattr(v.tag, \"unique_value\", None) is not None:\n data = v.tag.unique_value\n else:\n data = v.data\n if isinstance(data, np.ndarray):\n return numpy_scalar(data).copy()\n else:\n return data\n\n if not only_process_constants and getattr(v, \"owner\", None) and max_recur > 0:\n max_recur -= 1\n if isinstance(\n v.owner.op,\n (\n Alloc,\n DimShuffle,\n Rebroadcast,\n # outputguard is only used in debugmode but we\n # keep it here to avoid problems with old pickels.\n compile.ops.OutputGuard,\n compile.DeepCopyOp,\n ),\n ):\n v = v.owner.inputs[0]\n continue\n elif isinstance(v.owner.op, theano.compile.ops.Shape_i):\n i = v.owner.op.i\n inp = v.owner.inputs[0]\n if isinstance(inp, Constant):\n return np.asarray(inp.data.shape[i])\n # The shape of a broadcastable dimension is 1\n if hasattr(inp.type, \"broadcastable\") and inp.type.broadcastable[i]:\n return np.asarray(1)\n\n # Don't act as the constant_folding optimization here as this\n # fct is used too early in the optimization phase. This would\n # mess with the stabilization optimization and be too slow.\n # We put all the scalar Ops used by get_canonical_form_slice()\n # to allow it to determine the broadcast pattern correctly.\n elif isinstance(v.owner.op, (ScalarFromTensor, TensorFromScalar)):\n v = v.owner.inputs[0]\n continue\n elif isinstance(v.owner.op, theano.tensor.opt.Assert):\n # check if all conditions are constant and true\n cond = [\n get_scalar_constant_value(c, max_recur=max_recur)\n for c in v.owner.inputs[1:]\n ]\n if builtins.all([0 == c.ndim and c != 0 for c in cond]):\n v = v.owner.inputs[0]\n continue\n elif isinstance(v.owner.op, scal.ScalarOp):\n if isinstance(v.owner.op, scal.Second):\n # We don't need both input to be constant for second\n shp, val = v.owner.inputs\n v = val\n continue\n if isinstance(v.owner.op, get_scalar_constant_value_elemwises):\n const = [\n get_scalar_constant_value(i, max_recur=max_recur)\n for i in v.owner.inputs\n ]\n ret = [[None]]\n v.owner.op.perform(v.owner, const, ret)\n return ret[0][0].copy()\n # In fast_compile, we don't enable local_fill_to_alloc, so\n # we need to investigate Second as Alloc. 
So elemwise\n # don't disable the check for Second.\n elif isinstance(v.owner.op, Elemwise):\n if isinstance(v.owner.op.scalar_op, scal.Second):\n # We don't need both input to be constant for second\n shp, val = v.owner.inputs\n v = val\n continue\n elif elemwise and isinstance(\n v.owner.op.scalar_op, get_scalar_constant_value_elemwises\n ):\n const = [\n get_scalar_constant_value(i, max_recur=max_recur)\n for i in v.owner.inputs\n ]\n ret = [[None]]\n v.owner.op.perform(v.owner, const, ret)\n return ret[0][0].copy()\n elif (\n isinstance(v.owner.op, theano.tensor.subtensor.Subtensor)\n and v.ndim == 0\n ):\n if isinstance(v.owner.inputs[0], TensorConstant):\n cdata = tuple(v.owner.op.get_constant_idx(v.owner.inputs))\n try:\n return v.owner.inputs[0].data.__getitem__(cdata).copy()\n except IndexError:\n raise IndexError(\n str(tuple(v.owner.op.idx_list))\n + \" is not a valid index into \"\n + str(v.owner.inputs[0].data)\n )\n\n # The index list 'idx_list' should have length the same\n # shape as the input.\n # TODO: implement the case where we take a scalar in a matrix\n assert len(v.owner.op.idx_list) == v.owner.inputs[0].ndim\n\n # Needed to make better graph in this test in\n # theano/tensor/tests/test_sharedvar.py:\n # test_shared_options.test_specify_shape_partial\n if (\n v.owner.inputs[0].owner\n and isinstance(v.owner.inputs[0].owner.op, Join)\n and len(v.owner.op.idx_list) == 1\n ):\n # Ensure the Join is joining only scalar variables (so that\n # the constant value can be found at the same index as the\n # one used in the sub-tensor).\n if builtins.all(\n var.ndim == 0 for var in v.owner.inputs[0].owner.inputs[1:]\n ):\n idx = v.owner.op.idx_list[0]\n if isinstance(idx, gof.Type):\n idx = get_scalar_constant_value(\n v.owner.inputs[1], max_recur=max_recur\n )\n # Note the '+ 1' is because the first argument to Join\n # is the axis.\n ret = v.owner.inputs[0].owner.inputs[idx + 1]\n ret = get_scalar_constant_value(ret, max_recur=max_recur)\n # join can cast implicitly its input in some case.\n return theano._asarray(ret, dtype=v.type.dtype)\n if builtins.all(\n var.ndim == 1 for var in v.owner.inputs[0].owner.inputs[1:]\n ):\n idx = v.owner.op.idx_list[0]\n if isinstance(idx, gof.Type):\n idx = get_scalar_constant_value(\n v.owner.inputs[1], max_recur=max_recur\n )\n try:\n # TODO: assert joined axis is 0.\n length = 0\n loop = False\n for joined in v.owner.inputs[0].owner.inputs[1:]:\n ll = get_vector_length(joined)\n if idx < length + ll:\n v = joined[idx - length]\n loop = True\n break\n length += ll\n if loop:\n continue\n except TypeError:\n pass\n except ValueError:\n pass\n\n elif (\n v.owner.inputs[0].owner\n and isinstance(\n v.owner.inputs[0].owner.op, theano.tensor.opt.MakeVector\n )\n and\n # MakeVector normally accept only scalar as input.\n # We put this check in case there is change in the future\n builtins.all(\n var.ndim == 0 for var in v.owner.inputs[0].owner.inputs\n )\n and len(v.owner.op.idx_list) == 1\n ):\n\n idx = v.owner.op.idx_list[0]\n if isinstance(idx, gof.Type):\n idx = get_scalar_constant_value(\n v.owner.inputs[1], max_recur=max_recur\n )\n # Python 2.4 does not support indexing with numpy.integer\n # So we cast it.\n idx = int(idx)\n ret = v.owner.inputs[0].owner.inputs[idx]\n ret = get_scalar_constant_value(ret, max_recur=max_recur)\n # MakeVector can cast implicitly its input in some case.\n return theano._asarray(ret, dtype=v.type.dtype)\n\n # This is needed when we take the grad as the Shape op\n # are not already changed into MakeVector\n 
owner = v.owner\n leftmost_parent = owner.inputs[0]\n if leftmost_parent.owner and isinstance(\n leftmost_parent.owner.op, theano.tensor.Shape\n ):\n op = owner.op\n idx_list = op.idx_list\n idx = idx_list[0]\n if isinstance(idx, gof.Type):\n idx = get_scalar_constant_value(\n owner.inputs[1], max_recur=max_recur\n )\n grandparent = leftmost_parent.owner.inputs[0]\n gp_broadcastable = grandparent.type.broadcastable\n ndim = grandparent.type.ndim\n if grandparent.owner and isinstance(\n grandparent.owner.op, Rebroadcast\n ):\n ggp_broadcastable = grandparent.owner.inputs[0].broadcastable\n l = [\n b1 or b2\n for b1, b2 in zip(ggp_broadcastable, gp_broadcastable)\n ]\n gp_broadcastable = tuple(l)\n\n assert ndim == len(gp_broadcastable)\n\n if not (idx < len(gp_broadcastable)):\n msg = (\n \"get_scalar_constant_value detected \"\n f\"deterministic IndexError: x.shape[{int(idx)}] \"\n f\"when x.ndim={int(ndim)}.\"\n )\n if config.exception_verbosity == \"high\":\n msg += f\" x={min_informative_str(v)}\"\n else:\n msg += f\" x={v}\"\n raise ValueError(msg)\n\n if gp_broadcastable[idx]:\n return np.asarray(1)\n\n raise NotScalarConstantError(v)\n\n\n# Easy constructors\n\n\ndef tensor(*args, **kwargs):\n name = kwargs.pop(\"name\", None)\n return TensorType(*args, **kwargs)(name=name)\n\n\ndef _multi(*fns):\n def f2(f, *names):\n if names and isinstance(names[0], int):\n if names == 1:\n return f()\n else:\n return [f() for i in range(names[0])]\n if isinstance(names, tuple):\n if len(names) == 1:\n names = names[0]\n if len(names) == 1:\n return f(names)\n else:\n return [f(name) for name in names]\n\n if len(fns) == 1:\n return partial(f2, fns)\n else:\n return [partial(f2, f) for f in fns]\n\n\ncscalar = TensorType(\"complex64\", ())\nzscalar = TensorType(\"complex128\", ())\nfscalar = TensorType(\"float32\", ())\ndscalar = TensorType(\"float64\", ())\nbscalar = TensorType(\"int8\", ())\nwscalar = TensorType(\"int16\", ())\niscalar = TensorType(\"int32\", ())\nlscalar = TensorType(\"int64\", ())\n\n\ndef scalar(name=None, dtype=None):\n \"\"\"Return a symbolic scalar variable.\n\n Parameters\n ----------\n dtype: numeric\n None means to use theano.config.floatX.\n name\n A name to attach to this variable.\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n type = TensorType(dtype, ())\n return type(name)\n\n\nscalars, fscalars, dscalars, iscalars, lscalars = _multi(\n scalar, fscalar, dscalar, iscalar, lscalar\n)\n\nint_types = bscalar, wscalar, iscalar, lscalar\nfloat_types = fscalar, dscalar\ncomplex_types = cscalar, zscalar\nint_scalar_types = int_types\nfloat_scalar_types = float_types\ncomplex_scalar_types = complex_types\n\ncvector = TensorType(\"complex64\", (False,))\nzvector = TensorType(\"complex128\", (False,))\nfvector = TensorType(\"float32\", (False,))\ndvector = TensorType(\"float64\", (False,))\nbvector = TensorType(\"int8\", (False,))\nwvector = TensorType(\"int16\", (False,))\nivector = TensorType(\"int32\", (False,))\nlvector = TensorType(\"int64\", (False,))\n\n\ndef vector(name=None, dtype=None):\n \"\"\"Return a symbolic vector variable.\n\n Parameters\n ----------\n dtype: numeric\n None means to use theano.config.floatX.\n name\n A name to attach to this variable\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n type = TensorType(dtype, (False,))\n return type(name)\n\n\nvectors, fvectors, dvectors, ivectors, lvectors = _multi(\n vector, fvector, dvector, ivector, lvector\n)\n\nint_vector_types = bvector, wvector, ivector, lvector\nfloat_vector_types = 
fvector, dvector\ncomplex_vector_types = cvector, zvector\n\ncmatrix = TensorType(\"complex64\", (False, False))\nzmatrix = TensorType(\"complex128\", (False, False))\nfmatrix = TensorType(\"float32\", (False, False))\ndmatrix = TensorType(\"float64\", (False, False))\nbmatrix = TensorType(\"int8\", (False, False))\nwmatrix = TensorType(\"int16\", (False, False))\nimatrix = TensorType(\"int32\", (False, False))\nlmatrix = TensorType(\"int64\", (False, False))\n\n\ndef matrix(name=None, dtype=None):\n \"\"\"Return a symbolic matrix variable.\n\n Parameters\n ----------\n dtype: numeric\n None means to use theano.config.floatX.\n name\n A name to attach to this variable.\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n type = TensorType(dtype, (False, False))\n return type(name)\n\n\nmatrices, fmatrices, dmatrices, imatrices, lmatrices = _multi(\n matrix, fmatrix, dmatrix, imatrix, lmatrix\n)\n\nint_matrix_types = bmatrix, wmatrix, imatrix, lmatrix\nfloat_matrix_types = fmatrix, dmatrix\ncomplex_matrix_types = cmatrix, zmatrix\n\ncrow = TensorType(\"complex64\", (True, False))\nzrow = TensorType(\"complex128\", (True, False))\nfrow = TensorType(\"float32\", (True, False))\ndrow = TensorType(\"float64\", (True, False))\nbrow = TensorType(\"int8\", (True, False))\nwrow = TensorType(\"int16\", (True, False))\nirow = TensorType(\"int32\", (True, False))\nlrow = TensorType(\"int64\", (True, False))\n\n\ndef row(name=None, dtype=None):\n \"\"\"Return a symbolic row variable (ndim=2, broadcastable=[True,False]).\n\n Parameters\n ----------\n dtype: numeric type\n None means to use theano.config.floatX.\n name\n A name to attach to this variable.\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n type = TensorType(dtype, (True, False))\n return type(name)\n\n\nrows, frows, drows, irows, lrows = _multi(row, frow, drow, irow, lrow)\n\nccol = TensorType(\"complex64\", (False, True))\nzcol = TensorType(\"complex128\", (False, True))\nfcol = TensorType(\"float32\", (False, True))\ndcol = TensorType(\"float64\", (False, True))\nbcol = TensorType(\"int8\", (False, True))\nwcol = TensorType(\"int16\", (False, True))\nicol = TensorType(\"int32\", (False, True))\nlcol = TensorType(\"int64\", (False, True))\n\n\ndef col(name=None, dtype=None):\n \"\"\"Return a symbolic column variable (ndim=2, broadcastable=[False,True]).\n\n Parameters\n ----------\n dtype : numeric\n None means to use theano.config.floatX.\n name\n A name to attach to this variable.\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n type = TensorType(dtype, (False, True))\n return type(name)\n\n\ncols, fcols, dcols, icols, lcols = _multi(col, fcol, dcol, icol, lcol)\n\nctensor3 = TensorType(\"complex64\", ((False,) * 3))\nztensor3 = TensorType(\"complex128\", ((False,) * 3))\nftensor3 = TensorType(\"float32\", ((False,) * 3))\ndtensor3 = TensorType(\"float64\", ((False,) * 3))\nbtensor3 = TensorType(\"int8\", ((False,) * 3))\nwtensor3 = TensorType(\"int16\", ((False,) * 3))\nitensor3 = TensorType(\"int32\", ((False,) * 3))\nltensor3 = TensorType(\"int64\", ((False,) * 3))\n\n\ndef tensor3(name=None, dtype=None):\n \"\"\"Return a symbolic 3-D variable.\n\n Parameters\n ----------\n dtype: numeric type\n None means to use theano.config.floatX.\n name\n A name to attach to this variable.\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n type = TensorType(dtype, (False, False, False))\n return type(name)\n\n\ntensor3s, ftensor3s, dtensor3s, itensor3s, ltensor3s = _multi(\n tensor3, ftensor3, dtensor3, itensor3, 
ltensor3\n)\n\nctensor4 = TensorType(\"complex64\", ((False,) * 4))\nztensor4 = TensorType(\"complex128\", ((False,) * 4))\nftensor4 = TensorType(\"float32\", ((False,) * 4))\ndtensor4 = TensorType(\"float64\", ((False,) * 4))\nbtensor4 = TensorType(\"int8\", ((False,) * 4))\nwtensor4 = TensorType(\"int16\", ((False,) * 4))\nitensor4 = TensorType(\"int32\", ((False,) * 4))\nltensor4 = TensorType(\"int64\", ((False,) * 4))\n\n\ndef tensor4(name=None, dtype=None):\n \"\"\"Return a symbolic 4-D variable.\n\n Parameters\n ----------\n dtype: numeric type\n None means to use theano.config.floatX.\n name\n A name to attach to this variable.\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n type = TensorType(dtype, (False, False, False, False))\n return type(name)\n\n\ntensor4s, ftensor4s, dtensor4s, itensor4s, ltensor4s = _multi(\n tensor4, ftensor4, dtensor4, itensor4, ltensor4\n)\n\nctensor5 = TensorType(\"complex64\", ((False,) * 5))\nztensor5 = TensorType(\"complex128\", ((False,) * 5))\nftensor5 = TensorType(\"float32\", ((False,) * 5))\ndtensor5 = TensorType(\"float64\", ((False,) * 5))\nbtensor5 = TensorType(\"int8\", ((False,) * 5))\nwtensor5 = TensorType(\"int16\", ((False,) * 5))\nitensor5 = TensorType(\"int32\", ((False,) * 5))\nltensor5 = TensorType(\"int64\", ((False,) * 5))\n\n\ndef tensor5(name=None, dtype=None):\n \"\"\"Return a symbolic 5-D variable.\n\n Parameters\n ----------\n dtype: numeric type\n None means to use theano.config.floatX.\n name\n A name to attach to this variable.\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n type = TensorType(dtype, (False, False, False, False, False))\n return type(name)\n\n\ntensor5s, ftensor5s, dtensor5s, itensor5s, ltensor5s = _multi(\n tensor5, ftensor5, dtensor5, itensor5, ltensor5\n)\n\nctensor6 = TensorType(\"complex64\", ((False,) * 6))\nztensor6 = TensorType(\"complex128\", ((False,) * 6))\nftensor6 = TensorType(\"float32\", ((False,) * 6))\ndtensor6 = TensorType(\"float64\", ((False,) * 6))\nbtensor6 = TensorType(\"int8\", ((False,) * 6))\nwtensor6 = TensorType(\"int16\", ((False,) * 6))\nitensor6 = TensorType(\"int32\", ((False,) * 6))\nltensor6 = TensorType(\"int64\", ((False,) * 6))\n\n\ndef tensor6(name=None, dtype=None):\n \"\"\"Return a symbolic 6-D variable.\n\n Parameters\n ----------\n dtype: numeric type\n None means to use theano.config.floatX.\n name\n A name to attach to this variable.\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n type = TensorType(dtype, (False,) * 6)\n return type(name)\n\n\ntensor6s, ftensor6s, dtensor6s, itensor6s, ltensor6s = _multi(\n tensor6, ftensor6, dtensor6, itensor6, ltensor6\n)\n\nctensor7 = TensorType(\"complex64\", ((False,) * 7))\nztensor7 = TensorType(\"complex128\", ((False,) * 7))\nftensor7 = TensorType(\"float32\", ((False,) * 7))\ndtensor7 = TensorType(\"float64\", ((False,) * 7))\nbtensor7 = TensorType(\"int8\", ((False,) * 7))\nwtensor7 = TensorType(\"int16\", ((False,) * 7))\nitensor7 = TensorType(\"int32\", ((False,) * 7))\nltensor7 = TensorType(\"int64\", ((False,) * 7))\n\n\ndef tensor7(name=None, dtype=None):\n \"\"\"Return a symbolic 7-D variable.\n\n Parameters\n ----------\n dtype: numeric type\n None means to use theano.config.floatX.\n name\n A name to attach to this variable.\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n type = TensorType(dtype, (False,) * 7)\n return type(name)\n\n\ntensor7s, ftensor7s, dtensor7s, itensor7s, ltensor7s = _multi(\n tensor7, ftensor7, dtensor7, itensor7, ltensor7\n)\n\n\nTensor = TensorType\n\n\n# 
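# A minimal usage sketch of the type constructors defined above (assuming the
# standard public `theano`/`theano.tensor` API; the variable names are illustrative only).
import numpy as np
import theano
import theano.tensor as tt

x = tt.fmatrix("x")      # float32 matrix, i.e. the fmatrix type from the table above
v = tt.dvector("v")      # float64 vector
f = theano.function([x, v], [x * 2, v.sum()])
doubled, total = f(np.ones((2, 3), dtype="float32"), np.arange(3.0))
print(doubled.shape, float(total))   # (2, 3) 3.0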
This bizarre push-import avoids a circular dependency.\nelemwise.as_tensor_variable = as_tensor_variable\nelemwise.TensorType = TensorType\nelemwise.TensorVariable = TensorVariable\nelemwise.TensorConstant = TensorConstant\n\n#########################\n# Utilities\n#########################\n\n\ndef _scal_elemwise_with_nfunc(nfunc, nin, nout):\n \"\"\"\n Replace a symbol definition with an elementwise version of the\n corresponding scalar Op. If it is not None, the nfunc argument\n should be a string such that getattr(numpy, nfunc) implements\n a vectorized version of the elemwise operation. nin is the number\n of inputs expected by that function, and nout is the number of\n **destination** inputs it takes. That is, the function should\n take nin+nout inputs. nout == 0 means that the numpy function\n does not take a numpy array argument to put its result in.\n\n \"\"\"\n\n def construct(symbol):\n symbolname = symbol.__name__\n inplace = symbolname.endswith(\"_inplace\")\n if inplace:\n msg = \"inplace\"\n else:\n msg = \"no_inplace\"\n\n n = f\"Elemwise{{{symbolname},{msg}}}\"\n\n if inplace:\n scalar_op = getattr(scal, symbolname[: -len(\"_inplace\")])\n inplace_scalar_op = scalar_op.__class__(scal.transfer_type(0))\n rval = elemwise.Elemwise(\n inplace_scalar_op,\n {0: 0},\n name=n,\n nfunc_spec=(nfunc and (nfunc, nin, nout)),\n )\n else:\n scalar_op = getattr(scal, symbolname)\n rval = elemwise.Elemwise(\n scalar_op, name=n, nfunc_spec=(nfunc and (nfunc, nin, nout))\n )\n\n if getattr(symbol, \"__doc__\", False):\n rval.__doc__ = symbol.__doc__ + \"\\n\" + rval.__doc__\n\n # for the meaning of this see the ./epydoc script\n # it makes epydoc display rval as if it were a function, not an object\n rval.__epydoc_asRoutine = symbol\n rval.__module__ = \"tensor\"\n\n pprint.assign(rval, printing.FunctionPrinter(symbolname))\n\n return rval\n\n return construct\n\n\n_scal_elemwise = _scal_elemwise_with_nfunc(None, None, None)\n\n\ndef _pack(x):\n \"\"\"\n Convert x to a list if it is an iterable, otherwise wrap it in a list.\n \"\"\"\n try:\n return list(x)\n except TypeError:\n return [x]\n\n\ndef check_and_normalize_axes(x, axis):\n \"\"\"\n Check axes, normalize and convert them to a Python list of integers.\n Return an empty list if argument is None.\n\n Parameters\n ----------\n x: Tensor variable\n axis = Integer, tuple or list of integers\n\n Returns\n -------\n axis: list of integers\n \"\"\"\n x = as_tensor_variable(x)\n if axis is None:\n axis = []\n elif isinstance(axis, (int, np.integer)) or (\n isinstance(axis, np.ndarray) and axis.ndim == 0\n ):\n axis = [int(axis)]\n elif isinstance(axis, (tuple, list, np.ndarray)):\n axis = [int(i) for i in axis]\n elif isinstance(axis, Variable):\n if NoneConst.equals(axis):\n axis = []\n elif not isinstance(axis, TensorConstant):\n raise TypeError(f\"Computation needs a constant axis. Got {axis}\")\n else:\n assert axis.dtype in integer_dtypes\n if isinstance(axis.data, (int, np.integer)) or (\n isinstance(axis.data, np.ndarray) and axis.data.ndim == 0\n ):\n axis = [int(axis.data)]\n elif isinstance(axis.data, (list, np.ndarray)):\n axis = [int(i) for i in axis.data]\n else:\n raise TypeError(\n f\"Axis must be an integer, tuple, list of integers or a TensorVariable. Got {axis}\"\n )\n if len(axis) > 0:\n for i in range(len(axis)):\n if axis[i] < 0:\n axis[i] += x.type.ndim\n if axis[i] < 0 or axis[i] >= x.type.ndim:\n raise ValueError(\n f\"Computation needs a valid axis number for {int(x.type.ndim)}-D tensor. 
Got {int(axis[i])}\"\n )\n axis = list(set(axis))\n axis.sort()\n return axis\n\n\n#########################\n# Casting Operations\n#########################\n\n\nclass TensorFromScalar(Op):\n\n __props__ = ()\n\n def make_node(self, s):\n assert isinstance(s.type, scal.Scalar)\n return Apply(self, [s], [tensor(dtype=s.type.dtype, broadcastable=())])\n\n def perform(self, node, inp, out_):\n (s,) = inp\n (out,) = out_\n out[0] = np.asarray(s)\n\n def infer_shape(self, node, in_shapes):\n return [()]\n\n def grad(self, inp, grads):\n (s,) = inp\n (dt,) = grads\n if s.type.dtype in float_dtypes:\n assert dt.type.dtype in float_dtypes\n return [scalar_from_tensor(dt)]\n\n # If the input dtype is an integer, then so is the output dtype,\n # and the \"zero\" gradient can be represented in that int dtype.\n # Currently, theano.grad insists that the dtype of the returned\n # gradient has a float dtype, so we use floatX.\n if s.type.dtype in discrete_dtypes:\n return [s.zeros_like().astype(theano.config.floatX)]\n\n raise NotImplementedError(\"grad not implemented for complex dtypes\")\n\n\ntensor_from_scalar = TensorFromScalar()\n\n\nclass ScalarFromTensor(Op):\n\n __props__ = ()\n\n def make_node(self, t):\n assert isinstance(t.type, TensorType)\n assert t.type.broadcastable == ()\n return Apply(\n self, [t], [scal.get_scalar_type(dtype=t.type.dtype).make_variable()]\n )\n\n def perform(self, node, inp, out_):\n (s,) = inp\n (out,) = out_\n out[0] = s.flatten()[0]\n\n def infer_shape(self, node, in_shapes):\n return [()]\n\n def grad(self, inp, grads):\n (s,) = inp\n (dt,) = grads\n return [tensor_from_scalar(dt)]\n\n def R_op(self, inputs, eval_points):\n if None in eval_points:\n return [None]\n return self.make_node(*eval_points).outputs\n\n def c_code(self, node, name, inputs, outputs, sub):\n (x,) = inputs\n (z,) = outputs\n fail = sub[\"fail\"]\n return (\n \"\"\"\n %(z)s = ((dtype_%(x)s*)(PyArray_DATA(%(x)s)))[0];\n \"\"\"\n % locals()\n )\n\n def c_code_cache_version(self):\n return (1,)\n\n\nscalar_from_tensor = ScalarFromTensor()\n\n\n# to be removed as we get the epydoc routine-documenting thing going\n# -JB 20080924\ndef _conversion(real_value, name):\n __oplist_tag(real_value, \"casting\")\n real_value.__module__ = \"tensor.basic\"\n pprint.assign(real_value, printing.FunctionPrinter(name))\n return real_value\n\n\n# These _conver_to_<type> functions have leading underscores to indicate that\n# they should not be called directly. They do not perform sanity checks about\n# what types you are casting to what. 
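# A minimal sketch of the cast() helper referenced just above (assuming the public
# `theano.tensor.cast` API; variable names are illustrative).
import theano
import theano.tensor as tt

x = tt.dvector("x")                  # float64 input
x32 = tt.cast(x, "float32")          # explicit downcast
xi = tt.cast(x, "int64")             # truncating cast to integer
f = theano.function([x], [x32, xi])
print(f([1.7, -2.2]))                # roughly [array([1.7, -2.2], dtype=float32), array([1, -2])]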
That logic is implemented by the\n# `cast()` function below.\n\n_convert_to_bool = _conversion(elemwise.Elemwise(scal.convert_to_bool), \"bool\")\n\"\"\"Cast to boolean\"\"\"\n\n_convert_to_int8 = _conversion(elemwise.Elemwise(scal.convert_to_int8), \"int8\")\n\"\"\"Cast to 8-bit integer\"\"\"\n\n_convert_to_int16 = _conversion(elemwise.Elemwise(scal.convert_to_int16), \"int16\")\n\"\"\"Cast to 16-bit integer\"\"\"\n\n_convert_to_int32 = _conversion(elemwise.Elemwise(scal.convert_to_int32), \"int32\")\n\"\"\"Cast to 32-bit integer\"\"\"\n\n_convert_to_int64 = _conversion(elemwise.Elemwise(scal.convert_to_int64), \"int64\")\n\"\"\"Cast to 64-bit integer\"\"\"\n\n_convert_to_uint8 = _conversion(elemwise.Elemwise(scal.convert_to_uint8), \"uint8\")\n\"\"\"Cast to unsigned 8-bit integer\"\"\"\n\n_convert_to_uint16 = _conversion(elemwise.Elemwise(scal.convert_to_uint16), \"uint16\")\n\"\"\"Cast to unsigned 16-bit integer\"\"\"\n\n_convert_to_uint32 = _conversion(elemwise.Elemwise(scal.convert_to_uint32), \"uint32\")\n\"\"\"Cast to unsigned 32-bit integer\"\"\"\n\n_convert_to_uint64 = _conversion(elemwise.Elemwise(scal.convert_to_uint64), \"uint64\")\n\"\"\"Cast to unsigned 64-bit integer\"\"\"\n\n_convert_to_float16 = _conversion(elemwise.Elemwise(scal.convert_to_float16), \"float16\")\n\"\"\"Cast to half-precision floating point\"\"\"\n\n_convert_to_float32 = _conversion(elemwise.Elemwise(scal.convert_to_float32), \"float32\")\n\"\"\"Cast to single-precision floating point\"\"\"\n\n_convert_to_float64 = _conversion(elemwise.Elemwise(scal.convert_to_float64), \"float64\")\n\"\"\"Cast to double-precision floating point\"\"\"\n\n_convert_to_complex64 = _conversion(\n elemwise.Elemwise(scal.convert_to_complex64), \"complex64\"\n)\n\"\"\"Cast to single-precision complex\"\"\"\n\n_convert_to_complex128 = _conversion(\n elemwise.Elemwise(scal.convert_to_complex128), \"complex128\"\n)\n\"\"\"Cast to double-precision complex\"\"\"\n\n_cast_mapping = {\n \"bool\": _convert_to_bool,\n \"int8\": _convert_to_int8,\n \"int16\": _convert_to_int16,\n \"int32\": _convert_to_int32,\n \"int64\": _convert_to_int64,\n \"uint8\": _convert_to_uint8,\n \"uint16\": _convert_to_uint16,\n \"uint32\": _convert_to_uint32,\n \"uint64\": _convert_to_uint64,\n \"float16\": _convert_to_float16,\n \"float32\": _convert_to_float32,\n \"float64\": _convert_to_float64,\n \"complex64\": _convert_to_complex64,\n \"complex128\": _convert_to_complex128,\n}\n\n\n@constructor\ndef cast(x, dtype):\n \"\"\"Symbolically cast `x` to a Tensor of type `dtype`.\"\"\"\n if dtype == \"floatX\":\n dtype = config.floatX\n\n _x = as_tensor_variable(x)\n if _x.type.dtype == dtype:\n return _x\n if _x.type.dtype.startswith(\"complex\") and not dtype.startswith(\"complex\"):\n raise TypeError(\n \"Casting from complex to real is ambiguous: consider real(), \"\n \"imag(), angle() or abs()\"\n )\n return _cast_mapping[dtype](x)\n\n\n##########################\n# Unary Operations\n##########################\n\n\nclass MaxAndArgmax(Op):\n \"\"\"\n Calculate the max and argmax over a given axis or over all axes.\n\n \"\"\"\n\n nin = 2 # tensor, axis\n nout = 2 # max val, max idx\n E_axis = \"invalid axis\"\n params_type = Generic()\n __props__ = (\"axis\",)\n _f16_ok = True\n\n def __init__(self, axis):\n assert isinstance(axis, list)\n self.axis = tuple(axis)\n\n def get_params(self, node):\n return self.axis\n\n def make_node(self, x):\n x = _as_tensor_variable(x)\n\n # We keep the original broadcastable flags for dimensions on which\n # we do not 
perform the max / argmax.\n all_axes = set(self.axis)\n broadcastable = [\n b for i, b in enumerate(x.type.broadcastable) if i not in all_axes\n ]\n inputs = [x]\n outputs = [\n tensor(x.type.dtype, broadcastable, name=\"max\"),\n tensor(\"int64\", broadcastable, name=\"argmax\"),\n ]\n return Apply(self, inputs, outputs)\n\n def perform(self, node, inp, outs, params):\n x = inp[0]\n axes = params\n max, max_idx = outs\n if axes is None:\n axes = tuple(range(x.ndim))\n else:\n axes = tuple(int(ax) for ax in axes)\n max[0] = theano._asarray(np.max(x, axes), dtype=node.outputs[0].dtype)\n # Numpy does not support multiple axes for argmax\n # Work around\n keep_axes = np.array([i for i in range(x.ndim) if i not in axes], dtype=\"int64\")\n # Not-reduced axes in front\n transposed_x = np.transpose(x, np.concatenate((keep_axes, axes)))\n kept_shape = transposed_x.shape[: len(keep_axes)]\n reduced_shape = transposed_x.shape[len(keep_axes) :]\n\n # Numpy.prod returns 1.0 when arg is empty, so we cast it to int64\n # Otherwise reshape would complain citing float arg\n new_shape = kept_shape + (np.prod(reduced_shape, dtype=\"int64\"),)\n reshaped_x = transposed_x.reshape(new_shape)\n\n max_idx[0] = theano._asarray(np.argmax(reshaped_x, axis=-1), dtype=\"int64\")\n\n def c_code(self, node, name, inp, out, sub):\n if len(self.axis) != 1 and len(self.axis) != node.inputs[0].ndim:\n raise NotImplementedError(\n \"NumPy C-API can compute max and argmax only for 1 axis or for all axes.\"\n )\n x = inp[0]\n axis = sub[\"params\"]\n max, argmax = out\n fail = sub[\"fail\"]\n ret = \"\"\"\n #if PY_MAJOR_VERSION >= 3\n #ifndef PyInt_AS_LONG\n #define PyInt_AS_LONG PyLong_AS_LONG\n #endif\n #endif\n\n int axis;\n\n if (PyTuple_GET_SIZE(%(axis)s) == PyArray_NDIM(%(x)s)) {\n axis = NPY_MAXDIMS;\n } else if(PyTuple_GET_SIZE(%(axis)s) == 1) {\n PyObject* axis_object = PyTuple_GET_ITEM(%(axis)s, 0);\n axis = (int)PyInt_AS_LONG(axis_object);\n if (axis > PyArray_NDIM(%(x)s)-1 || axis < -PyArray_NDIM(%(x)s)) {\n PyErr_SetString(PyExc_ValueError,\n \"MaxAndArgmax: bad axis argument\");\n %(fail)s\n }\n } else {\n PyErr_SetString(PyExc_NotImplementedError,\n \"MaxAndArgmax: NumPy C-API can compute max and argmax only for 1 axis or for all axes.\");\n %(fail)s\n }\n\n Py_CLEAR(%(max)s);\n Py_CLEAR(%(argmax)s);//todo pass them as out parameter.\n\n %(max)s = (PyArrayObject*)PyArray_Max(%(x)s, axis, NULL);\n if (%(max)s == NULL) {\n %(fail)s;\n }\n if (!PyArray_CheckExact(%(max)s)) {\n %(max)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(max)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);\n if(%(max)s == NULL){\n %(fail)s;\n }\n }\n\n %(argmax)s = (PyArrayObject*)PyArray_ArgMax(%(x)s, axis, NULL);\n if (%(argmax)s == NULL) {\n Py_CLEAR(%(max)s);\n %(fail)s;\n }\n if (!PyArray_CheckExact(%(argmax)s)) {\n %(argmax)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(argmax)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);\n if(%(argmax)s == NULL){\n %(fail)s;\n }\n }\n if (PyArray_TYPE(%(argmax)s) != NPY_INT64) {\n PyObject * tmp = PyArray_Cast(%(argmax)s, NPY_INT64);\n if (NULL == tmp){\n %(fail)s;\n }\n Py_DECREF(%(argmax)s);\n %(argmax)s = (PyArrayObject*)tmp;\n }\n \"\"\"\n return ret % locals()\n\n def c_code_cache_version(self):\n return (5,)\n\n def infer_shape(self, node, shapes):\n ishape = shapes[0]\n rval = tuple(\n ishape[i]\n for (i, b) in enumerate(node.inputs[0].type.broadcastable)\n if i not in self.axis\n )\n return [rval, rval]\n\n def R_op(self, inputs, eval_points):\n if eval_points[0] is None:\n return [None, 
None]\n if len(self.axis) != 1:\n raise ValueError(\"R_op supported for arg_max only for \" \"one axis!\")\n if self.axis[0] > 1:\n raise ValueError(\"R_op supported for arg_max only when \" \" axis is 0 or 1\")\n if inputs[0].ndim != 2:\n raise ValueError(\n \"R_op supported for arg_max only when \" \" input is a matrix\"\n )\n max_vals, max_pos = self.make_node(*inputs).outputs\n if self.axis[0] == 0:\n return [eval_points[0][max_pos, arange(eval_points[0].shape[1])], None]\n else:\n return [eval_points[0][arange(eval_points[0].shape[0]), max_pos], None]\n\n def grad(self, inp, grads):\n # The strict sense mathematical gradient of the maximum function is\n # not calculated here for it is not defined at every point where some\n # coordinates are identical. However, since the latter set has null\n # Lebesgue measure, the result may be interpreted as weak gradient.\n\n # @note: This function should work correctly for L{vector}s.\n # (x, y), (gz, gw)\n # gz*dz/dx + gw*dw/dx, gz*dz/dy + gw*dw/dy\n # gMax * dMax/dx + gArgMax * dArgMax/dx,\n # gMax * dMax/daxis + gArgMax * dArgMax/daxis\n # g_max has one less dimension than x, so you need to complete\n # g_max to x's shape when axis=0 the broadcasting mechanism\n # does it automatically\n x = inp[0]\n axis = _as_tensor_variable(self.axis)\n g_max, g_max_idx = grads\n\n g_max_disconnected = isinstance(g_max.type, DisconnectedType)\n g_max_idx_disconnected = isinstance(g_max_idx.type, DisconnectedType)\n\n # if the op is totally disconnected, so are its inputs\n if g_max_disconnected and g_max_idx_disconnected:\n return [DisconnectedType()(), DisconnectedType()()]\n\n # if the max is disconnected but the argmax is not,\n # the gradient on its inputs is zero\n if g_max_disconnected:\n return [x.zeros_like()]\n if NoneConst.equals(axis):\n axis_ = list(range(x.ndim))\n else:\n axis_ = axis\n xmax = max(x, axis_)\n\n # Raise the g_max and xmax to the same number of dim as the input.\n pattern = []\n out_dim = 0\n if NoneConst.equals(axis):\n # We are taking the max/argmax over all dimensions.\n axis = None\n for i in range(x.ndim):\n if axis is None or i in axis.data:\n pattern.append(\"x\")\n else:\n pattern.append(out_dim)\n out_dim += 1\n g_max_pad = DimShuffle(g_max.broadcastable, pattern)(g_max)\n xmax_pad = DimShuffle(xmax.broadcastable, pattern)(xmax)\n\n # Set the grad to the correct position.\n g_x = eq(xmax_pad, x) * g_max_pad\n return (g_x,)\n\n\nclass Argmax(Op):\n \"\"\"\n Calculate the argmax over a given axis or over all axes.\n \"\"\"\n\n nin = 2 # tensor, axis\n nout = 1\n E_axis = \"invalid axis\"\n __props__ = (\"axis\",)\n _f16_ok = True\n\n params_type = ParamsType(c_axis=scal.int64)\n\n def __init__(self, axis):\n if axis is not None:\n axis = tuple(axis)\n self.axis = tuple(axis)\n\n def get_params(self, node):\n if self.axis is not None and len(self.axis) == 1:\n c_axis = np.int64(self.axis[0])\n else:\n # The value here doesn't matter, it won't be used\n c_axis = np.int64(-1)\n return self.params_type.get_params(c_axis=c_axis)\n\n def make_node(self, x, axis=None):\n x = _as_tensor_variable(x)\n if self.axis is None:\n all_axes = list(range(x.ndim))\n else:\n all_axes = self.axis\n inputs = [x]\n\n # We keep the original broadcastable flags for dimensions on which\n # we do not perform the argmax.\n broadcastable = [\n b for i, b in enumerate(x.type.broadcastable) if i not in all_axes\n ]\n outputs = [tensor(\"int64\", broadcastable, name=\"argmax\")]\n return Apply(self, inputs, outputs)\n\n def prepare_node(self, node, 
storage_map, compute_map, impl):\n if len(node.inputs) == 2:\n raise ValueError(\n \"You are trying to compile a graph with an old Argmax node. Either reoptimize your graph or rebuild it to get the new node format.\"\n )\n\n def perform(self, node, inp, outs, params):\n (x,) = inp\n axes = self.axis\n (max_idx,) = outs\n if axes is None:\n axes = tuple(range(x.ndim))\n\n # Numpy does not support multiple axes for argmax\n # Work around\n keep_axes = np.array([i for i in range(x.ndim) if i not in axes], dtype=\"int64\")\n # Not-reduced axes in front\n transposed_x = np.transpose(x, np.concatenate((keep_axes, axes)))\n kept_shape = transposed_x.shape[: len(keep_axes)]\n reduced_shape = transposed_x.shape[len(keep_axes) :]\n new_shape = kept_shape + (np.prod(reduced_shape),)\n reshaped_x = transposed_x.reshape(new_shape)\n\n max_idx[0] = theano._asarray(np.argmax(reshaped_x, axis=-1), dtype=\"int64\")\n\n def c_code(self, node, name, inp, out, sub):\n (x,) = inp\n (argmax,) = out\n fail = sub[\"fail\"]\n params = sub[\"params\"]\n if self.axis is None:\n axis_code = \"axis = NPY_MAXDIMS;\"\n else:\n if len(self.axis) > 1:\n raise NotImplementedError()\n # params is only used here for now\n axis_code = (\n \"\"\"\n axis = %(params)s->c_axis;\n if(axis > PyArray_NDIM(%(x)s)-1 || axis < -PyArray_NDIM(%(x)s)){\n PyErr_SetString(PyExc_ValueError,\n \"Argmax, bad axis argument\");\n %(fail)s\n }\n \"\"\"\n % locals()\n )\n ret = \"\"\"\n int axis;\n\n Py_CLEAR(%(argmax)s);//todo pass them as out parameter.\n %(axis_code)s\n\n %(argmax)s = (PyArrayObject*)PyArray_ArgMax(%(x)s, axis, NULL);\n if(%(argmax)s == NULL){\n %(fail)s;\n }\n if(!PyArray_CheckExact(%(argmax)s)){\n %(argmax)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(argmax)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);\n if(%(argmax)s == NULL){\n %(fail)s;\n }\n }\n if(PyArray_TYPE(%(argmax)s) != NPY_INT64){\n PyObject * tmp = PyArray_Cast(%(argmax)s, NPY_INT64);\n if (NULL == tmp){\n %(fail)s;\n }\n Py_DECREF(%(argmax)s);\n %(argmax)s = (PyArrayObject*)tmp;\n }\n \"\"\"\n return ret % locals()\n\n def c_code_cache_version(self):\n return (1,)\n\n def infer_shape(self, node, shapes):\n (ishape,) = shapes\n if self.axis is None:\n return [()]\n rval = tuple(\n [\n ishape[i]\n for (i, b) in enumerate(node.inputs[0].type.broadcastable)\n if i not in self.axis\n ]\n )\n return [rval]\n\n def grad(self, inp, grads):\n (x,) = inp\n\n return [x.zeros_like()]\n\n\ndef makeKeepDims(x, y, axis):\n \"\"\"\n Reintroduces in y with length one the axes of x which have been left out\n in a prior reduction of x. 
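# A plain-NumPy sketch of the multi-axis argmax work-around used in the perform()
# methods above: move the reduced axes to the back, flatten them, then take a
# single argmax over the flattened block (the helper name is illustrative).
import numpy as np

def argmax_over_axes(x, axes):
    axes = tuple(int(a) for a in axes)
    keep_axes = tuple(i for i in range(x.ndim) if i not in axes)
    transposed = np.transpose(x, keep_axes + axes)
    kept_shape = transposed.shape[: len(keep_axes)]
    reduced_size = int(np.prod(transposed.shape[len(keep_axes):], dtype="int64"))
    return np.argmax(transposed.reshape(kept_shape + (reduced_size,)), axis=-1)

x = np.arange(24).reshape(2, 3, 4)
print(argmax_over_axes(x, (1, 2)))   # flat index of the max within each (3, 4) block -> [11 11]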
With this option, the resulting tensor will\n broadcast correctly against the original tensor x.\n\n \"\"\"\n x = as_tensor_variable(x)\n y = as_tensor_variable(y)\n\n if axis is None:\n axis = list(range(x.type.ndim))\n elif isinstance(axis, (int, np.integer)):\n axis = [axis]\n elif isinstance(axis, np.ndarray) and axis.ndim == 0:\n axis = [int(axis)]\n else:\n axis = [int(a) for a in axis]\n newaxis = []\n for a in axis:\n if not isinstance(a, int):\n raise ValueError(\"keepdims option can be used only with constant axis\")\n if a < 0:\n a += x.type.ndim\n newaxis.append(a)\n i = 0\n new_dims = []\n for j, _ in enumerate(x.type.broadcastable):\n if j in newaxis:\n new_dims.append(\"x\")\n else:\n new_dims.append(i)\n i += 1\n return DimShuffle(y.type.broadcastable, new_dims)(y)\n\n\n@constructor\ndef max_and_argmax(a, axis=None, keepdims=False):\n \"\"\"\n Returns maximum elements and their indices obtained by iterating over\n given axis.\n\n When axis is None (the default value), the max is performed\n over the flattened tensor.\n\n Parameters\n ----------\n keepdims : bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n # Check axis and convert it to a Python list of integers.\n # Axis will be used as an op param of MaxAndArgmax.\n a = as_tensor_variable(a)\n axis = check_and_normalize_axes(a, axis)\n if len(axis) == 0:\n axis = list(range(a.type.ndim))\n out, argout = MaxAndArgmax(axis)(a)\n\n if keepdims:\n out = makeKeepDims(a, out, axis)\n argout = makeKeepDims(a, argout, axis)\n return [out, argout]\n\n\nclass Max(CAReduce):\n nfunc_spec = (\"max\", 1, 1)\n\n def __init__(self, axis):\n super().__init__(scal.maximum, axis)\n\n\nclass Min(CAReduce):\n nfunc_spec = (\"min\", 1, 1)\n\n def __init__(self, axis):\n super().__init__(scal.minimum, axis)\n\n\n@constructor\ndef max(x, axis=None, keepdims=False):\n \"\"\"\n Returns maximum elements obtained by iterating over given axis.\n\n When axis is None (the default value), the max is performed\n over the flattened tensor.\n\n Parameters\n ----------\n keepdims: bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option, the result\n will broadcast correctly against the original tensor.\n\n Notes\n -----\n We return an error as numpy when we reduce a dim with a shape of 0.\n\n \"\"\"\n\n # We have a choice of implementing this call with the\n # CAReduce op or the MaxAndArgmax op.\n\n # MaxAndArgmax supports grad and Rop, so we prefer to use that.\n # CAReduce is faster, but optimizations will replace MaxAndArgmax[0]\n # with CAReduce at compile time, so at this stage the important\n # thing is supporting all user interface features, not speed.\n # Some cases can be implemented only with CAReduce.\n\n # We thus prefer to use MaxAndArgmax, if possible. 
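# A minimal sketch of max_and_argmax() with keepdims as defined above (assuming the
# public `theano.tensor` API; variable names are illustrative). keepdims=True keeps
# the reduced axis with length one so the result still broadcasts against the input.
import numpy as np
import theano
import theano.tensor as tt

x = tt.dmatrix("x")
mx, amx = tt.max_and_argmax(x, axis=1, keepdims=True)
f = theano.function([x], [mx, amx, x - mx])   # x - mx broadcasts thanks to keepdims
vals, idxs, centered = f(np.array([[1.0, 5.0], [7.0, 2.0]]))
print(vals.ravel(), idxs.ravel())             # [5. 7.] [1 0]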
It does not\n # support all axis arguments, so we may need to fall back to CAReduce.\n\n try:\n out = max_and_argmax(x, axis)[0]\n except Exception:\n out = Max(axis)(x)\n\n if keepdims:\n out = makeKeepDims(x, out, axis)\n return out\n\n\n@constructor\ndef argmax(x, axis=None, keepdims=False):\n \"\"\"\n Returns indices of maximum elements obtained by iterating over given axis.\n\n When axis is None (the default value), the argmax is performed\n over the flattened tensor.\n\n Parameters\n ----------\n keepdims : bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n argout = max_and_argmax(x, axis)[1]\n\n if keepdims:\n argout = makeKeepDims(x, argout, axis)\n return argout\n\n\n@constructor\ndef min(x, axis=None, keepdims=False):\n \"\"\"\n Returns minimum elements obtained by iterating over given axis.\n\n When axis is None (the default value), the min is performed\n over the flattened tensor.\n\n Parameters\n ----------\n keepdims: bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n x = as_tensor_variable(x)\n str_x_type = str(x.dtype)\n if str_x_type.startswith(\"float\") or str_x_type in int_dtypes:\n return -max(-x, axis=axis, keepdims=keepdims)\n elif str_x_type in uint_dtypes:\n itype = np.iinfo(x.dtype)\n max_val = np.array(itype.max, dtype=itype.dtype)\n return max_val - max(max_val - x, axis=axis, keepdims=keepdims)\n elif str_x_type == \"bool\":\n return ~max(~x, axis=axis, keepdims=keepdims)\n else:\n # Be careful about unsigned integers, complex\n raise NotImplementedError()\n\n\n@constructor\ndef argmin(x, axis=None, keepdims=False):\n \"\"\"\n Returns indices of minimum elements obtained by iterating over given axis.\n\n When axis is None (the default value), the argmin is performed\n over the flattened tensor.\n\n Parameters\n ----------\n keepdims: bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. 
With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n x = as_tensor_variable(x)\n str_x_type = str(x.dtype)\n if str_x_type.startswith(\"float\") or str_x_type in int_dtypes:\n return argmax(-x, axis=axis, keepdims=keepdims)\n elif str_x_type in uint_dtypes:\n itype = np.iinfo(x.dtype)\n return argmax(itype.max - x, axis=axis, keepdims=keepdims)\n elif str_x_type == \"bool\":\n return argmax(~x, axis=axis, keepdims=keepdims)\n else:\n # Be careful about unsigned integers, complex\n raise NotImplementedError()\n\n\n@constructor\ndef smallest(*args):\n \"\"\"\n Return the [elementwise] smallest of a variable number of arguments.\n\n Like python's min.\n\n \"\"\"\n if len(args) == 2:\n a, b = args\n return switch(a < b, a, b)\n else:\n return min(stack(args), axis=0)\n\n\n@constructor\ndef largest(*args):\n \"\"\"\n Return the [elementwise] largest of a variable number of arguments.\n\n Like python's max.\n\n \"\"\"\n if len(args) == 2:\n a, b = args\n return switch(a > b, a, b)\n else:\n return max(stack(args), axis=0)\n\n\n##########################\n# Comparison\n##########################\n\n\n@_scal_elemwise\ndef lt(a, b):\n \"\"\"a < b\"\"\"\n\n\n@_scal_elemwise\ndef gt(a, b):\n \"\"\"a > b\"\"\"\n\n\n@_scal_elemwise\ndef le(a, b):\n \"\"\"a <= b\"\"\"\n\n\n@_scal_elemwise\ndef ge(a, b):\n \"\"\"a >= b\"\"\"\n\n\n@_scal_elemwise\ndef eq(a, b):\n \"\"\"a == b\"\"\"\n\n\n@_scal_elemwise\ndef neq(a, b):\n \"\"\"a != b\"\"\"\n\n\n@_scal_elemwise\ndef isnan(a):\n \"\"\"isnan(a)\"\"\"\n\n\n# Rename isnan to isnan_ to allow to bypass it when not needed.\n# glibc 2.23 don't allow isnan on int, so we remove it from the graph.\nisnan_ = isnan\n\n\ndef isnan(a):\n \"\"\"isnan(a)\"\"\"\n a = as_tensor_variable(a)\n if a.dtype in discrete_dtypes:\n return alloc(\n np.asarray(False, dtype=\"bool\"), *[a.shape[i] for i in range(a.ndim)]\n )\n return isnan_(a)\n\n\n@_scal_elemwise\ndef isinf(a):\n \"\"\"isinf(a)\"\"\"\n\n\n# Rename isnan to isnan_ to allow to bypass it when not needed.\n# glibc 2.23 don't allow isnan on int, so we remove it from the graph.\nisinf_ = isinf\n\n\ndef isinf(a):\n \"\"\"isinf(a)\"\"\"\n a = as_tensor_variable(a)\n if a.dtype in discrete_dtypes:\n return alloc(\n np.asarray(False, dtype=\"bool\"), *[a.shape[i] for i in range(a.ndim)]\n )\n return isinf_(a)\n\n\ndef allclose(a, b, rtol=1.0e-5, atol=1.0e-8, equal_nan=False):\n \"\"\"\n Implement Numpy's ``allclose`` on tensors.\n\n ``absolute(a - b) <= (atol + rtol * absolute(b))``\n\n Parameters\n ----------\n a : tensor\n Input to compare.\n b : tensor\n Input to compare.\n rtol : float\n The relative tolerance parameter.\n atol : float\n The absolute tolerance parameter.\n equal_nan: bool\n Whether to consider nan's in the same place to be close.\n\n Returns\n -------\n bool\n A boolean value (of type int8 returned by the tensor elementwise `all`\n function) whether all elements in a and b are in the tolerance range\n defined above.\n\n Notes\n -----\n Not a symmetric equation. See Numpy's documentation.\n\n \"\"\"\n return all(isclose(a, b, rtol, atol, equal_nan))\n\n\ndef isclose(a, b, rtol=1.0e-5, atol=1.0e-8, equal_nan=False):\n \"\"\"\n Implements Numpy's ``isclose`` on tensors.\n\n The tolerance values are positive, typically very small numbers. 
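# A minimal sketch of the elementwise comparison Ops and the isnan()/isinf()
# wrappers above (assuming the public `theano.tensor` API; names are illustrative).
import numpy as np
import theano
import theano.tensor as tt

a = tt.dvector("a")
b = tt.dvector("b")
f = theano.function([a, b], [tt.lt(a, b), tt.eq(a, b), tt.isnan(a)])
lt_m, eq_m, nan_m = f(np.array([1.0, np.nan]), np.array([2.0, 3.0]))
print(lt_m, eq_m, nan_m)    # 0/1 masks, e.g. [1 0] [0 0] [0 1] (int8 or bool depending on version)

i = tt.lvector("i")
print(tt.isnan(i).eval({i: np.array([1, 2])}))  # all False: isnan on discrete dtypes is removed from the graph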
The\n relative difference (`rtol` * abs(`b`)) and the absolute difference\n `atol` are added together to compare against the absolute difference\n between `a` and `b`.\n\n ``absolute(a - b) <= (atol + rtol * absolute(b))``\n\n Parameters\n ----------\n a : tensor\n Input to compare.\n b : tensor\n Input to compare.\n rtol : float\n The relative tolerance parameter.\n atol : float\n The absolute tolerance parameter.\n equal_nan : bool\n Whether to consider nan's in the same place to be close\n\n Returns\n -------\n int8\n A boolean (int8) array where two arrays are element-wise equal\n within a tolerance.\n\n Notes\n -----\n Not a symmetric equation. See Numpy's documentation.\n\n Examples\n --------\n >>> import theano\n >>> import numpy as np\n >>> a = theano._asarray([1e10, 1e-7], dtype=\"float64\")\n >>> b = theano._asarray([1.00001e10, 1e-8], dtype=\"float64\")\n >>> theano.tensor.isclose(a, b).eval()\n array([1, 0], dtype=int8)\n >>> a = theano._asarray([1e10, 1e-8], dtype=\"float64\")\n >>> b = theano._asarray([1.00001e10, 1e-9], dtype=\"float64\")\n >>> theano.tensor.isclose(a, b).eval()\n array([1, 1], dtype=int8)\n >>> a = theano._asarray([1e10, 1e-8], dtype=\"float64\")\n >>> b = theano._asarray([1.0001e10, 1e-9], dtype=\"float64\")\n >>> theano.tensor.isclose(a, b).eval()\n array([0, 1], dtype=int8)\n >>> a = theano._asarray([1.0, np.nan], dtype=\"float64\")\n >>> b = theano._asarray([1.0, np.nan], dtype=\"float64\")\n >>> theano.tensor.isclose(a, b).eval()\n array([1, 0], dtype==int8)\n >>> a = theano._asarray([1.0, np.nan], dtype=\"float64\")\n >>> b = theano._asarray([1.0, np.nan], dtype=\"float64\")\n >>> theano.tensor.isclose(a, b, equal_nan=True).eval()\n array([1, 1], dtype==int8)\n >>> a = theano._asarray([1.0, np.inf], dtype=\"float64\")\n >>> b = theano._asarray([1.0, -np.inf], dtype=\"float64\")\n >>> theano.tensor.isclose(a, b).eval()\n array([1, 0], dtype==int8)\n >>> a = theano._asarray([1.0, np.inf], dtype=\"float64\")\n >>> b = theano._asarray([1.0, np.inf], dtype=\"float64\")\n >>> theano.tensor.isclose(a, b).eval()\n array([1, 1], dtype==int8)\n\n \"\"\"\n # close will be an int8 array of 1 where within tolerance\n # and 0 where not within tolerance or there was a nan or inf value.\n diff = abs(a - b)\n tolerance = atol + rtol * abs(b)\n close_prelim = le(diff, tolerance)\n\n a_nan = isnan(a)\n b_nan = isnan(b)\n nans = bitwise_or(a_nan, b_nan)\n\n a_inf = isinf(a)\n b_inf = isinf(b)\n infs = bitwise_or(a_inf, b_inf)\n\n nans_or_infs = bitwise_or(nans, infs)\n\n # close is now an array of 0's except where elements are not nan or inf\n # and are within the tolerance.\n close = bitwise_and(close_prelim, bitwise_not(nans_or_infs))\n\n # deal with signed inf values. 
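# A minimal sketch of isclose()/allclose() as defined above: the asymmetric rule
# abs(a - b) <= atol + rtol * abs(b), with NaN/inf handled as in the docstring
# (assuming the public `theano.tensor` API; the input values are illustrative).
import numpy as np
import theano.tensor as tt

a = np.array([1.0, np.nan, np.inf])
b = np.array([1.0 + 1e-9, np.nan, np.inf])
print(tt.isclose(a, b).eval())                   # [1 0 1]: NaN pairs are not close by default
print(tt.isclose(a, b, equal_nan=True).eval())   # [1 1 1]
print(tt.allclose(a, b, equal_nan=True).eval())  # 1 (reduction with `all` over the mask)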
this will make an array inf_eq of 0's\n # except where inf values have the same sign.\n both_infs = bitwise_and(a_inf, b_inf)\n inf_signs_eq = eq(a_inf * sgn(a), b_inf * sgn(b))\n inf_eq = bitwise_and(both_infs, inf_signs_eq)\n\n # now create the potential result combining close and inf_eq\n close_with_infs = bitwise_or(close, inf_eq)\n\n # deal with comparing nan's.\n if equal_nan:\n both_nans = bitwise_and(a_nan, b_nan)\n return bitwise_or(close_with_infs, both_nans)\n # otherwise nan's aren't considered close.\n else:\n return close_with_infs\n\n\n##########################\n# Condition\n##########################\n\n\n@_scal_elemwise\ndef switch(cond, ift, iff):\n \"\"\"if cond then ift else iff\"\"\"\n\n\nwhere = switch\n##########################\n# Bit-wise\n##########################\n\n\n@_scal_elemwise\ndef and_(a, b):\n \"\"\"bitwise a & b\"\"\"\n\n\nbitwise_and = and_ # numpy name for it\n\n\n@_scal_elemwise\ndef or_(a, b):\n \"\"\"bitwise a | b\"\"\"\n\n\nbitwise_or = or_ # numpy name for it\n\n\n@_scal_elemwise\ndef xor(a, b):\n \"\"\"bitwise a ^ b\"\"\"\n\n\nbitwise_xor = xor # numpy name for it\n\n\n@_scal_elemwise\ndef invert(a):\n \"\"\"bitwise ~a\"\"\"\n\n\nbitwise_not = invert # numpy alias for it\n\n\n##########################\n# Math\n##########################\n\n\n@_scal_elemwise\ndef abs_(a):\n \"\"\"|`a`|\n\n TensorVariable overloads the `TensorVariable.__abs__` operator so that\n this function is called when you type abs(a).\n\n \"\"\"\n\n\npprint.assign(abs_, printing.PatternPrinter((\"|%(0)s|\", -1000)))\n\n\n@_scal_elemwise\ndef exp(a):\n \"\"\"e^`a`\"\"\"\n\n\n@_scal_elemwise\ndef exp2(a):\n \"\"\"2^`a`\"\"\"\n\n\n@_scal_elemwise\ndef expm1(a):\n \"\"\"e^`a` - 1\"\"\"\n\n\n@_scal_elemwise\ndef neg(a):\n \"\"\"-a\"\"\"\n\n\n# numpy.reciprocal does integer division on integer inputs\n# (which is not very interesting)\n@_scal_elemwise\ndef inv(a):\n \"\"\"1.0/a\"\"\"\n\n\n@_scal_elemwise\ndef log(a):\n \"\"\"base e logarithm of a\"\"\"\n\n\n@_scal_elemwise\ndef log2(a):\n \"\"\"base 2 logarithm of a\"\"\"\n\n\n@_scal_elemwise\ndef log10(a):\n \"\"\"base 10 logarithm of a\"\"\"\n\n\n@_scal_elemwise\ndef log1p(a):\n \"\"\"log(1+a)\"\"\"\n\n\n@_scal_elemwise\ndef sgn(a):\n \"\"\"sign of a\"\"\"\n\n\n@_scal_elemwise\ndef ceil(a):\n \"\"\"ceiling of a\"\"\"\n\n\n@_scal_elemwise\ndef floor(a):\n \"\"\"floor of a\"\"\"\n\n\n@_scal_elemwise\ndef trunc(a):\n \"\"\"trunc of a\"\"\"\n\n\n@constructor\ndef iround(a, mode=None):\n \"\"\"cast(round(a,mode),'int64')\"\"\"\n return cast(round(a, mode), \"int64\")\n\n\n@constructor\ndef round(a, mode=None):\n \"\"\"round_mode(a) with mode in [half_away_from_zero, half_to_even].\n Default to half_to_even.\"\"\"\n if mode is None:\n mode = \"half_to_even\"\n if config.warn.round:\n warnings.warn(\n \"theano.tensor.round() changed its default from\"\n \" `half_away_from_zero` to `half_to_even` to have\"\n \" the same default as NumPy. 
Use the Theano flag\"\n \" `warn.round=False` to disable this warning.\"\n )\n if mode == \"half_away_from_zero\":\n return round_half_away_from_zero(a)\n elif mode == \"half_to_even\":\n return round_half_to_even(a)\n else:\n raise Exception(f\"round mode {mode} is not implemented.\")\n\n\n@_scal_elemwise\ndef round_half_to_even(a):\n \"\"\"round_half_to_even(a)\"\"\"\n\n\n@_scal_elemwise\ndef round_half_away_from_zero(a):\n \"\"\"round_half_away_from_zero(a)\"\"\"\n\n\n@_scal_elemwise\ndef sqr(a):\n \"\"\"square of a\"\"\"\n\n\n# alias to sqr, included to maintain similarity with numpy interface\nsquare = sqr\n\n\ndef cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=None):\n \"\"\"Calculate the covariance matrix.\n Covariance indicates the level to which two variables vary together.\n If we examine N-dimensional samples, :math:`m = [x_1, x_2, ... x_N]^T`,\n then the covariance matrix element :math:`C_{ij}` is the covariance of\n :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance\n of :math:`x_i`. Code and docstring ported from numpy.\n ----------\n m : array_like\n A 2-D array containing multiple variables and observations.\n Each row of `m` represents a variable, and each column is\n observations of all those variables.\n y : array_like, optional\n An additional set of variables and observations. `y` has the same form\n as that of `m`.\n rowvar : bool, optional\n If `rowvar` is True (default), then each row represents a\n variable, with observations in the columns. Otherwise, the relationship\n is transposed: each column represents a variable, while the rows\n contain observations.\n bias : bool, optional\n Default normalization (False) is by ``(N - 1)``, where ``N`` is the\n number of observations given (unbiased estimate). If `bias` is True, then\n normalization is by ``N``. 
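# A minimal sketch of round()/iround() as defined above and their two modes
# (assuming the public `theano.tensor` API; the input values are illustrative).
import numpy as np
import theano.tensor as tt

x = np.array([0.5, 1.5, 2.5, -0.5])
print(tt.round(x).eval())                               # half_to_even (default): [ 0.  2.  2. -0.]
print(tt.round(x, mode="half_away_from_zero").eval())   # [ 1.  2.  3. -1.]
print(tt.iround(x).eval())                              # rounded, then cast to int64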
These values can be overridden by using the\n keyword ``ddof``.\n ddof : int, optional\n If not ``None`` the default value implied by `bias` is overridden.\n The default value is ``None``.\n Returns\n -------\n out : The covariance matrix of the variables.\n \"\"\"\n\n if fweights is not None:\n raise NotImplementedError(\"fweights are not implemented\")\n if aweights is not None:\n raise NotImplementedError(\"aweights are not implemented\")\n\n if not rowvar and m.shape[0] != 1:\n m = m.T\n\n if y is not None:\n if not rowvar and y.shape[0] != 1:\n y = y.T\n m = theano.tensor.concatenate((m, y), axis=0)\n\n if ddof is None:\n if not bias:\n ddof = 1\n else:\n ddof = 0\n\n # Determine the normalization\n fact = m.shape[1] - ddof\n\n m -= m.mean(axis=1, keepdims=1)\n c = m.dot(m.T)\n c *= theano.tensor.constant(1) / fact\n return c.squeeze()\n\n\n@_scal_elemwise\ndef sqrt(a):\n \"\"\"square root of a\"\"\"\n\n\n@_scal_elemwise\ndef deg2rad(a):\n \"\"\"convert degree a to radian\"\"\"\n\n\n@_scal_elemwise\ndef rad2deg(a):\n \"\"\"convert radian a to degree\"\"\"\n\n\n@_scal_elemwise\ndef cos(a):\n \"\"\"cosine of a\"\"\"\n\n\n@_scal_elemwise\ndef arccos(a):\n \"\"\"arccosine of a\"\"\"\n\n\n@_scal_elemwise\ndef sin(a):\n \"\"\"sine of a\"\"\"\n\n\n@_scal_elemwise\ndef arcsin(a):\n \"\"\"arcsine of a\"\"\"\n\n\n@_scal_elemwise\ndef tan(a):\n \"\"\"tangent of a\"\"\"\n\n\n@_scal_elemwise\ndef arctan(a):\n \"\"\"arctangent of a\"\"\"\n\n\n@_scal_elemwise\ndef arctan2(a, b):\n \"\"\"arctangent of a / b\"\"\"\n\n\n@_scal_elemwise\ndef cosh(a):\n \"\"\"hyperbolic cosine of a\"\"\"\n\n\n@_scal_elemwise\ndef arccosh(a):\n \"\"\"hyperbolic arc cosine of a\"\"\"\n\n\n@_scal_elemwise\ndef sinh(a):\n \"\"\"hyperbolic sine of a\"\"\"\n\n\n@_scal_elemwise\ndef arcsinh(a):\n \"\"\"hyperbolic arc sine of a\"\"\"\n\n\n@_scal_elemwise\ndef tanh(a):\n \"\"\"hyperbolic tangent of a\"\"\"\n\n\n@_scal_elemwise\ndef arctanh(a):\n \"\"\"hyperbolic arc tangent of a\"\"\"\n\n\n@_scal_elemwise\ndef erf(a):\n \"\"\"error function\"\"\"\n\n\n@_scal_elemwise\ndef erfc(a):\n \"\"\"complementary error function\"\"\"\n\n\n@_scal_elemwise\ndef erfcx(a):\n \"\"\"scaled complementary error function\"\"\"\n\n\n@_scal_elemwise\ndef erfinv(a):\n \"\"\"inverse error function\"\"\"\n\n\n@_scal_elemwise\ndef erfcinv(a):\n \"\"\"inverse complementary error function\"\"\"\n\n\n@_scal_elemwise\ndef gamma(a):\n \"\"\"gamma function\"\"\"\n\n\n@_scal_elemwise\ndef gammaln(a):\n \"\"\"log gamma function\"\"\"\n\n\n@_scal_elemwise\ndef psi(a):\n \"\"\"derivative of log gamma function\"\"\"\n\n\n@_scal_elemwise\ndef tri_gamma(a):\n \"\"\"second derivative of the log gamma function\"\"\"\n\n\n@_scal_elemwise\ndef chi2sf(x, k):\n \"\"\"chi squared survival function\"\"\"\n\n\n@_scal_elemwise\ndef gammainc(k, x):\n \"\"\"Regularized lower gamma function\"\"\"\n\n\n@_scal_elemwise\ndef gammaincc(k, x):\n \"\"\"Regularized upper gamma function\"\"\"\n\n\n@_scal_elemwise\ndef gammau(k, x):\n \"\"\"Upper incomplete gamma function.\"\"\"\n\n\n@_scal_elemwise\ndef gammal(k, x):\n \"\"\"Lower incomplete gamma function.\"\"\"\n\n\n@_scal_elemwise\ndef j0(x):\n \"\"\"Bessel function of the first kind of order 0.\"\"\"\n\n\n@_scal_elemwise\ndef j1(x):\n \"\"\"Bessel function of the first kind of order 1.\"\"\"\n\n\n@_scal_elemwise\ndef jv(v, x):\n \"\"\"Bessel function of the first kind of order v (real).\"\"\"\n\n\n@_scal_elemwise\ndef i0(x):\n \"\"\"Modified Bessel function of the first kind of order 0.\"\"\"\n\n\n@_scal_elemwise\ndef i1(x):\n 
\"\"\"Modified Bessel function of the first kind of order 1.\"\"\"\n\n\n@_scal_elemwise\ndef iv(v, x):\n \"\"\"Modified Bessel function of the first kind of order v (real).\"\"\"\n\n\n@_scal_elemwise\ndef real(z):\n \"\"\"Return real component of complex-valued tensor `z`\"\"\"\n\n\n_tensor_py_operators.real = property(real)\n\n\n@_scal_elemwise\ndef imag(z):\n \"\"\"Return imaginary component of complex-valued tensor `z`\"\"\"\n\n\n_tensor_py_operators.imag = property(imag)\n\n\n@_scal_elemwise\ndef angle(z):\n \"\"\"Return polar-coordinate angle of complex-valued tensor `z`\"\"\"\n\n\n@_scal_elemwise # numpy.complex cannot build tensors\ndef complex(real, imag):\n \"\"\"Return complex-valued tensor with `real` and `imag` components\"\"\"\n\n\n@_scal_elemwise\ndef conj(z):\n \"\"\"Return the complex conjugate of `z`.\"\"\"\n\n\n@_scal_elemwise\ndef complex_from_polar(abs, angle):\n \"\"\"Return complex-valued tensor from polar coordinate specification.\"\"\"\n\n\n##########################\n# Misc\n##########################\n\n\n# fill, _fill_inplace = _elemwise(scal.second, 'fill',\n# \"\"\"fill WRITEME (elemwise)\"\"\")\n@_scal_elemwise\ndef second(a, b):\n \"\"\"Create a matrix by filling the shape of a with b\"\"\"\n\n\nfill = second\npprint.assign(fill, printing.FunctionPrinter(\"fill\"))\n\n\n@constructor\ndef ones_like(model, dtype=None, opt=False):\n \"\"\"equivalent of numpy.ones_like\n Parameters\n ----------\n model : tensor\n dtype : data-type, optional\n opt : If True, we will return a constant instead of a graph when possible.\n Useful for Theano optimization, not for user building a graph as this\n have the consequence that model isn't always in the graph.\n\n Returns\n -------\n tensor\n tensor the shape of model containing ones of the type of dtype.\n \"\"\"\n if dtype is None:\n dtype = model.type.dtype\n ret = constant(1.0, dtype=dtype)\n if opt and ret.type == model.type:\n return ret\n return fill(model, ret)\n\n\n@constructor\ndef zeros_like(model, dtype=None, opt=False):\n \"\"\"equivalent of numpy.zeros_like\n Parameters\n ----------\n model : tensor\n dtype : data-type, optional\n opt : If True, we will return a constant instead of a graph when possible.\n Useful for Theano optimization, not for user building a graph as this\n have the consequence that model isn't always in the graph.\n\n Returns\n -------\n tensor\n tensor the shape of model containing zeros of the type of dtype.\n \"\"\"\n\n if dtype is None:\n dtype = model.type.dtype\n ret = constant(0.0, dtype=dtype)\n if opt and ret.type == model.type:\n return ret\n return fill(model, ret)\n\n\ndef zeros(shape, dtype=None):\n \"\"\"\n Create a Tensor filled with zeros, closer to Numpy's syntax than ``alloc``.\n \"\"\"\n if not isinstance(shape, (np.ndarray, Sequence, TensorVariable)):\n shape = [shape]\n if dtype is None:\n dtype = config.floatX\n return alloc(np.array(0, dtype=dtype), *shape)\n\n\ndef ones(shape, dtype=None):\n \"\"\"\n Create a Tensor filled with ones, closer to Numpy's syntax than ``alloc``.\n \"\"\"\n if not isinstance(shape, (np.ndarray, Sequence, TensorVariable)):\n shape = [shape]\n if dtype is None:\n dtype = config.floatX\n return alloc(np.array(1, dtype=dtype), *shape)\n\n\nclass Nonzero(gof.Op):\n \"\"\"\n Return the indices of the elements that are non-zero.\n\n Parameters\n ----------\n a: array_like\n Input array.\n\n Returns\n -------\n indices: list\n A list containing the indices of the non-zero elements of `a`.\n\n See Also\n --------\n nonzero_values : Return the 
non-zero elements of the input array\n flatnonzero : Return the indices of the non-zero elements of the\n flattened input array.\n\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, a):\n a = as_tensor_variable(a)\n if a.ndim == 0:\n raise ValueError(\"Nonzero only supports non-scalar arrays.\")\n output = [\n TensorType(dtype=\"int64\", broadcastable=(False,))() for i in range(a.ndim)\n ]\n return gof.Apply(self, [a], output)\n\n def perform(self, node, inp, out_):\n a = inp[0]\n\n result_tuple = np.nonzero(a)\n for i, res in enumerate(result_tuple):\n out_[i][0] = res.astype(\"int64\")\n\n def grad(self, inp, grads):\n return [grad_undefined(self, 0, inp[0])]\n\n\n_nonzero = Nonzero()\n\n\ndef nonzero(a, return_matrix=False):\n \"\"\"\n Returns one of the following:\n\n If return_matrix is False (default, same as NumPy):\n A tuple of vector arrays such that the ith element of the jth array\n is the index of the ith non-zero element of the input array in the\n jth dimension.\n\n If return_matrix is True (same as Theano Op):\n Returns a matrix of shape (ndim, number of nonzero elements) such\n that element (i,j) is the index in the ith dimension of the jth\n non-zero element.\n\n Parameters\n ----------\n a : array_like\n Input array.\n return_matrix : bool\n If True, returns a symbolic matrix. If False, returns a tuple of\n arrays. Defaults to False.\n\n Returns\n -------\n tuple of vectors or matrix\n\n See Also\n --------\n nonzero_values : Return the non-zero elements of the input array\n flatnonzero : Return the indices of the non-zero elements of the\n flattened input array.\n\n \"\"\"\n res = _nonzero(a)\n if isinstance(res, list):\n res = tuple(res)\n else:\n res = (res,)\n\n if return_matrix:\n if len(res) > 1:\n return stack(res, 0)\n elif len(res) == 1:\n return shape_padleft(res[0])\n else:\n return res\n\n\ndef flatnonzero(a):\n \"\"\"Return a vector of indices that are non-zero in the flattened version of `a`.\n\n Parameters\n ----------\n a : tensor\n Input tensor\n\n Returns\n -------\n vector\n Output vector, containing the indices of the elements of `a.flatten()`\n that are non-zero.\n\n See Also\n --------\n nonzero : Return the indices of the non-zero elements of the input array.\n nonzero_values : Return the non-zero elements of the input array\n\n \"\"\"\n if a.ndim == 0:\n raise ValueError(\"Nonzero only supports non-scalar arrays.\")\n return nonzero(a.flatten(), return_matrix=False)[0]\n\n\ndef nonzero_values(a):\n \"\"\"Return a vector of non-zero elements contained in the input array.\n\n Parameters\n ----------\n a : tensor\n Input tensor\n\n Returns\n -------\n vector\n Output vector, containing the non-zero elements of a.\n\n See Also\n --------\n nonzero : Return the indices of the non-zero elements of the input array.\n flatnonzero : Return the indices of the non-zero elements of the\n flattened input array.\n\n \"\"\"\n return a.flatten()[flatnonzero(a)]\n\n\nclass Tri(gof.Op):\n\n __props__ = (\"dtype\",)\n\n def __init__(self, dtype=None):\n if dtype is None:\n dtype = config.floatX\n self.dtype = dtype\n\n def make_node(self, N, M, k):\n N = as_tensor_variable(N)\n M = as_tensor_variable(M)\n k = as_tensor_variable(k)\n return gof.Apply(\n self,\n [N, M, k],\n [TensorType(dtype=self.dtype, broadcastable=(False, False))()],\n )\n\n def perform(self, node, inp, out_):\n N, M, k = inp\n (out,) = out_\n out[0] = np.tri(N, M, k, dtype=self.dtype)\n\n def infer_shape(self, node, in_shapes):\n out_shape = [node.inputs[0], node.inputs[1]]\n return 
[out_shape]\n\n def grad(self, inp, grads):\n return [grad_undefined(self, i, inp[i]) for i in range(3)]\n\n\ndef tri(N, M=None, k=0, dtype=None):\n \"\"\"\n An array with ones at and below the given diagonal and zeros elsewhere.\n\n Parameters\n ----------\n N : int\n Number of rows in the array.\n M : int, optional\n Number of columns in the array.\n By default, `M` is taken equal to `N`.\n k : int, optional\n The sub-diagonal at and below which the array is filled.\n `k` = 0 is the main diagonal, while `k` < 0 is below it,\n and `k` > 0 is above. The default is 0.\n dtype : dtype, optional\n Data type of the returned array. The default is float.\n\n Returns\n -------\n Array of shape (N, M)\n Array with its lower triangle filled with ones and zero elsewhere;\n in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n if M is None:\n M = N\n op = Tri(dtype)\n return op(N, M, k)\n\n\ndef tril(m, k=0):\n \"\"\"\n Lower triangle of an array.\n\n Return a copy of an array with elements above the `k`-th diagonal zeroed.\n\n Parameters\n ----------\n m : array_like, shape (M, N)\n Input array.\n k : int, optional\n Diagonal above which to zero elements. `k = 0` (the default) is the\n main diagonal, `k < 0` is below it and `k > 0` is above.\n\n Returns\n -------\n array, shape (M, N)\n Lower triangle of `m`, of same shape and data-type as `m`.\n\n See Also\n --------\n triu : Same thing, only for the upper triangle.\n\n \"\"\"\n return m * tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype)\n\n\ndef triu(m, k=0):\n \"\"\"\n Upper triangle of an array.\n\n Return a copy of a matrix with the elements below the `k`-th diagonal\n zeroed.\n\n Please refer to the documentation for `tril` for further details.\n\n See Also\n --------\n tril : Lower triangle of an array.\n\n \"\"\"\n return m * (\n constant(1, dtype=m.dtype) - tri(m.shape[0], m.shape[1], k=k - 1, dtype=m.dtype)\n )\n\n\nclass Eye(gof.Op):\n\n __props__ = (\"dtype\",)\n\n def __init__(self, dtype=None):\n if dtype is None:\n dtype = config.floatX\n self.dtype = dtype\n\n def make_node(self, n, m, k):\n n = as_tensor_variable(n)\n m = as_tensor_variable(m)\n k = as_tensor_variable(k)\n assert n.ndim == 0\n assert m.ndim == 0\n assert k.ndim == 0\n return gof.Apply(\n self,\n [n, m, k],\n [TensorType(dtype=self.dtype, broadcastable=(False, False))()],\n )\n\n def perform(self, node, inp, out_):\n n, m, k = inp\n (out,) = out_\n out[0] = np.eye(n, m, k, dtype=self.dtype)\n\n def infer_shape(self, node, in_shapes):\n out_shape = [node.inputs[0], node.inputs[1]]\n return [out_shape]\n\n def grad(self, inp, grads):\n return [grad_undefined(self, i, inp[i]) for i in range(3)]\n\n\ndef eye(n, m=None, k=0, dtype=None):\n \"\"\"Return a 2-D array with ones on the diagonal and zeros elsewhere.\n\n Parameters\n ----------\n n : int\n Number of rows in the output.\n m : int, optional\n Number of columns in the output. 
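# A minimal sketch of tri()/tril()/triu()/eye() as defined above; .eval() is used
# only to show concrete values (assuming the public `theano.tensor` API; the
# input matrix is illustrative).
import numpy as np
import theano.tensor as tt

m = np.arange(9, dtype="float64").reshape(3, 3)
x = tt.dmatrix("x")
print(tt.tril(x).eval({x: m}))       # zero out entries above the main diagonal
print(tt.triu(x, k=1).eval({x: m}))  # keep only entries strictly above the diagonal
print(tt.eye(3, k=-1).eval())        # ones on the first sub-diagonal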
If None, defaults to `N`.\n k : int, optional\n Index of the diagonal: 0 (the default) refers to the main diagonal,\n a positive value refers to an upper diagonal, and a negative value\n to a lower diagonal.\n dtype : data-type, optional\n Data-type of the returned array.\n\n Returns\n -------\n ndarray of shape (N,M)\n An array where all elements are equal to zero, except for the `k`-th\n diagonal, whose values are equal to one.\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n if m is None:\n m = n\n localop = Eye(dtype)\n return localop(n, m, k)\n\n\ndef identity_like(x):\n return eye(x.shape[0], x.shape[1], k=0, dtype=x.dtype)\n\n\ndef alloc_validate_shape(shape):\n sh = [as_tensor_variable(s) for s in shape]\n bcast = []\n for i, s in enumerate(sh):\n\n def err_str():\n if config.exception_verbosity == \"high\":\n return \"\\n\" + min_informative_str(s)\n else:\n return str(s)\n\n if s.type.dtype not in integer_dtypes:\n s_as_str = err_str()\n raise TypeError(\n \"Shape arguments to Alloc must be integers, \"\n f\"but argument {i} is not for apply node: {s_as_str}\"\n )\n if s.ndim != 0:\n s_as_str = err_str()\n raise TypeError(\n \"Each shape dimension to Alloc must be a scalar, \",\n f\"but dimension {i} have {int(s.ndim)} dimensions for apply node: {s_as_str}\",\n )\n\n # if s is constant 1, then we're broadcastable in that dim\n try:\n const_shp = get_scalar_constant_value(s)\n except NotScalarConstantError:\n const_shp = None\n bcast.append(1 == const_shp)\n return sh, bcast\n\n\nclass Alloc(gof.Op):\n \"\"\"Create a Tensor from an initial value and a desired shape.\n\n alloc(value, shape0, shape1, ..., shapeN)\n\n Returns an N-dimensional tensor initialized by `value` using something\n equivalent to\n\n z = numpy.zeros(shape, value.dtype)\n z += value\n\n The result has N dimensions, has the dtype of `value` and is obtained by\n broadcasting value over the output ndarray.\n\n This Op is used to replace fill() during optimizations because after shapes\n are lifted, the first argument to fill can often be pruned from the graph.\n\n \"\"\"\n\n _f16_ok = True\n __props__ = ()\n\n def validate_shape(self, shape):\n return alloc_validate_shape(shape)\n\n def make_node(self, value, *shape):\n v = as_tensor_variable(value)\n sh, bcast = alloc_validate_shape(shape)\n if v.ndim > len(sh):\n raise TypeError(\n \"The Alloc value to use has more dimensions\"\n \" than the specified dimensions\",\n v.ndim,\n len(sh),\n )\n otype = TensorType(dtype=v.dtype, broadcastable=bcast)\n return gof.Apply(self, [v] + sh, [otype()])\n\n def perform(self, node, inputs, out_):\n (out,) = out_\n v = inputs[0]\n sh = tuple([int(i) for i in inputs[1:]])\n if out[0] is None or out[0].shape != sh:\n if v.size == 1 and v.item() == 0:\n out[0] = np.zeros(sh, dtype=v.dtype)\n else:\n out[0] = np.empty(sh, dtype=v.dtype)\n out[0][...] = v # broadcast v to fill us up\n else:\n # reuse the allocated memory.\n out[0][...] 
= v # broadcast v to fill us up\n\n def c_code(self, node, name, inp, out, sub):\n vv = inp[0]\n ndim = len(inp[1:])\n (zz,) = out\n fail = sub[\"fail\"]\n\n code = f\"\"\"\n npy_intp shape[{ndim}];\n \"\"\"\n\n # Initialize shape\n for i, shp_i in enumerate(inp[1:]):\n code += \"\"\"\n shape[%(i)s] = ((dtype_%(shp_i)s*) PyArray_DATA(%(shp_i)s))[0];\n \"\"\" % dict(\n i=i, shp_i=shp_i\n )\n\n code += \"\"\"\n int need_new_out = (NULL == %(zz)s);\n for (int i = 0; i < %(ndim)s; i++)\n need_new_out = (need_new_out\n || (PyArray_DIMS(%(zz)s)[i] != shape[i]));\n\n if (need_new_out)\n {\n Py_XDECREF(%(zz)s);\n %(zz)s = (PyArrayObject*) PyArray_SimpleNew(%(ndim)s,\n shape, PyArray_TYPE((PyArrayObject*) py_%(vv)s));\n if (!%(zz)s)\n {\n PyErr_SetString(PyExc_MemoryError, \"alloc failed\");\n %(fail)s\n }\n }\n\n // This function takes care of broadcasting\n if (PyArray_CopyInto(%(zz)s, %(vv)s) == -1)\n %(fail)s\n \"\"\" % dict(\n vv=vv, ndim=ndim, zz=zz, fail=fail\n )\n\n return code\n\n def c_code_cache_version(self):\n return (2,)\n\n def infer_shape(self, node, input_shapes):\n return [node.inputs[1:]]\n\n def connection_pattern(self, node):\n\n rval = [[True]]\n\n for ipt in node.inputs[1:]:\n rval.append([False])\n\n return rval\n\n def grad(self, inputs, grads):\n x = inputs[0]\n gz = grads[0]\n n_axes_to_sum = gz.ndim - x.ndim\n # The number of dimensions added\n axis = list(range(n_axes_to_sum))\n # The broadcasted dimensions\n axis_broadcasted = []\n axis_kept = []\n for i, (ib, gb) in enumerate(\n zip(\n inputs[0].broadcastable,\n # We need the dimensions corresponding to x\n grads[0].broadcastable[-inputs[0].ndim :],\n )\n ):\n if ib and not gb:\n axis_broadcasted.append(i + n_axes_to_sum)\n else:\n axis_kept.append(i)\n gx = gz.sum(axis=axis + axis_broadcasted)\n if axis_broadcasted:\n new_order = [\"x\"] * x.ndim\n for idx, axis in enumerate(axis_kept):\n new_order[axis] = idx\n gx = gx.dimshuffle(new_order)\n # Dimshuffle to add back the broadcasted dims\n # The *elements* of the output are not connected to\n # the inputs that specify the shape. If you grow the\n # shape by epsilon, the existing elements do not\n # change.\n return [gx] + [DisconnectedType()() for i in inputs[1:]]\n\n def __call__(self, val, *shapes, **kwargs):\n \"\"\"\n If the alloc would be useless, this function returns val.\n\n If this function is called outside of a graph optimization context\n (for instance, it is manually called by a user building a graph),\n then we always return an Alloc node, to allow for DebugMode to check\n for size mismatches.\n\n If you always want an Alloc node, call make_node.\n\n \"\"\"\n ret = super().__call__(val, *shapes, **kwargs)\n try:\n # It makes optimization difficult when useless allocs are thrown\n # into the graph at every stage of optimization. This little logic\n # tries to help at least in some cases.\n if hasattr(val, \"fgraph\") and (val.type == ret.type):\n return val\n except AttributeError:\n pass\n return ret\n\n def R_op(self, inputs, eval_points):\n if eval_points[0] is None:\n return [None]\n return self(eval_points[0], *inputs[1:], **dict(return_list=True))\n\n def do_constant_folding(self, node):\n if not getattr(node.outputs[0], \"clients\", []):\n # If there are no clients then there is no point doing constant\n # folding.\n return False\n for client in node.outputs[0].clients:\n if client[0] == \"output\":\n # If the output is a constant, it will have to be deepcopied\n # each time the function is called. 
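# A minimal sketch of alloc() as defined above: broadcast a value over a freshly
# allocated array of the requested shape (assuming the public `theano.tensor` API;
# variable names are illustrative).
import numpy as np
import theano
import theano.tensor as tt

v = tt.dvector("v")
n = tt.iscalar("n")
tiled = tt.alloc(v, n, v.shape[0])   # broadcast the row vector into an (n, len(v)) matrix
f = theano.function([v, n], tiled)
print(f(np.array([1.0, 2.0, 3.0]), 4).shape)   # (4, 3)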
So we do not fold.\n return False\n elif (\n # The following ops work inplace of their input id 0.\n client[1] == 0\n and isinstance(\n client[0].op,\n (\n # Ops that will work inplace on the Alloc. So if they\n # get constant_folded, they would copy the\n # constant and this is less efficients.\n # Not doing the constant folding could also lower\n # the peak memory usage, as we the \"constant\" won't\n # always exists.\n theano.tensor.subtensor.IncSubtensor,\n theano.tensor.subtensor.AdvancedIncSubtensor1,\n theano.tensor.subtensor.AdvancedIncSubtensor,\n theano.tensor.blas.Gemv,\n theano.tensor.blas_c.CGemv,\n theano.tensor.blas.Ger,\n theano.tensor.blas_c.CGer,\n theano.tensor.blas_scipy.ScipyGer,\n ),\n )\n ):\n return False\n # If the clients is a transfer to the GPU, we don't want to\n # fold. We let the Alloc being moved to the GPU, then we\n # let the GPU algo decide if it need to fold it or not.\n elif client[0].op.__class__.__name__.lower().startswith(\"gpu\"):\n return False\n return True\n\n\nalloc = Alloc()\npprint.assign(alloc, printing.FunctionPrinter(\"alloc\"))\n\n\ndef transfer(var, target):\n \"\"\"\n Return a version of `var` transferred to `target`.\n\n `cpu` mean a TensorType (on the CPU). Other types may define\n additional targets.\n\n Parameters\n ----------\n var : variable\n A theano variable\n target : str\n The target of the transfer\n \"\"\"\n if target == \"cpu\":\n return as_tensor_variable(var)\n else:\n for trans in transfer._others:\n res = trans(var, target)\n if res is not None:\n return res\n raise ValueError(f\"Can't transfer to target {target}\")\n\n\ntransfer._others = []\n\n\ndef register_transfer(fn):\n \"\"\"\n Register a transfer function for alternative targets.\n\n Parameters\n ----------\n fn : callable\n \"\"\"\n transfer._others.append(fn)\n\n\n\"\"\"Create a duplicate of `a` (with duplicated storage)\"\"\"\ntensor_copy = elemwise.Elemwise(scal.identity)\npprint.assign(tensor_copy, printing.IgnorePrinter())\n\n\n@constructor\ndef sum(input, axis=None, dtype=None, keepdims=False, acc_dtype=None):\n \"\"\"\n Computes the sum along the given axis(es) of a tensor `input`.\n\n When axis is None (the default value), the sum is performed\n over the flattened tensor.\n\n For full documentation see ``tensor.elemwise.Sum``.\n In particular please pay attention to the important warning when using\n a custom acc_dtype.\n\n Parameters\n ----------\n keepdims: bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n\n out = elemwise.Sum(axis=axis, dtype=dtype, acc_dtype=acc_dtype)(input)\n\n if keepdims:\n out = makeKeepDims(input, out, axis)\n return out\n\n\npprint.assign(Sum(), printing.FunctionPrinter(\"sum\"))\n\n\n@constructor\ndef prod(\n input,\n axis=None,\n dtype=None,\n keepdims=False,\n acc_dtype=None,\n no_zeros_in_input=False,\n):\n \"\"\"\n Computes the product along the given axis(es) of a tensor `input`.\n\n When axis is None (the default value), the product is performed\n over the flattened tensor.\n\n For full documentation see ``tensor.elemwise.Prod``.\n\n Parameters\n ----------\n keepdims: bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. 
With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n\n out = elemwise.Prod(\n axis, dtype=dtype, acc_dtype=acc_dtype, no_zeros_in_input=no_zeros_in_input\n )(input)\n\n if keepdims:\n out = makeKeepDims(input, out, axis)\n return out\n\n\nclass Mean(elemwise.CAReduce):\n def __init__(self, axis=None):\n super().__init__(scal.add, axis)\n assert self.axis is None or len(self.axis) == 1\n\n def __str__(self):\n if self.axis is not None:\n return \"Mean{%s}\" % (\", \".join(str(x) for x in self.axis))\n else:\n return \"Mean\"\n\n def _output_dtype(self, idtype):\n # we want to protect against overflow\n return \"float64\"\n\n def perform(self, node, inp, out):\n (input,) = inp\n (output,) = out\n if self.axis is None:\n axis = None\n else:\n axis = self.axis[0]\n # numpy.asarray is needed as otherwise we can end up with a\n # numpy scalar.\n output[0] = np.asarray(np.mean(input, dtype=\"float64\", axis=axis))\n\n def c_code(self, node, name, inames, onames, sub):\n if self.axis is not None:\n return super(Op, self).c_code(node, name, inames, onames, sub)\n ret = super().c_code(node, name, inames, onames, sub)\n # TODO: c_code perform support only axis is None\n return (\n ret\n + f\"\"\"\n *((double *)PyArray_DATA({onames[0]})) /= PyArray_SIZE({inames[0]});\n \"\"\"\n )\n\n\n# TODO: implement the grad. When done and tested, you can make this the default\n# version.\n# def grad(self, (x,), (gout,)):\n# import pdb;pdb.set_trace()\n# return grad(mean(x, self.axis, op=False),[x])\n\n\n@constructor\ndef mean(input, axis=None, dtype=None, op=False, keepdims=False, acc_dtype=None):\n \"\"\"\n Computes the mean value along the given axis(es) of a tensor `input`.\n\n Parameters\n ----------\n axis : None or int or (list of int) (see `Sum`)\n Compute the mean along this axis of the tensor.\n None means all axes (like numpy).\n dtype: None or string\n Dtype to cast the result of the inner summation into.\n For instance, by default, a sum of a float32 tensor will be\n done in float64 (acc_dtype would be float64 by default),\n but that result will be casted back in float32.\n keepdims: bool\n If this is set to True, the axes which are reduced are\n left in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original tensor.\n acc_dtype: None or string\n Dtype to use for the inner summation. This will not\n necessarily be the dtype of the output (in particular\n if it is a discrete (int/uint) dtype, the output will\n be in a float type). If None, then we use the same rules as `sum()`.\n\n Notes\n -----\n For gpu, if you specify dtype=float32, everything will be done on the gpu.\n\n \"\"\"\n input = as_tensor_variable(input)\n if op:\n if dtype not in (None, \"float64\"):\n raise NotImplementedError(\n \"The Mean op does not support the dtype argument, \"\n \"and will always use float64. If you want to specify \"\n \"the dtype, call tensor.mean(..., op=False).\",\n dtype,\n )\n if acc_dtype not in (None, \"float64\"):\n raise NotImplementedError(\n \"The Mean op does not support the acc_dtype argument, \"\n \"and will always use float64. 
If you want to specify \"\n \"acc_dtype, call tensor.mean(..., op=False).\",\n dtype,\n )\n out = Mean(axis)(input)\n if keepdims:\n out = makeKeepDims(input, out, axis)\n return out\n\n if dtype is not None:\n # The summation will be done with the specified dtype.\n # sum() will complain if it is not suitable.\n sum_dtype = dtype\n else:\n sum_dtype = None\n # float16 overflows on the cast way too often\n if input.dtype == \"float16\":\n sum_dtype = \"float32\"\n\n s = sum(input, axis=axis, dtype=sum_dtype, keepdims=keepdims, acc_dtype=acc_dtype)\n shp = shape(input)\n\n # Cast shp into a float type\n # TODO Once we have a consistent casting policy, we could simply\n # use true_div.\n if s.dtype in (\"float16\", \"float32\", \"complex64\"):\n shp = cast(shp, \"float32\")\n else:\n shp = cast(shp, \"float64\")\n\n if axis is None:\n axis = list(range(input.ndim))\n elif isinstance(axis, (int, np.integer)):\n axis = [axis]\n elif isinstance(axis, np.ndarray) and axis.ndim == 0:\n axis = [int(axis)]\n else:\n axis = [int(a) for a in axis]\n\n # This sequential division will possibly be optimized by Theano:\n for i in axis:\n s = true_div(s, shp[i])\n\n # This can happen when axis is an empty list/tuple\n if s.dtype != shp.dtype and s.dtype in discrete_dtypes:\n s = cast(s, shp.dtype)\n\n if dtype == \"float16\" or (dtype is None and input.dtype == \"float16\"):\n s = cast(s, \"float16\")\n s.name = \"mean\"\n return s\n\n\n@constructor\ndef var(input, axis=None, ddof=0, keepdims=False, corrected=False):\n \"\"\"\n Computes the variance along the given axis(es) of a tensor `input`.\n\n Parameters\n ----------\n axis: None or int or (list of int) (see `Sum`)\n Compute the variance along this axis of the tensor.\n None means all axes (like numpy).\n ddof: Degrees of freedom; 0 would compute the ML estimate, 1 would compute\n the unbiased estimate.\n keepdims : bool\n If this is set to True, the axes which are reduced are\n left in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original tensor.\n corrected : bool\n If this is set to True, the 'corrected_two_pass' algorithm is\n used to compute the variance.\n Refer : http://www.cs.yale.edu/publications/techreports/tr222.pdf\n\n Notes\n -----\n Default uses the two-pass algorithm (reference below).\n https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm\n Also supports 'corrected_two_pass' algorithm (using the 'corrected' flag)\n which is numerically more stable. 
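A minimal usage sketch (the variable names below are illustrative and not defined in this module):\n\n >>> x = theano.tensor.matrix()\n >>> v_ml = var(x, axis=0) # ML estimate (ddof=0)\n >>> v_unbiased = var(x, axis=0, ddof=1) # unbiased estimate\n\n 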
There exist other implementations that\n offer better stability, but probably slower.\n\n \"\"\"\n\n if isinstance(ddof, (bool)):\n raise ValueError(\n \"Parameter keepdims is now at index 3: (input, \\\n axis=None, ddof=0, keepdims=False, corrected=False)\"\n )\n\n input_ndim = input.type.ndim\n if axis is None:\n axis = list(range(input_ndim))\n elif isinstance(axis, (int, np.integer)):\n axis = [axis]\n elif isinstance(axis, np.ndarray) and axis.ndim == 0:\n axis = [int(axis)]\n else:\n axis = [int(a) for a in axis]\n\n # compute the axis-wise mean\n mean_input = mean(input, axis, keepdims=True)\n\n # center the input\n centered_input = input - mean_input\n\n # return the mean sqr\n two = constant(2, dtype=centered_input.dtype)\n if ddof == 0:\n v = mean((centered_input ** two), axis, keepdims=keepdims)\n else:\n shp = shape(input) - ddof\n v = sum((centered_input ** two), axis=axis, keepdims=keepdims)\n for i in axis:\n v = true_div(v, shp[i])\n\n # use 'corrected_two_pass' algorithm\n if corrected:\n if ddof == 0:\n error = mean(centered_input, axis, keepdims=keepdims) ** 2\n else:\n shp = shape(input) - ddof\n shp_inp = shape(input)\n error = sum(centered_input, axis=axis, keepdims=keepdims) ** 2\n for i in axis:\n error = true_div(error, shp[i] * shp_inp[i])\n v = v - error\n\n v.name = \"var\"\n return v\n\n\n@constructor\ndef std(input, axis=None, ddof=0, keepdims=False, corrected=False):\n \"\"\"\n Computes the standard deviation along the given axis(es) of a tensor `input`.\n\n Parameters\n ----------\n axis: None or int or (list of int) (see `Sum`)\n Compute the variance along this axis of the tensor.\n None means all axes (like numpy).\n ddof: Degrees of freedom; 0 would compute the ML estimate, 1 would compute\n the unbiased estimate.\n keepdims : bool\n If this is set to True, the axes which are reduced are\n left in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original tensor.\n corrected : bool\n If this is set to True, the 'corrected_two_pass' algorithm is\n used to compute the variance.\n Refer : http://www.cs.yale.edu/publications/techreports/tr222.pdf\n\n Notes\n -----\n It calls 'var()' and 'var()' uses the two-pass algorithm (reference below).\n https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm\n Function 'var()' also supports 'corrected_two_pass' algorithm (using the\n 'corrected' flag) which is numerically more stable. 
There exist other\n implementations that offer better stability, but probably slower.\n\n \"\"\"\n\n if isinstance(ddof, (bool)):\n raise ValueError(\n \"Parameter keepdims is now at index 3: (input, \\\n axis=None, ddof=0, keepdims=False, corrected=False)\"\n )\n\n ret = sqrt(\n var(input=input, axis=axis, ddof=ddof, keepdims=keepdims, corrected=corrected)\n )\n ret.name = \"std\"\n return ret\n\n\nclass Default(gof.Op):\n \"\"\"\n Takes an input x and a default value.\n\n If the input is not None, a reference to it is returned.\n If the input is None, a copy of the default value is returned instead.\n The input and the default must have exactly the same type.\n\n \"\"\"\n\n view_map = {0: [0]}\n __props__ = ()\n\n def make_node(self, x, default):\n x, default = as_tensor_variable(x), as_tensor_variable(default)\n if x.type != default.type:\n raise TypeError(\"Both default() arguments must have same type\", x, default)\n return gof.Apply(self, [x, default], [default.type()])\n\n def perform(self, node, inp, out_):\n x, default = inp\n (out,) = out_\n if x is None:\n # why copy? Theano can't yet understand out[0] being a view of\n # either x or y, so we can be a view of x, but only a copy of y.\n out[0] = default.copy()\n else:\n out[0] = x\n\n\ndefault = Default()\nsetdefault = default # legacy\n\n\n##########################\n# Arithmetics\n##########################\n@_scal_elemwise\ndef maximum(x, y):\n \"\"\"elemwise maximum. See max for the maximum in one tensor\"\"\"\n # see decorator for function body\n\n\n@_scal_elemwise\ndef minimum(x, y):\n \"\"\"elemwise minimum. See min for the minimum in one tensor\"\"\"\n # see decorator for function body\n\n\ndef div_proxy(x, y):\n \"\"\"Proxy for either true_div or int_div, depending on types of x, y.\"\"\"\n f = scal.int_or_true_div(\n as_tensor_variable(x).dtype in discrete_dtypes,\n as_tensor_variable(y).dtype in discrete_dtypes,\n )\n if f is scal.int_div:\n return int_div(x, y)\n else:\n return true_div(x, y)\n\n\ndef divmod(x, y):\n \"\"\"elementvise divmod, using floor_div and mod_check\"\"\"\n return floor_div(x, y), mod_check(x, y)\n\n\n@_scal_elemwise\ndef add(a, *other_terms):\n \"\"\"elementwise addition\"\"\"\n # see decorator for function body\n\n\n@_scal_elemwise\ndef sub(a, b):\n \"\"\"elementwise subtraction\"\"\"\n # see decorator for function body\n\n\n@_scal_elemwise\ndef mul(a, *other_terms):\n \"\"\"elementwise multiplication\"\"\"\n # see decorator for function body\n\n\n@_scal_elemwise\ndef true_div(a, b):\n \"\"\"elementwise [true] division (inverse of multiplication)\"\"\"\n # see decorator for function body\n\n\n@_scal_elemwise\ndef int_div(a, b):\n \"\"\"elementwise [floor] division (inverse of multiplication)\"\"\"\n # see decorator for function body\n\n\n# floor_div and int_div are the same thing\nfloor_div = int_div\n\n\ndef ceil_intdiv(a, b):\n \"\"\"\n Safely compute ceil(float_division(a, b)).\n\n Works for all dtypes, but mostly useful when a and b are int.\n\n \"\"\"\n # If a and b are int with not many significant bits, we could\n # cast them to float to avoid doing the modulo. We do not know if this\n # is faster or not. But this is not safe for int64 as the cast will\n # lose precision.\n # e.g.: cast(cast(a, scalar.upcast(a, 'float32')) / b, scal.upcast(a, b))\n\n # We cast for the case when a and b are uint*. 
Otherwise neq will\n # force their upcast to int.\n div = int_div(a, b)\n ret = cast(neq(a % b, 0), div.dtype) + div\n assert ret.dtype == scal.upcast(div.owner.inputs[0], div.owner.inputs[1])\n return ret\n\n\ndef mod_check(x, y):\n \"\"\"Make sure we do not try to use complex numbers.\"\"\"\n if (\n as_tensor_variable(x).dtype in complex_dtypes\n or as_tensor_variable(y).dtype in complex_dtypes\n ):\n # Currently forbidden.\n raise scal.Mod.complex_error\n else:\n return mod(x, y)\n\n\n@_scal_elemwise\ndef mod(a, b):\n \"\"\"elementwise modulo\"\"\"\n # see decorator for function body\n\n\n@_scal_elemwise\ndef pow(a, b):\n \"\"\"elementwise power\"\"\"\n # see decorator for function body\n\n\n@_scal_elemwise\ndef clip(x, min, max):\n \"\"\"\n Clip x to be between min and max.\n\n Notes\n -----\n When `x` is equal to the boundaries, the output is considered\n to be `x`, so at these points, the gradient of the cost wrt the output\n will be propagated to `x`, not to `min` nor `max`. In other words,\n on these points, the gradient wrt `x` will be equal to the gradient wrt\n the output, and the gradient wrt `min` and `max` will be zero.\n\n \"\"\"\n # see decorator for function body\n # for grep: clamp, bound\n\n\npprint.assign(add, printing.OperatorPrinter(\"+\", -2, \"either\"))\npprint.assign(mul, printing.OperatorPrinter(\"*\", -1, \"either\"))\npprint.assign(sub, printing.OperatorPrinter(\"-\", -2, \"left\"))\npprint.assign(neg, printing.OperatorPrinter(\"-\", 0, \"either\"))\npprint.assign(true_div, printing.OperatorPrinter(\"/\", -1, \"left\"))\npprint.assign(int_div, printing.OperatorPrinter(\"//\", -1, \"left\"))\npprint.assign(pow, printing.OperatorPrinter(\"**\", 1, \"right\"))\n\n\n##########################\n# View Operations\n##########################\n\n\ndef extract_constant(x, elemwise=True, only_process_constants=False):\n \"\"\"\n This function is basically a call to tensor.get_scalar_constant_value.\n\n The main difference is the behaviour in case of failure. While\n get_scalar_constant_value raises an TypeError, this function returns x,\n as a tensor if possible. If x is a ScalarVariable from a\n scalar_from_tensor, we remove the conversion. If x is just a\n ScalarVariable, we convert it to a tensor with tensor_from_scalar.\n\n \"\"\"\n try:\n x = get_scalar_constant_value(x, elemwise, only_process_constants)\n except NotScalarConstantError:\n pass\n if isinstance(x, scal.ScalarVariable) or isinstance(\n x, scal.sharedvar.ScalarSharedVariable\n ):\n if x.owner and isinstance(x.owner.op, ScalarFromTensor):\n x = x.owner.inputs[0]\n else:\n x = tensor_from_scalar(x)\n return x\n\n\ndef transpose(x, axes=None):\n \"\"\"\n Reorder the dimensions of x. (Default: reverse them)\n\n This is a macro around dimshuffle that matches the numpy.transpose function.\n\n \"\"\"\n if axes is None:\n axes = list(range((x.ndim - 1), -1, -1))\n ret = DimShuffle(x.broadcastable, axes)(x)\n if x.name and axes == list(range((x.ndim - 1), -1, -1)):\n ret.name = x.name + \".T\"\n return ret\n\n\ndef batched_dot(a, b):\n \"\"\"\n Compute the batched dot product of two variables:\n\n batched_dot(a, b)[i] = dot(a[i], b[i])\n\n Note that this batched_dot function does one of three things, in the\n following sequence:\n\n 1. If either a or b is a vector, it returns the batched elementwise\n product without calling the Theano BatchedDot op.\n\n 2. If both a and b have either 2 or 3 dimensions, it calls Theano's\n BatchedDot op on a and b.\n\n 3. 
If either a or b has more than 3 dimensions, it calls Theano's\n batched_tensordot function with appropriate axes. The\n batched_tensordot function expresses high-dimensional batched\n dot products in terms of batched matrix-matrix dot products, so\n it may be possible to futherize optimize for performance.\n \"\"\"\n a, b = as_tensor_variable(a), as_tensor_variable(b)\n\n if a.ndim == 0:\n raise TypeError(\"a must have at least one (batch) axis\")\n elif b.ndim == 0:\n raise TypeError(\"b must have at least one (batch) axis\")\n elif a.ndim == 1:\n return a.dimshuffle(*([0] + [\"x\"] * (b.ndim - 1))) * b\n elif b.ndim == 1:\n return a * b.dimshuffle(*([0] + [\"x\"] * (a.ndim - 1)))\n elif a.ndim > 3 or b.ndim > 3:\n return batched_tensordot(a, b, [[a.ndim - 1], [np.maximum(1, b.ndim - 2)]])\n else:\n # avoid circular import\n return theano.tensor.blas.BatchedDot()(a, b)\n\n\ndef batched_tensordot(x, y, axes=2):\n \"\"\"\n Compute a batched tensordot product.\n\n A hybrid of batched_dot and tensordot, this function computes the\n tensordot product between the two tensors, by iterating over the\n first dimension to perform a sequence of tensordots.\n\n Parameters\n ----------\n x : tensor\n A Tensor with sizes e.g.: for 3D (dim1, dim3, dim2)\n y : tensor\n A Tensor with sizes e.g.: for 3D (dim1, dim2, dim4)\n axes: int or array-like of length 2\n If an integer, the number of axes to sum over.\n If an array, it must have two array elements containing the axes to sum\n over in each tensor.\n\n If an integer i, it is converted to an array containing\n the last i dimensions of the first tensor and the first\n i dimensions of the second tensor (excluding the first\n (batch) dimension):\n axes = [list(range(a.ndim - i, b.ndim)), list(range(1,i+1))]\n\n If an array, its two elements must contain compatible axes\n of the two tensors. For example, [[1, 2], [2, 4]] means sum\n over the 2nd and 3rd axes of a and the 3rd and 5th axes of b.\n (Remember axes are zero-indexed!) The 2nd axis of a and the\n 3rd axis of b must have the same shape; the same is true for\n the 3rd axis of a and the 5th axis of b.\n\n Like tensordot, this function uses a series of dimshuffles and\n reshapes to reduce the tensor dot product to a matrix or vector\n dot product. 
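For example, assuming two illustrative 3D inputs (names not taken from the surrounding code), contracting the last axis of x with the middle axis of y for every batch entry could be written as:\n\n >>> x = theano.tensor.tensor3() # shape (batch, a, b)\n >>> y = theano.tensor.tensor3() # shape (batch, b, c)\n >>> r = batched_tensordot(x, y, axes=[[2], [1]]) # expected shape: (batch, a, c)\n\n 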
Finally, it calls batched_dot to compute the result.\n \"\"\"\n return _tensordot_as_dot(x, y, axes, dot=batched_dot, batched=True)\n\n\ndef split(x, splits_size, n_splits, axis=0):\n the_split = Split(n_splits)\n return the_split(x, axis, splits_size)\n\n\nclass Split(Op):\n \"\"\"Partition a `TensorVariable` along some axis.\n\n Examples\n --------\n >>> x = vector()\n >>> splits = lvector()\n You have to declare right away how many split_points there will be.\n >>> ra, rb, rc = split(x, splits, n_splits = 3, axis = 0)\n >>> f = function([x, splits], [ra, rb, rc])\n >>> a, b, c = f([0,1,2,3,4,5], [3, 2, 1])\n a == [0,1,2]\n b == [3, 4]\n c == [5]\n\n \"\"\"\n\n len_splits = None\n \"\"\"A Split instance will have this many outputs, and require that\n the splits argument to `perform` have exactly this many elements.\n \"\"\"\n __props__ = (\"len_splits\",)\n\n def __init__(self, len_splits):\n self.len_splits = int(len_splits)\n\n def __str__(self):\n return \"{self.__class__.__name__ }{{{self.len_splits}}}\"\n\n def make_node(self, x, axis, splits):\n \"\"\"WRITEME\"\"\"\n x = as_tensor_variable(x)\n axis = as_tensor_variable(axis)\n splits = as_tensor_variable(splits)\n\n if splits.type not in int_vector_types:\n raise TypeError(\"splits must have type tensor.lvector\", splits.type)\n if axis.type not in int_types:\n raise TypeError(\"axis must have type lscalar\", axis.type)\n\n # # The following lines are necessary if we allow splits of zero\n # if isinstance(axis, gof.Constant):\n # x = unbroadcast(x, int(axis.data))\n # else:\n # x = unbroadcast(x, *range(x.type.ndim))\n\n inputs = [x, axis, splits]\n outputs = [x.type() for i in range(self.len_splits)]\n\n return Apply(self, inputs, outputs)\n\n def perform(self, node, inputs, outputs):\n \"\"\"WRITEME\"\"\"\n x, axis, splits = inputs\n\n try:\n len_along_axis = x.shape[axis]\n except Exception:\n raise ValueError(\n f\"Split.perform() with axis=({axis}) is invalid\"\n f\" for x.shape==({x.shape})\"\n )\n if len(splits) != self.len_splits:\n raise ValueError(\n \"In Split.perform(), len(splits) != len_splits.\",\n (len(splits), self.len_splits),\n )\n\n if np.sum(splits) != len_along_axis:\n raise ValueError(\n f\"The splits sum to {np.sum(splits)}, expected {len_along_axis}\"\n )\n if builtins.any([nb < 0 for nb in splits]):\n raise ValueError(\n \"Split: you tried to make an ndarray with a \"\n \"negative number of elements.\"\n )\n\n # Checking is done, let's roll the splitting algorithm!\n # Basically we step along the given axis of x, extracting\n # subtensors of size splits[i] as we go along.\n\n general_key = [slice(None, None, None) for s in x.shape]\n lower_idx = 0\n for i in range(self.len_splits):\n upper_idx = lower_idx + splits[i]\n general_key[axis] = slice(lower_idx, upper_idx, None)\n outputs[i][0] = x.__getitem__(tuple(general_key)).copy()\n lower_idx = upper_idx\n\n def infer_shape(self, node, in_shapes):\n axis = node.inputs[1]\n splits = node.inputs[2]\n shp_x, shp_axis, shp_splits = in_shapes\n out_shapes = []\n for i in range(self.len_splits):\n temp = as_tensor_variable(shp_x)\n temp = theano.tensor.subtensor.set_subtensor(temp[axis], splits[i])\n temp = [temp[i] for i in range(len(shp_x))]\n out_shapes.append(temp)\n return out_shapes\n\n def grad(self, inputs, g_outputs):\n \"\"\"Join the gradients along the axis that was used to split x.\"\"\"\n x, axis, n = inputs\n outputs = self(*inputs, **dict(return_list=True))\n # If all the output gradients are disconnected, then so are the inputs\n if 
builtins.all([isinstance(g.type, DisconnectedType) for g in g_outputs]):\n return [\n DisconnectedType()(),\n grad_undefined(self, 1, axis),\n grad_undefined(self, 2, n),\n ]\n # Else, we have to make them zeros before joining them\n new_g_outputs = []\n for o, g in zip(outputs, g_outputs):\n if isinstance(g.type, DisconnectedType):\n new_g_outputs.append(o.zeros_like())\n else:\n new_g_outputs.append(g)\n\n return [\n join(axis, *new_g_outputs),\n grad_undefined(self, 1, axis),\n grad_undefined(self, 2, n),\n ]\n\n def R_op(self, inputs, eval_points):\n if eval_points[0] is None:\n return [None for i in self.len_splits]\n return self.make_node(eval_points[0], *inputs[1:]).outputs\n\n def c_code_cache_version(self):\n return (2,)\n\n def c_support_code(self):\n return \"\"\"\n /* Return 1 if output has the correct shape. */\n int split_output_shape_is_correct (\n PyArrayObject* output, PyArrayObject* array_to_split, int axis_to_split, npy_intp split_size\n ) {\n return\n PyArray_NDIM(output) == PyArray_NDIM(array_to_split)\n && memcmp(\n PyArray_DIMS(output),\n PyArray_DIMS(array_to_split),\n axis_to_split * sizeof(npy_intp)\n ) == 0\n && memcmp(\n PyArray_DIMS(output) + axis_to_split + 1,\n PyArray_DIMS(array_to_split) + axis_to_split + 1,\n (PyArray_NDIM(array_to_split) - axis_to_split - 1) * sizeof(npy_intp)\n ) == 0\n && split_size == PyArray_DIM(output, axis_to_split);\n }\n \"\"\"\n\n def c_code(self, node, name, inputs, outputs, sub):\n if self.len_splits == 0:\n # There are no outputs, then nothing to do.\n return \"\"\n\n # outputs_pointers lists the addresses of the pointers to the outputs.\n outputs_pointers = \"&\" + (\", &\".join(outputs))\n x, axis, splits = inputs\n fail = sub[\"fail\"]\n x_typenum = np.dtype(node.inputs[0].dtype).num\n x_itemsize = np.dtype(node.inputs[0].dtype).itemsize\n axis_dtype = node.inputs[1].type.dtype_specs()[1]\n splits_dtype = node.inputs[2].type.dtype_specs()[1]\n expected_splits_count = self.len_splits\n\n return (\n \"\"\"\n int ndim = PyArray_NDIM(%(x)s);\n int axis = (int)(*(%(axis_dtype)s*)PyArray_GETPTR1(%(axis)s, 0));\n int splits_count = PyArray_DIM(%(splits)s, 0);\n npy_intp len_along_axis, sum_of_splits = 0, current_split_length = 0, current_split_start = 0;\n npy_intp* split_dims = NULL;\n PyObject* split_view = NULL;\n npy_intp data_offset;\n int i;\n PyArrayObject** outputs[] = {%(outputs_pointers)s};\n\n /* Check inputs. */\n\n if (splits_count != %(expected_splits_count)s) {\n PyErr_Format(PyExc_ValueError,\n \"Split: splits count (%%d) != expected count (%%d).\", splits_count, %(expected_splits_count)s);\n %(fail)s\n }\n\n if (axis < 0) {\n axis += ndim;\n }\n if (axis < 0 || axis >= ndim) {\n PyErr_Format(PyExc_IndexError, \"Split: invalid axis %%d for a %%d-D array.\", axis, ndim);\n %(fail)s\n }\n len_along_axis = PyArray_DIM(%(x)s, axis);\n\n for (i = 0; i < splits_count; ++i) {\n current_split_length = (npy_intp)(*(%(splits_dtype)s*)PyArray_GETPTR1(%(splits)s, i));\n if (current_split_length < 0) {\n PyErr_Format(PyExc_ValueError,\n \"Split: you try to take a negative number (%%ld) of elements.\", current_split_length);\n %(fail)s\n }\n sum_of_splits += current_split_length;\n }\n if (sum_of_splits != len_along_axis) {\n PyErr_Format(PyExc_ValueError, \"Split: the splits sums to %%ld, expected %%ld.\", sum_of_splits, len_along_axis);\n %(fail)s\n }\n\n /* Check outputs. 
*/\n\n split_dims = (npy_intp*) malloc(ndim * sizeof(npy_intp));\n if (split_dims == NULL) {\n PyErr_NoMemory();\n %(fail)s\n }\n\n memcpy(split_dims, PyArray_DIMS(%(x)s), ndim * sizeof(npy_intp));\n\n for (i = 0; i < splits_count; ++i) {\n PyArrayObject** output = outputs[i];\n current_split_length = (npy_intp) (* (%(splits_dtype)s*) PyArray_GETPTR1(%(splits)s, i));\n if (*output == NULL || !split_output_shape_is_correct(*output, %(x)s, axis, current_split_length)) {\n Py_XDECREF(*output);\n split_dims[axis] = current_split_length;\n *output = (PyArrayObject*)PyArray_EMPTY(ndim, split_dims, %(x_typenum)s, PyArray_IS_F_CONTIGUOUS(%(x)s));\n if (outputs == NULL) {\n PyErr_SetString(PyExc_RuntimeError, \"Split: unable to allocate an output.\");\n free(split_dims);\n %(fail)s\n }\n }\n }\n\n /* Compute split. */\n\n for (i = 0; i < splits_count; ++i) {\n current_split_length = (npy_intp) (* (%(splits_dtype)s*) PyArray_GETPTR1(%(splits)s, i));\n data_offset = PyArray_STRIDE(%(x)s, axis) * current_split_start;\n split_dims[axis] = current_split_length;\n split_view = PyArray_New(&PyArray_Type,\n ndim, split_dims,\n %(x_typenum)s,\n PyArray_STRIDES(%(x)s),\n PyArray_BYTES(%(x)s) + data_offset,\n %(x_itemsize)s,\n PyArray_FLAGS(%(x)s),\n NULL);\n if (split_view == NULL) {\n PyErr_SetString(PyExc_RuntimeError, \"Split: unable to create a view for a split.\");\n free(split_dims);\n %(fail)s\n }\n if (PyArray_CopyInto(*outputs[i], (PyArrayObject*)split_view) != 0) {\n PyErr_SetString(PyExc_RuntimeError, \"Split: unable to copy a split view into the output.\");\n Py_XDECREF(split_view);\n free(split_dims);\n %(fail)s\n }\n Py_XDECREF(split_view);\n current_split_start += current_split_length;\n }\n\n free(split_dims);\n \"\"\"\n % locals()\n )\n\n\ndef addbroadcast(x, *axes):\n \"\"\"\n Make the input broadcastable in the specified axes.\n\n For example, addbroadcast(x, 0) will make the first dimension of\n x broadcastable. When performing the function, if the length of\n x along that dimension is not 1, a ValueError will be raised.\n\n We apply the opt here not to pollute the graph especially during\n the gpu optimization\n\n Parameters\n ----------\n x : tensor_like\n Input theano tensor.\n axis : an int or an iterable object such as list or tuple of int values\n The dimension along which the tensor x should be broadcastable.\n If the length of x along these dimensions is not 1, a ValueError will\n be raised.\n\n Returns\n -------\n tensor\n A theano tensor, which is broadcastable along the specified dimensions.\n\n \"\"\"\n rval = Rebroadcast(*[(axis, True) for axis in axes])(x)\n return theano.tensor.opt.apply_rebroadcast_opt(rval)\n\n\ndef unbroadcast(x, *axes):\n \"\"\"\n Make the input impossible to broadcast in the specified axes.\n\n For example, addbroadcast(x, 0) will make the first dimension\n of x broadcastable. 
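A minimal sketch of the intended effect (the variable names are illustrative only):\n\n >>> x = theano.tensor.matrix() # broadcastable == (False, False)\n >>> y = theano.tensor.addbroadcast(x, 0)\n >>> y.broadcastable # expected: (True, False)\n\n 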
When performing the function, if the length\n of x along that dimension is not 1, a ValueError will be raised.\n\n We apply the opt here not to pollute the graph especially during\n the gpu optimization\n\n Parameters\n ----------\n x : tensor_like\n Input theano tensor.\n axis : an int or an iterable object such as list or tuple of int values\n The dimension along which the tensor x should be unbroadcastable.\n If the length of x along these dimensions is not 1, a ValueError will\n be raised.\n\n Returns\n -------\n tensor\n A theano tensor, which is unbroadcastable along the specified dimensions.\n\n \"\"\"\n rval = Rebroadcast(*[(axis, False) for axis in axes])(x)\n return theano.tensor.opt.apply_rebroadcast_opt(rval)\n\n\ndef patternbroadcast(x, broadcastable):\n \"\"\"\n Make the input adopt a specific broadcasting pattern.\n\n Broadcastable must be iterable. For example,\n patternbroadcast(x, (True, False)) will make the first\n dimension of x broadcastable and the second dimension\n not broadcastable, so x will now be a row.\n\n We apply the opt here not to pollute the graph especially during the gpu\n optimization.\n\n Parameters\n ----------\n x : tensor_like\n Input theano tensor.\n broadcastable : an iterable object such as list or tuple of bool values\n A set of boolean values indicating whether a dimension should be\n broadcastable or not. If the length of x along these dimensions is\n not 1, a ValueError will be raised.\n\n Returns\n -------\n tensor\n A theano tensor, which is unbroadcastable along the specified dimensions.\n\n \"\"\"\n rval = Rebroadcast(*[(i, broadcastable[i]) for i in range(len(broadcastable))])(x)\n return theano.tensor.opt.apply_rebroadcast_opt(rval)\n\n\nclass Join(Op):\n \"\"\"\n Concatenate multiple `TensorVariable`s along some axis.\n\n The axis must be given as first argument. All tensors must have the same\n shape along all dimensions other than this axis.\n Of course, TensorVariable instances do not have a shape, so this error\n cannot be caught until runtime. See `perform()`.\n\n See Also\n --------\n stack : For joins involving scalar values\n\n Examples\n --------\n >>> x, y, z = tensor.matrix(), tensor.matrix(), tensor.matrix()\n >>> u = tensor.vector()\n\n >>> r = join(0, x, y, z)\n >>> c = join(1, x, y, z)\n >>> join(2, x, y, z) # WRONG: the axis has to be an index into the shape\n >>> join(0, x, u) # WRONG: joined tensors must have the same rank\n\n \"\"\"\n\n check_input = False\n __props__ = (\"view\",)\n\n def __init__(self, view=-1):\n self.view = view\n if view != -1:\n # since the first input is always the axis, the tensors\n # start from index 1.\n self.view_map = {0: [1 + view]}\n\n def __str__(self):\n if self.view == -1:\n return self.__class__.__name__\n else:\n return \"{}{{{}}}\".format(\n self.__class__.__name__,\n \", \".join(\n \"{}={!r}\".format(p, getattr(self, p)) for p in self.__props__\n ),\n )\n\n def __setstate__(self, d):\n self.__dict__.update(d)\n if not hasattr(self, \"view\"):\n self.view = -1\n\n def make_node(self, *axis_and_tensors):\n \"\"\"\n Parameters\n ----------\n axis: an Int or integer-valued Variable\n tensors\n A variable number (but not zero) of tensors to\n concatenate along the specified axis. 
These tensors must have\n the same shape along all dimensions other than this axis.\n\n Returns\n -------\n A symbolic Variable\n It has the same ndim as the input tensors, and the most inclusive\n dtype.\n\n \"\"\"\n axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]\n if not tensors:\n raise ValueError(\"Cannot join an empty list of tensors\")\n as_tensor_variable_args = [as_tensor_variable(x) for x in tensors]\n\n dtypes = [x.type.dtype for x in as_tensor_variable_args]\n out_dtype = scal.upcast(*dtypes)\n\n def output_maker(bcastable):\n return tensor(dtype=out_dtype, broadcastable=bcastable)\n\n return self._make_node_internal(\n axis, tensors, as_tensor_variable_args, output_maker\n )\n\n def _make_node_internal(self, axis, tensors, as_tensor_variable_args, output_maker):\n if not builtins.all(targs.type.ndim for targs in as_tensor_variable_args):\n raise TypeError(\n \"Join cannot handle arguments of dimension 0.\"\n \" For joining scalar values, see @stack\"\n )\n # Handle single-tensor joins immediately.\n if len(as_tensor_variable_args) == 1:\n bcastable = list(as_tensor_variable_args[0].type.broadcastable)\n else:\n # When the axis is fixed, a dimension should be\n # broadcastable if at least one of the inputs is\n # broadcastable on that dimension (see justification below),\n # except for the axis dimension.\n # Initialize bcastable all false, and then fill in some trues with\n # the loops.\n bcastable = [False] * len(as_tensor_variable_args[0].type.broadcastable)\n ndim = len(bcastable)\n # Axis can also be a constant\n if not isinstance(axis, int):\n try:\n # Note : `get_scalar_constant_value` returns a ndarray not\n # an int\n axis = int(get_scalar_constant_value(axis))\n\n except NotScalarConstantError:\n pass\n if isinstance(axis, int):\n # Basically, broadcastable -> length 1, but the\n # converse does not hold. So we permit e.g. T/F/T\n # joins, and if they fail at runtime they fail, but if\n # they don't then it means that the argument where\n # that broadcastable flag was False had length 1 along\n # this dimension, and therefore this dimension should\n # be broadcastable for the output.\n\n if axis < -ndim:\n raise IndexError(\n f\"Join axis {int(axis)} out of bounds [0, {int(ndim)})\"\n )\n if axis < 0:\n axis += ndim\n\n for x in as_tensor_variable_args:\n for current_axis, bflag in enumerate(x.type.broadcastable):\n # Constant negative axis can no longer be negative at\n # this point. 
It safe to compare this way.\n if current_axis == axis:\n continue\n if bflag:\n bcastable[current_axis] = True\n try:\n bcastable[axis] = False\n except IndexError:\n raise ValueError(\n 'Join argument \"axis\" is out of range'\n \" (given input dimensions)\"\n )\n else:\n # When the axis may vary, no dimension can be guaranteed to be\n # broadcastable.\n bcastable = [False] * len(as_tensor_variable_args[0].type.broadcastable)\n\n if not builtins.all(\n [x.ndim == len(bcastable) for x in as_tensor_variable_args[1:]]\n ):\n raise TypeError(\n \"Join() can only join tensors with the same \" \"number of dimensions.\"\n )\n\n inputs = [as_tensor_variable(axis)] + list(as_tensor_variable_args)\n if inputs[0].type not in int_types:\n raise TypeError(\n \"Axis could not be cast to an integer type\",\n axis,\n inputs[0].type,\n int_types,\n )\n\n outputs = [output_maker(bcastable)]\n\n node = Apply(self, inputs, outputs)\n return node\n\n def perform(self, node, axis_and_tensors, out_):\n (out,) = out_\n view = self.view\n axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]\n # we check these tensors for being empty.\n if (view != -1) and np.all(\n [\n tensor.shape[axis] == 0\n for tensor in tensors[0:view] + tensors[view + 1 :]\n ]\n ):\n out[0] = tensors[view]\n\n else:\n ndim = tensors[0].ndim\n if axis < -ndim:\n raise IndexError(\n f\"Join axis {int(axis)} out of bounds [0, {int(ndim)})\"\n )\n\n out[0] = theano._asarray(\n np.concatenate(tensors, axis=axis), dtype=node.outputs[0].type.dtype\n )\n\n def c_code_cache_version(self):\n return (5,)\n\n def c_code(self, node, name, inputs, outputs, sub):\n axis, tensors = inputs[0], inputs[1:]\n view = self.view\n non_empty_tensor = tensors[view]\n input_1 = tensors[0]\n l = len(tensors)\n (out,) = outputs\n fail = sub[\"fail\"]\n adtype = node.inputs[0].type.dtype_specs()[1]\n copy_to_list = []\n\n for i, inp in enumerate(tensors):\n copy_to_list.append(\n f\"\"\"Py_INCREF({inp});\n PyList_SetItem(list, {i}, (PyObject*){inp});\"\"\"\n )\n\n copy_inputs_to_list = \"\\n\".join(copy_to_list)\n n = len(tensors)\n\n code = (\n \"\"\"\n int axis = ((%(adtype)s *)PyArray_DATA(%(axis)s))[0];\n PyObject* list = PyList_New(%(l)s);\n %(copy_inputs_to_list)s\n int tensors_lens_sum;\n if(%(view)s != -1) {\n tensors_lens_sum = 0;\n\n for(int i=0; i < %(n)s; i++){\n tensors_lens_sum += PyArray_DIM((PyArrayObject *)(PyList_GetItem(list, i)), axis);\n }\n tensors_lens_sum -= PyArray_DIM(%(non_empty_tensor)s, axis);\n }\n if(%(view)s != -1 && tensors_lens_sum == 0) {\n Py_XDECREF(%(out)s);\n Py_INCREF(%(non_empty_tensor)s);\n %(out)s = %(non_empty_tensor)s;\n }else{\n //PyObject* PyArray_Concatenate(PyObject* obj, int axis)\n int ndim = PyArray_NDIM(%(input_1)s);\n if( axis < -ndim ){\n PyErr_Format(PyExc_IndexError,\n \"Join axis %%d out of bounds [0, %%d)\", axis, ndim);\n %(fail)s\n }\n Py_XDECREF(%(out)s);\n %(out)s = (PyArrayObject *)PyArray_Concatenate(list, axis);\n Py_DECREF(list);\n if(!%(out)s){\n %(fail)s\n }\n }\n \"\"\"\n % locals()\n )\n return code\n\n def R_op(self, inputs, eval_points):\n if None in eval_points[1:]:\n return [None]\n return self.make_node(inputs[0], *eval_points[1:]).outputs\n\n def grad(self, axis_and_tensors, grads):\n \"\"\"The gradient wrt a join op is a `Split`, used to partition\n the gradient along the `axis` which was used for joining.\n \"\"\"\n (gz,) = grads\n axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]\n\n rval = [grad_undefined(self, 0, axis)]\n\n dtypes = [as_tensor_variable(x).type.dtype for x in 
tensors]\n out_dtype = scal.upcast(*dtypes)\n\n if \"float\" in out_dtype or \"complex\" in out_dtype:\n # assume that this is differentiable\n split = Split(len(tensors))\n split_gz = split(gz, axis, stack([shape(x)[axis] for x in tensors]))\n # If there is only one split, it might not be in a list.\n if not isinstance(split_gz, list):\n split_gz = [split_gz]\n # Split.make_node isn't always able to infer the right\n # broadcast. As the grad need to keep the information,\n # read it if needed.\n split_gz = [\n patternbroadcast(g, t.broadcastable) for t, g in zip(tensors, split_gz)\n ]\n rval = rval + split_gz\n else:\n # the output has integer type, so the gradient through it\n # is 0\n rval = rval + [tensor.zeros_like(dtype=config.floatX) for tensor in tensors]\n\n return rval\n\n def infer_shape(self, node, ishapes):\n # ishapes[0] contains the size of the axis on which we join\n # Join op should get at least one input to join\n assert len(ishapes) > 1\n n_dim = len(ishapes[1])\n for shp in ishapes[1:]:\n assert shp is not None\n assert len(shp) == n_dim\n\n # The joining dimension could be negative, but we need it to be\n # in [0, n_dim) in the loop below.\n # An axis < -n_dim or >= ndim would be invalid, but this is\n # not checked here. An Assert op would be a way of addressing that,\n # but it may disrupt optimizations.\n join_dim = switch(ge(node.inputs[0], 0), node.inputs[0], node.inputs[0] + n_dim)\n out_shapes = []\n for dim in range(n_dim):\n # we have to deal with 2 possible cases in here :\n # a) we are dealing with the dimension for which we join\n # (called t_side from true side of the if, where the if\n # compares current dimension with the joining dimension)\n # b) a non joining dimension ( in which maybe a symbolic\n # assertion can be used to make sure all tensors have\n # the same number of elements on this non-joined dimension\n # this is f_side\n # initialize\n t_side = ishapes[1][dim]\n f_side = ishapes[1][dim]\n # loop over tensors and sum for the joining dimension\n for shp in ishapes[2:]:\n t_side = t_side + shp[dim]\n # return the dimensions found\n out_shapes.append(switch(eq(dim, join_dim), t_side, f_side))\n\n return [tuple(out_shapes)]\n\n\njoin_ = Join()\npprint.assign(Join, printing.FunctionPrinter(\"join\"))\n\n\ndef join(axis, *tensors_list):\n \"\"\"\n Convenience function to concatenate `TensorType`s along the given axis.\n\n This function will not add the op in the graph when it is not useful.\n For example, in the case that the list of tensors to be concatenated\n is one, it will just return the tensor.\n\n Parameters\n ----------\n tensors : list of tensors (or list-like)\n A list of tensors to be concatenated along the given axis.\n The shapes of the tensors to be concatenated must be all\n identical, except in the dimension (`axis`) on which they are to\n be joined.\n axis : int (symbolic or literal)\n On which dimension should the tensors be joined? The `axis`\n must be a valid index into the shape of the tensors to be\n concatenated.\n The `axis` parameter may either be an integer or an object that\n can be converted to a scalar using `as_scalar`(`axis`). 
In the\n former case, the axis is fixed at construction, while in the\n latter it may vary over time depending on the value of the\n `axis` variable.\n \"\"\"\n if len(tensors_list) == 1:\n return tensors_list[0]\n else:\n return join_(axis, *tensors_list)\n\n\ndef roll(x, shift, axis=None):\n \"\"\"\n Convenience function to roll TensorTypes along the given axis.\n\n Syntax copies numpy.roll function.\n\n Parameters\n ----------\n x : tensor_like\n Input tensor.\n shift : int (symbolic or literal)\n The number of places by which elements are shifted.\n axis : int (symbolic or literal), optional\n The axis along which elements are shifted. By default, the array\n is flattened before shifting, after which the original\n shape is restored.\n\n Returns\n -------\n tensor\n Output tensor, with the same shape as ``x``.\n\n \"\"\"\n if axis is None:\n if x.ndim > 1:\n y = x.flatten()\n return roll(y, shift, axis=0).reshape(x.shape)\n else:\n axis = 0\n\n if axis < 0:\n axis += x.ndim\n\n # Shift may be larger than the size of the axis. If so, since the\n # roll operation is cyclic, we can take the shift modulo the size\n # of the axis\n shift = shift % x.shape[axis]\n\n # A slice of all elements in a dimension ':'\n allslice = slice(None)\n # List of slices describing the front half [:, :, shift:, :]\n front_slice = slice(-shift, None)\n front_list = [allslice] * axis + [front_slice] + [allslice] * (x.ndim - axis - 1)\n # List of slices describing the back half [:, :, :shift, :]\n end_slice = slice(0, -shift)\n end_list = [allslice] * axis + [end_slice] + [allslice] * (x.ndim - axis - 1)\n return join(axis, x.__getitem__(tuple(front_list)), x.__getitem__(tuple(end_list)))\n\n\n@constructor\ndef shape_padleft(t, n_ones=1):\n \"\"\"Reshape `t` by left-padding the shape with `n_ones` 1s.\n\n See Also\n --------\n shape_padaxis\n shape_padright\n Dimshuffle\n\n \"\"\"\n _t = as_tensor_variable(t)\n\n pattern = [\"x\"] * n_ones + [i for i in range(_t.type.ndim)]\n return DimShuffle(_t.broadcastable, pattern)(_t)\n\n\n@constructor\ndef shape_padright(t, n_ones=1):\n \"\"\"Reshape `t` by right-padding the shape with `n_ones` 1s.\n\n See Also\n --------\n shape_padaxis\n shape_padleft\n Dimshuffle\n\n \"\"\"\n _t = as_tensor_variable(t)\n\n pattern = [i for i in range(_t.type.ndim)] + [\"x\"] * n_ones\n return DimShuffle(_t.broadcastable, pattern)(_t)\n\n\n@constructor\ndef shape_padaxis(t, axis):\n \"\"\"Reshape `t` by inserting 1 at the dimension `axis`.\n\n Examples\n --------\n >>> tensor = theano.tensor.tensor3()\n >>> theano.tensor.shape_padaxis(tensor, axis=0)\n DimShuffle{x,0,1,2}.0\n >>> theano.tensor.shape_padaxis(tensor, axis=1)\n DimShuffle{0,x,1,2}.0\n >>> theano.tensor.shape_padaxis(tensor, axis=3)\n DimShuffle{0,1,2,x}.0\n >>> theano.tensor.shape_padaxis(tensor, axis=-1)\n DimShuffle{0,1,2,x}.0\n\n See Also\n --------\n shape_padleft\n shape_padright\n Dimshuffle\n\n \"\"\"\n _t = as_tensor_variable(t)\n\n ndim = _t.ndim + 1\n if not -ndim <= axis < ndim:\n msg = \"axis {0} is out of bounds [-{1}, {1})\".format(axis, ndim)\n raise IndexError(msg)\n if axis < 0:\n axis += ndim\n\n pattern = [i for i in range(_t.type.ndim)]\n pattern.insert(axis, \"x\")\n return DimShuffle(_t.broadcastable, pattern)(_t)\n\n\n@constructor\ndef stack(*tensors, **kwargs):\n \"\"\"Stack tensors in sequence on given axis (default is 0).\n\n Take a sequence of tensors and stack them on given axis to make a single\n tensor. 
The size in dimension `axis` of the result will be equal to the number\n of tensors passed.\n\n Note: The interface stack(*tensors) is deprecated, you should use\n stack(tensors, axis=0) insted.\n\n Parameters\n ----------\n tensors : list or tuple of tensors\n A list of tensors to be stacked.\n axis : int\n The index of the new axis. Default value is 0.\n\n Examples\n --------\n >>> a = theano.tensor.scalar()\n >>> b = theano.tensor.scalar()\n >>> c = theano.tensor.scalar()\n >>> x = theano.tensor.stack([a, b, c])\n >>> x.ndim # x is a vector of length 3.\n 1\n >>> a = theano.tensor.tensor4()\n >>> b = theano.tensor.tensor4()\n >>> c = theano.tensor.tensor4()\n >>> x = theano.tensor.stack([a, b, c])\n >>> x.ndim # x is a 5d tensor.\n 5\n >>> rval = x.eval(dict((t, np.zeros((2, 2, 2, 2))) for t in [a, b, c]))\n >>> rval.shape # 3 tensors are stacked on axis 0\n (3, 2, 2, 2, 2)\n >>> x = theano.tensor.stack([a, b, c], axis=3)\n >>> x.ndim\n 5\n >>> rval = x.eval(dict((t, np.zeros((2, 2, 2, 2))) for t in [a, b, c]))\n >>> rval.shape # 3 tensors are stacked on axis 3\n (2, 2, 2, 3, 2)\n >>> x = theano.tensor.stack([a, b, c], axis=-2)\n >>> x.ndim\n 5\n >>> rval = x.eval(dict((t, np.zeros((2, 2, 2, 2))) for t in [a, b, c]))\n >>> rval.shape # 3 tensors are stacked on axis -2\n (2, 2, 2, 3, 2)\n \"\"\"\n # ---> Remove this when moving to the new interface:\n if not tensors and not kwargs:\n raise Exception(\n \"theano.tensor.stack(tensors, axis) must have at least\" \" one parameter\"\n )\n\n if not kwargs and not isinstance(tensors[0], (list, tuple)):\n warnings.warn(\n \"stack(*tensors) interface is deprecated, use\"\n \" stack(tensors, axis=0) instead.\",\n DeprecationWarning,\n stacklevel=3,\n )\n axis = 0\n elif \"tensors\" in kwargs:\n tensors = kwargs[\"tensors\"]\n if \"axis\" in kwargs:\n axis = kwargs[\"axis\"]\n else:\n axis = 0\n else:\n if len(tensors) == 2:\n axis = tensors[1]\n elif \"axis\" in kwargs:\n axis = kwargs[\"axis\"]\n else:\n axis = 0\n tensors = tensors[0]\n # <--- Until here.\n\n if len(tensors) == 0:\n raise Exception(\n \"tensors is empty. 
You should at least provide one\"\n \" tensor to theano.tensor.stack(tensors, axis).\"\n )\n\n # If all tensors are scalars of the same type, call make_vector.\n # It makes the graph simpler, by not adding DimShuffles and Rebroadcasts\n\n # This should be an optimization!\n # Doing it here make the graph less canonicalized\n # (more type need to be understood by all optimization)\n # And DebugMode can't detect error in this code as it is not in an\n # optimization.\n # See ticket #660\n if np.all(\n [ # in case there is direct int in tensors.\n isinstance(t, (np.number, float, int, builtins.complex))\n or (\n isinstance(t, Variable)\n and isinstance(t.type, TensorType)\n and t.ndim == 0\n )\n for t in tensors\n ]\n ):\n # in case there is direct int\n tensors = list(map(as_tensor_variable, tensors))\n dtype = scal.upcast(*[i.dtype for i in tensors])\n return theano.tensor.opt.MakeVector(dtype)(*tensors)\n return join(axis, *[shape_padaxis(t, axis) for t in tensors])\n\n\n@constructor\ndef concatenate(tensor_list, axis=0):\n \"\"\"Alias for `join`(axis, *tensor_list).\n\n This function is similar to `join`, but uses the signature of\n numpy's concatenate function.\n\n Raises\n ------\n TypeError\n The tensor_list must be a tuple or list.\n\n \"\"\"\n # Check someone did not make the common mistake to do something like:\n # c = concatenate(x, y)\n # instead of\n # c = concatenate((x, y))\n if not isinstance(tensor_list, (tuple, list)):\n raise TypeError(\n \"The 'tensors' argument must be either a tuple \"\n \"or a list, make sure you did not forget () or [] around \"\n \"arguments of concatenate.\",\n tensor_list,\n )\n return join(axis, *tensor_list)\n\n\ndef get_vector_length(v):\n \"\"\"Return the run-time length of a symbolic vector.\n\n Parameters\n ----------\n v\n A rank-1 TensorType variable.\n\n Raises\n ------\n TypeError\n `v` hasn't the proper type.\n ValueError\n No special case applies, the length is not known.\n In general this is not possible, but for a number of special cases\n the length can be determined at compile / graph-construction time.\n This function implements these special cases.\n\n \"\"\"\n v = as_tensor_variable(v)\n if v.ndim != 1:\n raise TypeError(f\"argument must be symbolic vector, got '{v}'\")\n if v.type.broadcastable[0]:\n return 1\n if isinstance(v, theano.tensor.sharedvar.TensorSharedVariable) and v.type.ndim == 1:\n return len(v.get_value())\n if isinstance(v, gof.Constant) and v.type.ndim == 1:\n return len(v.data)\n if v.owner and isinstance(v.owner.op, theano.tensor.opt.MakeVector):\n return len(v.owner.inputs)\n if v.owner and isinstance(v.owner.op, Shape):\n return v.owner.inputs[0].type.ndim\n # If we take a slice, we know how many elements it will result in\n if (\n v.owner\n and isinstance(v.owner.op, theano.tensor.subtensor.Subtensor)\n and isinstance(v.owner.op.idx_list[0], slice)\n and v.owner.inputs[0].owner\n and isinstance(v.owner.inputs[0].owner.op, theano.compile.ops.Shape)\n ):\n start = extract_constant(\n theano.tensor.subtensor.get_idx_list(v.owner.inputs, v.owner.op.idx_list)[\n 0\n ].start\n )\n stop = extract_constant(\n theano.tensor.subtensor.get_idx_list(v.owner.inputs, v.owner.op.idx_list)[\n 0\n ].stop\n )\n step = extract_constant(\n theano.tensor.subtensor.get_idx_list(v.owner.inputs, v.owner.op.idx_list)[\n 0\n ].step\n )\n\n ndim = v.owner.inputs[0].owner.inputs[0].ndim\n types = (numbers.Integral, np.integer)\n if start is None:\n start = 0\n elif isinstance(start, types) and start < 0:\n start += ndim\n if start < 
0:\n start = 0\n if stop is None:\n stop = ndim\n elif isinstance(stop, types):\n if stop > ndim:\n stop = ndim\n elif stop < 0:\n stop += ndim\n if step is None:\n step = 1\n\n if (\n isinstance(stop, types)\n and isinstance(start, types)\n and isinstance(step, types)\n and start >= 0\n and stop >= 0\n and step > 0\n and stop >= start\n ):\n return (stop - start - 1) // step + 1\n if isinstance(v, Variable):\n msg = theano.printing.debugprint(v, file=\"str\")\n else:\n msg = str(v)\n raise ValueError(f\"length not known: {msg}\")\n\n\n@constructor\ndef horizontal_stack(*args):\n \"\"\"\n Horizontally stack two L{TensorType}s.\n\n Stack two L{TensorType}s along the second axis (column wise). These\n L{TensorType}s must have the same shape along all dimensions but the\n second.\n\n \"\"\"\n # Note: 'horizontal_stack' and 'vertical_stack' do not behave exactly like\n # Numpy's hstack and vstack functions. This is intended, because Numpy's\n # functions have potentially confusing/incoherent behavior (try them on 1D\n # arrays). If this is fixed in a future version of Numpy, it may be worth\n # trying to get closer to Numpy's way of doing things. In the meantime,\n # better keep different names to emphasize the implementation divergences.\n assert len(args) >= 2\n for arg in args:\n assert arg.type.ndim == 2\n return concatenate(args, axis=1)\n\n\n@constructor\ndef vertical_stack(*args):\n assert len(args) >= 2\n for arg in args:\n assert arg.type.ndim == 2\n return concatenate(args, axis=0)\n\n\nclass Reshape(Op):\n \"\"\"Perform a reshape operation of the input x to the new shape shp.\n The number of dimensions to which to reshape to (ndim) must be\n known at graph build time.\n \"\"\"\n\n view_map = {0: [0]} # output 0 is potentially aliased to inputs [0]\n _f16_ok = True\n\n check_input = False\n __props__ = (\"ndim\",)\n params_type = ParamsType(ndim=int32)\n # name does not participate because it doesn't affect computations\n\n def __init__(self, ndim, name=None):\n self.ndim = int(ndim)\n if ndim < 0:\n raise ValueError(\"The output dimensions after reshape must be 0 or greater\")\n assert name is None, \"name attribute for Reshape has been deprecated\"\n\n def __str__(self):\n return f\"{self.__class__.__name__}{{{self.ndim}}}\"\n\n def make_node(self, x, shp):\n x = as_tensor_variable(x)\n shp_orig = shp\n shp = as_tensor_variable(shp, ndim=1)\n if not (\n shp.dtype in int_dtypes\n or (isinstance(shp, TensorConstant) and shp.data.size == 0)\n ):\n # It raises an error if shp is not of integer type,\n # except when shp is constant and empty\n # (in this case, shp.dtype does not matter anymore).\n raise TypeError(\"Shape must be integers\", shp, shp.dtype)\n assert shp.ndim == 1\n if isinstance(shp, TensorConstant):\n bcast = [s == 1 for s in shp.data]\n return gof.Apply(self, [x, shp], [tensor(x.type.dtype, bcast)])\n else:\n bcasts = [False] * self.ndim\n shp_list = shp_orig\n if hasattr(shp_orig, \"ndim\") and shp_orig.ndim == 0:\n shp_list = [shp_orig]\n for index in range(self.ndim):\n y = shp_list[index]\n y = as_tensor_variable(y)\n # Try to see if we can infer that y has a constant value of 1.\n # If so, that dimension should be broadcastable.\n try:\n bcasts[index] = (\n hasattr(y, \"get_scalar_constant_value\")\n and y.get_scalar_constant_value() == 1\n )\n except NotScalarConstantError:\n pass\n return gof.Apply(self, [x, shp], [tensor(x.type.dtype, bcasts)])\n\n def perform(self, node, inp, out_, params):\n x, shp = inp\n (out,) = out_\n if len(shp) != self.ndim:\n raise 
ValueError(\n (\n \"shape argument to Reshape.perform has incorrect\"\n f\" length {len(shp)}\"\n f\", should be {self.ndim}\"\n ),\n shp,\n )\n try:\n out[0] = np.reshape(x, shp)\n except Exception:\n raise ValueError(f\"Cannot reshape input of shape {x.shape} to shape {shp}\")\n\n def connection_pattern(self, node):\n return [[True], [False]]\n\n def grad(self, inp, grads):\n x, shp = inp\n (g_out,) = grads\n return [reshape(g_out, shape(x), ndim=x.ndim), DisconnectedType()()]\n\n def R_op(self, inputs, eval_points):\n if eval_points[0] is None:\n return [None]\n return self(eval_points[0], *inputs[1:], **dict(return_list=True))\n\n def infer_shape(self, node, ishapes):\n # inputs[1] can contain at most one value of '-1', meaning the actual\n # shape of the output will be automatically computed by reshape, so\n # that the total number of elements stays the same.\n # TODO: Maybe put that formula here?\n # It's not trivial, because we would have to check if the product of\n # all the non-minus-one shapes is a divisor of the product of the\n # original shapes.\n\n # The following expression leads to cycles in feature_shape,\n # because it tries to replace the Shape_i node by the switch\n # statement, which depends on Shape_i.\n # return [tuple([switch(eq(node.inputs[1][i], -1),\n # theano.tensor.opt.Shape_i(i)(node.outputs[0]),\n # node.inputs[1][i])\n # for i in range(self.ndim)]\n # )]\n\n # Here, we only simplify if the shape (node.inputs[1]) is a constant,\n # ideally it would suffice to check that it is always non-negative.\n\n # If current variable is a scalar and its dimensionality should\n # change to self.ndim, then use size 1 for all new dimensions.\n if len(ishapes[0]) == 0:\n return [(1,) * self.ndim]\n\n requ = node.inputs[1]\n input_size = mul(*ishapes[0])\n if isinstance(requ, theano.tensor.TensorConstant):\n requ = list(requ.data)\n requ_part = [ele for ele in requ if ele != -1]\n crit = len(requ) - len(requ_part)\n if crit == 1 and len(requ_part) > 0:\n # If there are both 0 and -1 in requ_size, it is impossible\n # to determine a right output, but we can at least prevent\n # a division by 0. We do not want to keep a negative\n # size here as it could lead to further weird errors\n # after other optimizations.\n requ_size = mul(*requ_part)\n missing = input_size // (1 if requ_size == 0 else requ_size)\n for i, ele in enumerate(requ):\n if ele == -1:\n requ[i] = missing\n elif crit == 1: # we reshape to -1\n requ = [input_size] if ishapes[0] else [1]\n elif crit > 1:\n raise ValueError(\n \"shape argument to Reshape.perform\"\n \" must have at most one entry equal to -1\"\n )\n return [requ]\n else:\n requ = [requ[i] for i in range(self.ndim)]\n # since new_dims can have negative value (-1), the\n # multiplication of all values should be negated\n # to give a positive value.\n # To avoid optimization complexity, we avoid checking\n # for the case when there are two or more '-1' values.\n if self.ndim:\n requ_size = -mul(*requ)\n # If there are both 0 and -1 in requ_size, it is impossible\n # to determine a right output, but we can at least prevent\n # a division by 0. 
We do not want to keep a negative\n # size here as it could lead to further weird errors\n # after other optimizations.\n rest_size = input_size // maximum(requ_size, 1)\n return [\n tuple(\n [\n switch(eq(requ[i], -1), rest_size, requ[i])\n for i in range(self.ndim)\n ]\n )\n ]\n\n def c_code_cache_version(self):\n return (8,)\n\n def c_code(self, node, name, inputs, outputs, sub):\n if isinstance(node.inputs[0], TensorVariable):\n x, shp = inputs\n (z,) = outputs\n sdtype = node.inputs[1].type.dtype_specs()[1]\n fail = sub[\"fail\"]\n params = sub[\"params\"]\n return (\n \"\"\"\n assert (PyArray_NDIM(%(shp)s) == 1);\n npy_intp new_dims[%(params)s->ndim];\n PyArray_Dims newshape;\n newshape.ptr = new_dims;\n newshape.len = %(params)s->ndim;\n for (int ii = 0; ii < %(params)s->ndim; ++ii)\n {\n // -- We do not want an explicit cast here. the shp can be any\n // -- int* dtype. The compiler will explicitly upcast it, but\n // -- will err if this will downcast. This could happen if the\n // -- user pass an int64 dtype, but npy_intp endup being int32.\n new_dims[ii] = ((%(sdtype)s*)(\n PyArray_BYTES(%(shp)s) +\n ii * PyArray_STRIDES(%(shp)s)[0]))[0];\n }\n Py_XDECREF(%(z)s);\n %(z)s = (PyArrayObject *) PyArray_Newshape(%(x)s, &newshape, NPY_CORDER);\n if (!%(z)s)\n {\n //The error message should have been set by PyArray_Newshape\n %(fail)s;\n }\n \"\"\"\n % locals()\n )\n else:\n return Op.c_code(self, node, name, inputs, outputs, sub)\n\n\ndef reshape(x, newshape, ndim=None):\n if ndim is None:\n newshape = as_tensor_variable(newshape)\n if newshape.ndim != 1:\n raise TypeError(\n \"New shape in reshape must be a vector or a list/tuple of\"\n f\" scalar. Got {newshape} after conversion to a vector.\"\n )\n try:\n ndim = get_vector_length(newshape)\n except ValueError:\n raise ValueError(\n f\"The length of the provided shape ({newshape}) cannot \"\n \"be automatically determined, so Theano is not able \"\n \"to know what the number of dimensions of the reshaped \"\n \"variable will be. You can provide the 'ndim' keyword \"\n \"argument to 'reshape' to avoid this problem.\"\n )\n op = Reshape(ndim)\n rval = op(x, newshape)\n return rval\n\n\nclass Flatten(Op):\n \"\"\"\n Flatten a tensor.\n\n Flattens a tensor to `outdim` dimensions by preserving the leading\n outdim - 1 shape components.\n\n .. note:: The interface Flatten(Op) is deprecated, you should use flatten.\n \"\"\"\n\n view_map = {0: [0]}\n\n check_input = False\n __props__ = (\"outdim\",)\n\n def __init__(self, outdim=1):\n warnings.warn(\n \"Flatten class is deprecated, \" \"please use flatten method instead.\",\n DeprecationWarning,\n stacklevel=4,\n )\n self.outdim = int(outdim)\n\n def __str__(self):\n return f\"{self.__class__.__name__}{{{self.outdim}}}\"\n\n def make_node(self, x):\n t_x = as_tensor_variable(x)\n if self.outdim < 1 or (x.ndim and self.outdim > x.ndim):\n raise ValueError(\n f\"invalid output ndimensions ({self.outdim}) for tensor of \"\n f\"rank {t_x.ndim}\"\n )\n\n # Infer the broadcastable pattern of the output. 
For every dimension\n # unaffected by the flatten, the broadcast flag should be unchanged.\n # For the dimension resulting from the collapse of other dimensions,\n # it should be broadcastable iff all the collapsed dimensions were\n # broadcastable.\n bcast_kept_dims = x.broadcastable[: self.outdim - 1]\n bcast_new_dim = builtins.all(x.broadcastable[self.outdim - 1 :])\n broadcastable = bcast_kept_dims + (bcast_new_dim,)\n\n return gof.Apply(self, [t_x], [tensor(x.type.dtype, broadcastable)])\n\n def perform(self, node, inp, out_):\n (x,) = inp\n (out,) = out_\n outdim = self.outdim\n if outdim == 1:\n try:\n out[0] = x.reshape(x.size)\n except AttributeError:\n out[0] = x.reshape((np.prod(x.shape),))\n elif outdim == len(x.shape):\n out[0] = x\n else:\n newshape = x.shape[: outdim - 1] + (np.prod(x.shape[outdim - 1 :]),)\n out[0] = x.reshape(newshape)\n\n def infer_shape(self, node, in_shapes):\n (in_shp,) = in_shapes\n part1 = in_shp[: self.outdim - 1]\n part2 = in_shp[self.outdim - 1 :]\n\n if len(part2) > 1:\n part2 = (prod(part2, dtype=\"int64\"),)\n elif len(part2) == 1:\n # We do not want to force an upcast of part2 if its length is 1\n pass\n else:\n if len(in_shp) == 0 and self.outdim == 1:\n part2 = (1,)\n else:\n raise ValueError(\n f\"invalid output ndimensions ({self.outdim}) for tensor \"\n f\"of rank {len(in_shp)}\"\n )\n\n out_shape = part1 + part2\n return [out_shape]\n\n def grad(self, inp, grads):\n (x,) = inp\n (g_out,) = grads\n return [reshape(g_out, shape(x), x.ndim)]\n\n def R_op(self, inputs, eval_points):\n if None in eval_points:\n return [None]\n return self.make_node(*eval_points).outputs\n\n def c_code_cache_version(self):\n return (1, 1)\n\n def c_code(self, node, name, inputs, outputs, sub):\n (x,) = inputs\n (out,) = outputs\n outdim = self.outdim\n fail = sub[\"fail\"]\n return (\n \"\"\"\n if (%(outdim)s == PyArray_NDIM(%(x)s))\n {\n Py_XDECREF(%(out)s);\n Py_XINCREF(%(x)s);\n %(out)s = %(x)s;\n }\n else\n {\n Py_XDECREF(%(out)s);\n\n if (%(outdim)s == 1)\n {\n npy_intp size = PyArray_SIZE(%(x)s);\n PyArray_Dims newshape;\n newshape.ptr = &size;\n newshape.len = 1;\n %(out)s = (PyArrayObject*)PyArray_Newshape(%(x)s,\n &newshape,\n NPY_CORDER);\n }\n else\n {\n npy_intp *oldshape = PyArray_DIMS(%(x)s);\n npy_intp newshape_dims[%(outdim)s];\n\n int i;\n for (i = 0; i < %(outdim)s - 1; ++i)\n newshape_dims[i] = oldshape[i];\n\n newshape_dims[i] = 1;\n\n for (int j = %(outdim)s - 1; j < PyArray_NDIM(%(x)s); ++j)\n newshape_dims[i] *= oldshape[j];\n\n PyArray_Dims newshape;\n newshape.ptr = newshape_dims;\n newshape.len = %(outdim)s;\n %(out)s = (PyArrayObject*)PyArray_Newshape(%(x)s,\n &newshape,\n NPY_CORDER);\n }\n }\n if (!%(out)s)\n {\n //The error message should have been set by\n // PyArray_Newshape\n %(fail)s;\n }\n \"\"\"\n % locals()\n )\n\n\ndef is_flat(var, ndim=None, outdim=None):\n \"\"\"\n Verifies the dimensionality of the var is equal to\n outdim. 
This method is usually called after flatten method on a\n variable, where the first outdim-1 dimension size(s) of the variable\n is kept intact, and the last dimension size of the variable is made\n equal to the multiplication of its remaining dimension size(s), such that\n the variable would end up with as many dimension as outdim.\n\n Parameters\n ----------\n var : theano.tensor.var.TensorVariable\n the theano var on which the dimensionality is checked.\n\n outdim : int\n the expected dimensionality of var.\n\n Returns\n -------\n bool\n the comparison result of var's dim\n and the expected outdim.\n \"\"\"\n if outdim is None and ndim is None:\n ndim = 1\n elif outdim is not None and ndim is not None:\n raise ValueError(\"You should only specify ndim\")\n elif outdim is not None:\n warnings.warn(\"flatten outdim parameter is deprecated, use ndim instead.\")\n ndim = outdim\n return var.ndim == ndim\n\n\ndef flatten(x, ndim=None, outdim=None):\n \"\"\"\n Reshapes the variable x by keeping\n the first outdim-1 dimension size(s) of x the same,\n and making the last dimension size of x equal to\n the multiplication of its remaining dimension size(s).\n\n Parameters\n ----------\n x : theano.tensor.var.TensorVariable\n the variable that should be reshaped.\n\n ndim : int\n the number of dimensions of the returned variable\n Default 1.\n outdim : int\n DEPRECATED synonym for ndim\n Returns\n -------\n theano.tensor.var.TensorVariable\n the flattend variable with dimensionality of outdim\n \"\"\"\n if outdim is None and ndim is None:\n ndim = 1\n elif outdim is not None and ndim is not None:\n raise ValueError(\"You should only specify ndim\")\n elif outdim is not None:\n warnings.warn(\"flatten outdim parameter is deprecated, use ndim instead.\")\n\n ndim = outdim\n # Any input variable can be flattened to have ndim of 1,\n # even if it's a scalar. Otherwise, ndim must be positive\n # and smaller than x.ndim.\n if ndim < 1 or (ndim > 1 and ndim > x.ndim):\n raise ValueError(f\"ndim {ndim} out of bound [1, {x.ndim + 1})\")\n\n if ndim > 1:\n dims = tuple(x.shape[: ndim - 1]) + (-1,)\n else:\n dims = (-1,)\n x_reshaped = x.reshape(dims)\n bcast_kept_dims = x.broadcastable[: ndim - 1]\n bcast_new_dim = builtins.all(x.broadcastable[ndim - 1 :])\n broadcastable = bcast_kept_dims + (bcast_new_dim,)\n x_reshaped = theano.tensor.addbroadcast(\n x_reshaped, *filter(lambda i: broadcastable[i], range(ndim))\n )\n return x_reshaped\n\n\n# class TileGrad(Op):\n# \"\"\"\n# Calculates the gradient of the Tile Op.\n# \"\"\"\n# # this is so weird, I can't think of how to make this a general thing.\n# def make_node(self, x, reps, g_out):\n# return gof.Apply(self, [x, reps, g_out], [x.type()])\n#\n# def perform(self, node, inp, out):\n# x, reps, g_out = inp\n# gx, = out\n# xsh = x.shape\n# if len(reps) == 2 and reps[1] == 1 and len(x.shape) == 1:\n# gx[0] = numpy.sum(g_out, axis=0)\n# else:\n# raise NotImplementedError('x.shape, reps combination not '\n# 'supported', (x.shape, reps))\n#\n# tilegrad = TileGrad()\n\n\nclass Tile(Op):\n \"\"\"\n Construct an array by repeating the input x according to reps pattern.\n\n .. note:: Deprecated\n Use tile() instead.\n\n Tiles its input according to reps. 
The length of reps is the number of\n dimension of x and contains the number of times to tile x in each\n dimension.\n\n See Also\n --------\n numpy.tile : http://docs.scipy.org/doc/numpy/reference/generated/numpy.tile.html\n\n \"\"\"\n\n __props__ = (\"ndim\",)\n\n def __init__(self, ndim):\n self.ndim = ndim\n\n def __str__(self):\n return f\"{self.__class__.__name__ }{{ndim={self.ndim}}}\"\n\n def make_node(self, x, reps):\n warnings.warn(\n (\"Tile op is deprecated, use tile function instead.\"), stacklevel=3\n )\n x = as_tensor_variable(x)\n reps = as_tensor_variable(reps)\n return gof.Apply(self, [x, reps], [tensor(x.type.dtype, [False] * self.ndim)])\n\n def perform(self, node, inp, out_):\n x, reps = inp\n (out,) = out_\n res = np.tile(x, reps)\n if res.ndim != self.ndim:\n raise ValueError(\"Tile.perform produced incorrect number of dimensions\")\n\n if (np.asarray(reps) == 1).all():\n # In that case, some NumPy version return a view! As this\n # op isn't declared as inplace, we need to check that and\n # copy the data.\n if np.may_share_memory(res, x):\n res = res.copy()\n out[0] = res\n\n def infer_shape(self, node, in_shapes):\n # Note: in contrast with numpy, it is assumed that x.shape and reps\n # have equal length; see also tile function below\n\n # Note: if reps were to be allowed not to be a constant and x.shape\n # and reps to be unequal, the following block of code could be used:\n # prepend 1 to x.shape if needed\n # if self.ndim > x.ndim:\n # shp = concatenate(ones(self.ndim - x.ndim), shp)\n # prepend 1 to reps if needed\n # reps = concatenate(ones(self.ndim - reps.shape[0]), reps)\n\n x, reps = node.inputs\n shp = in_shapes[0]\n tiled_shp = shp * reps\n out_shape = []\n for i in range(self.ndim):\n out_shape.append(tiled_shp[i])\n return [out_shape]\n\n def grad(self, inp, grads):\n x, reps = inp\n (g_out,) = grads\n # return [tilegrad(x, reps, g_out), None]\n raise NotImplementedError()\n\n\ndef tile(x, reps, ndim=None):\n \"\"\"\n Tile input array `x` according to `reps`.\n\n See the docstring of `numpy.tile` for details.\n\n 'reps' can be constant integer (e.g. 3), constant vector(e.g. [2 3]),\n symbolic scalar (e.g. tensor.iscalar()), symbolic vector (e.g. tensor.ivector())\n or a list of symbolic scalar (e.g. [tensor.iscalar(), tensor.iscalar()]).\n\n ndim is the number of the dimensions of the output, if it is provided, ndim\n should be equal or larger than x.ndim and len(reps), otherwise, we will use\n max(x.ndim, len(reps)) as ndim. 
If reps is symbolic vector, the ndim has to\n be provided.\n\n \"\"\"\n\n if ndim is not None and ndim < x.ndim:\n raise ValueError(\"ndim should be equal or larger than x.ndim\")\n\n # if reps is tensor.scalar, integer or tensor.vector, we convert it to a list.\n if not isinstance(reps, (list, tuple)):\n reps_astensor = as_tensor_variable(reps)\n ndim_check = reps_astensor.ndim\n if reps_astensor.dtype not in theano.tensor.discrete_dtypes:\n raise ValueError(\"elements of reps must be integer dtype\")\n\n # tensor.scalar/integer case\n if ndim_check == 0:\n reps = [reps]\n\n # tensor.vector case\n elif ndim_check == 1:\n if ndim is None:\n raise ValueError(\n \"if reps is tensor.vector, you should specify \" \"the ndim\"\n )\n else:\n offset = ndim - reps.shape[0]\n\n # assert that reps.shape[0] does not exceed ndim\n offset = theano.tensor.opt.assert_(offset, ge(offset, 0))\n\n # if reps.ndim is less than x.ndim, we pad the reps with\n # \"1\" so that reps will have the same ndim as x.\n reps_ = [switch(i < offset, 1, reps[i - offset]) for i in range(ndim)]\n reps = reps_\n\n # other raise error\n else:\n raise ValueError(\"the dimension of reps should not exceed 1\")\n else:\n if ndim is not None and len(reps) > ndim:\n raise ValueError(\"len(reps) should be equal or less than ndim\")\n if not np.all(\n [\n isinstance(r, int)\n or (\n isinstance(r, TensorVariable)\n and r.dtype in theano.tensor.discrete_dtypes\n )\n for r in reps\n ]\n ):\n raise ValueError(\"elements of reps must be scalars of integer dtype\")\n\n # if reps.ndim is less than x.ndim, we pad the reps with\n # \"1\" so that reps will have the same ndim as x.\n reps = list(reps)\n if ndim is None:\n ndim = builtins.max(len(reps), x.ndim)\n if len(reps) < ndim:\n reps = [1] * (ndim - len(reps)) + reps\n\n shape = [1] * (ndim - x.ndim) + [x.shape[i] for i in range(x.ndim)]\n alloc_shape = reps + shape\n y = alloc(x, *alloc_shape)\n shuffle_ind = np.arange(ndim * 2).reshape(2, ndim)\n shuffle_ind = shuffle_ind.transpose().flatten()\n y = y.dimshuffle(*shuffle_ind)\n new_shapes = [sh * reps[i] for i, sh in enumerate(shape)]\n y = y.reshape(new_shapes)\n\n return y\n\n\nclass ARange(Op):\n \"\"\"Create an array containing evenly spaced values within a given interval.\n\n Parameters and behaviour are the same as numpy.arange().\n\n \"\"\"\n\n __props__ = (\"dtype\",)\n\n def __init__(self, dtype):\n self.dtype = dtype\n\n def make_node(self, start, stop, step):\n start, stop, step = map(as_tensor_variable, (start, stop, step))\n assert start.ndim == 0\n assert stop.ndim == 0\n assert step.ndim == 0\n\n inputs = [start, stop, step]\n outputs = [tensor(self.dtype, (False,))]\n\n return Apply(self, inputs, outputs)\n\n @theano.configparser.change_flags(warn_float64=\"ignore\")\n def infer_shape(self, node, i_shapes):\n # Note start, stop and step can be float numbers.\n start, stop, step = node.inputs\n\n def is_constant_value(var, value):\n try:\n v = get_scalar_constant_value(var)\n return np.all(v == value)\n except NotScalarConstantError:\n pass\n return False\n\n def upcast(var):\n if (\n var.dtype in integer_dtypes\n and\n # We do not want to cast uint64 to int64 as this can\n # loose information. If we upcast uint64 with int64,\n # this give float64. 
This is safer then checking for\n # uint64 in case we support [u]int128 or other in the\n # future.\n scal.upcast(var.dtype, \"int64\") == \"int64\"\n ):\n return cast(var, \"int64\")\n return var\n\n if is_constant_value(step, 1):\n if is_constant_value(start, 0):\n return [(cast(stop, \"int64\"),)]\n else:\n stop = upcast(stop)\n start = upcast(start)\n return [(maximum(cast(stop - start, \"int64\"), 0),)]\n else:\n stop = upcast(stop)\n start = upcast(start)\n return [\n (\n maximum(\n cast(ceil(cast((stop - start), \"float64\") / step), \"int64\"), 0\n ),\n )\n ]\n\n def perform(self, node, inp, out_):\n start, stop, step = inp\n (out,) = out_\n start = start.item()\n stop = stop.item()\n step = step.item()\n out[0] = np.arange(start, stop, step, dtype=self.dtype)\n\n def connection_pattern(self, node):\n\n return [[True], [False], [True]]\n\n def L_op(self, inputs, outputs, grads):\n start, stop, step = inputs\n (gz,) = grads\n # `start` and `step` affect the output values\n # but the outputs are integers so there's\n # no gradient through them.\n # When they are not integers, the gradients are\n # as expressed below.\n # `stop` does not affect the output values,\n # just the output shape, so it is disconnected.\n\n if self.dtype in discrete_dtypes:\n return [\n start.zeros_like(dtype=config.floatX),\n DisconnectedType()(),\n step.zeros_like(dtype=config.floatX),\n ]\n else:\n num_steps_taken = outputs[0].shape[0]\n return [\n gz.sum(),\n DisconnectedType()(),\n (gz * arange(num_steps_taken, dtype=self.dtype)).sum(),\n ]\n\n def R_op(self, inputs, eval_points):\n return [None]\n\n\n_arange = {}\n\n\ndef arange(start, stop=None, step=1, dtype=None):\n # If only one argument is provided, it is in fact the \"stop\" argument,\n # and start is 0.\n if stop is None:\n start, stop = 0, start\n\n start, stop, step = map(as_tensor_variable, (start, stop, step))\n # If dtype is not provided, infer it from the other arguments\n if dtype is None:\n dtype = scal.upcast(start.type.dtype, stop.type.dtype, step.type.dtype)\n # don't try to be stingy and byte-optimize, this leads to\n # overflow problems.\n if dtype in int_dtypes:\n dtype = \"int64\"\n if dtype in uint_dtypes:\n dtype = \"uint64\"\n if config.cast_policy in (\"numpy\", \"numpy+floatX\"):\n # We enforce numpy semantics, except in the special case where\n # `config.cast_policy` is 'numpy+floatX' and we want to use float32\n # rather than float64.\n # As an example, if `start`, `stop` and `step` are all int32,\n # `numpy.arange` returns an int64 array (on 64-bit platforms),\n # while the upcast above returns int32.\n numpy_dtype = np.arange(\n start=np.array(0, dtype=start.dtype),\n stop=np.array(1, dtype=stop.dtype),\n step=np.array(1, dtype=step.dtype),\n ).dtype\n if numpy_dtype != dtype:\n if (\n config.cast_policy == \"numpy+floatX\"\n and config.floatX == \"float32\"\n and numpy_dtype == \"float64\"\n and\n # No explicit float64 in the three arguments?\n builtins.all(\n dt != \"float64\" for dt in [s.dtype for s in (start, stop, step)]\n )\n ):\n # We use float32 instead.\n assert dtype != \"float64\"\n dtype = \"float32\"\n else:\n # We use the same dtype as numpy instead of the result of\n # the upcast.\n dtype = str(numpy_dtype)\n\n if dtype not in _arange:\n _arange[dtype] = ARange(dtype)\n return _arange[dtype](start, stop, step)\n\n\nclass _nd_grid:\n \"\"\"Create a dense n-dimensional 'meshgrid' with equally spaced points.\n\n Used to create the instance ``mgrid`` and ``ogrid`` which act similarly\n to their numpy 
equivalents.\n\n Parameters\n ----------\n sparse : boolean, optional, default=True\n Specifying False leads to the equivalent of numpy's mgrid functionality.\n Specifying True leads to the equivalent of ogrid.\n\n Examples\n --------\n >>> a = T.mgrid[0:5, 0:3]\n >>> a[0].eval()\n array([[0, 0, 0],\n [1, 1, 1],\n [2, 2, 2],\n [3, 3, 3],\n [4, 4, 4]], dtype=int8)\n >>> a[1].eval()\n array([[0, 1, 2],\n [0, 1, 2],\n [0, 1, 2],\n [0, 1, 2],\n [0, 1, 2]], dtype=int8)\n >>> b = T.ogrid[0:5, 0:3]\n >>> b[0].eval()\n array([[0],\n [1],\n [2],\n [3],\n [4]], dtype=int8)\n >>> b[1].eval()\n array([[0, 1, 2, 3]], dtype=int8)\n\n \"\"\"\n\n def __init__(self, sparse=False):\n self.sparse = sparse\n\n def __getitem__(self, *args):\n\n if isinstance(args[0], slice):\n sl = args[0]\n return arange(sl.start or 0, sl.stop, sl.step or 1)\n\n ndim = len(args[0])\n for sl in args[0]:\n if isinstance(sl.step, builtins.complex):\n raise NotImplementedError(\n \"Not implemented for slices \" \"whose step is complex\"\n )\n ranges = [arange(sl.start or 0, sl.stop, sl.step or 1) for sl in args[0]]\n shapes = [\n tuple([1] * j + [r.shape[0]] + [1] * (ndim - 1 - j))\n for j, r in enumerate(ranges)\n ]\n ranges = [r.reshape(shape) for r, shape in zip(ranges, shapes)]\n if self.sparse:\n grids = ranges\n else:\n grids = []\n ones = [ones_like(r) for r in ranges]\n for i in range(ndim):\n grid = 1\n for j in range(ndim):\n if j == i:\n grid = grid * ranges[j]\n else:\n grid = grid * ones[j]\n grids.append(grid)\n return grids\n\n\nmgrid = _nd_grid()\nogrid = _nd_grid(sparse=True)\n\n\nclass PermuteRowElements(Op):\n \"\"\"Permute the elements of each row (inner-most dim) of a tensor.\n\n A permutation will be applied to every row (vector) of the input tensor x.\n Depending on the dimensionality of x and the permutation tensor y,\n different cases are possible.\n If y.ndim = 1, y is a single permutation, that will be applied to every\n vector of x. For instance, if x is a matrix, the same permutation will be\n applied to each row of x.\n If x.ndim = y.ndim, each row of x corresponds to a row of y, containing\n a permutation that will be applied to that row. For instance, if x and y\n are two matrices, a different permutation will be applied to each row of x.\n If x.ndim > y.ndim, y will be broadcasted to fit x, then each row (vector)\n of x will be reordered according to the corresponding row of y. (This is\n a generalization of the first case).\n If x.ndim = 1, every permutation in y will be applied to x, and the output\n will contain all the results.\n If x.ndim < y.ndim, x will be broadcasted to fit y, and different\n permutations contained in y will be applied to each vector in x. 
(This is\n a generalization of the previous case).\n\n If the \"inverse\" argument is True, the Op will perform the inverse\n permutation instead.\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, x, y, inverse):\n x = as_tensor_variable(x)\n y = as_tensor_variable(y)\n if inverse: # as_tensor_variable does not accept booleans\n inverse = as_tensor_variable(1)\n else:\n inverse = as_tensor_variable(0)\n\n # y should contain integers\n assert y.type.dtype in integer_dtypes\n # Inverse should be an integer scalar\n assert inverse.type.ndim == 0 and inverse.type.dtype in integer_dtypes\n\n # Match shapes of x and y\n x_dim = x.type.ndim\n y_dim = y.type.ndim\n\n if x_dim > y_dim:\n y = shape_padleft(y, n_ones=(x_dim - y_dim))\n elif x_dim < y_dim:\n x = shape_padleft(x, n_ones=(y_dim - x_dim))\n\n # Compute the broadcastable pattern of the output\n out_broadcastable = [\n xb and yb for xb, yb in zip(x.type.broadcastable, y.type.broadcastable)\n ]\n out_type = tensor(dtype=x.type.dtype, broadcastable=out_broadcastable)\n\n inputlist = [x, y, inverse]\n outputlist = [out_type]\n return Apply(self, inputlist, outputlist)\n\n def _rec_perform(self, node, x, y, inverse, out, curdim):\n \"\"\"Perform the permutation by doing a recursion over the input\n dimensions.\n\n For every dimension, starting with the leftmost, the right set of\n indices is determined (depending if broadcasting or not), then\n the function is recursively called on the appropriate subtensors.\n\n The terminal case is reached when the current tensors are vector,\n then the permutation contained in y is applied to x.\n\n Parameters\n ----------\n x : tensor\n The input tensor, on which the permutation is applied.\n y : tensor\n Tensor containing the permutations to apply.\n out : tensor\n Tensor storing the output result.\n curdim : int\n Counter of the current depth of recursion.\n inverse\n Wether to apply permutations or their inverse.\n\n \"\"\"\n if len(x.shape) == 1:\n # Numpy advanced indexing works in this case\n if inverse:\n out[y] = x[:]\n else:\n out[:] = x[y]\n else:\n xs0 = x.shape[0]\n ys0 = y.shape[0]\n if xs0 == ys0:\n for i in range(xs0):\n self._rec_perform(node, x[i], y[i], inverse, out[i], curdim + 1)\n elif ys0 == 1 and node.inputs[1].type.broadcastable[curdim]:\n # Broadcast y\n for i in range(xs0):\n self._rec_perform(node, x[i], y[0], inverse, out[i], curdim + 1)\n elif xs0 == 1 and node.inputs[0].type.broadcastable[curdim]:\n # Broadcast x\n for i in range(ys0):\n self._rec_perform(node, x[0], y[i], inverse, out[i], curdim + 1)\n else:\n raise ValueError(f\"Dimension mismatch: {xs0}, {ys0}\")\n\n def perform(self, node, inp, out):\n x, y, inverse = inp\n (outs,) = out\n x_s = x.shape\n y_s = y.shape\n assert len(x_s) == len(y_s)\n\n # Make sure the output is big enough\n out_s = []\n for xdim, ydim in zip(x_s, y_s):\n if xdim == ydim:\n outdim = xdim\n elif xdim == 1:\n outdim = ydim\n elif ydim == 1:\n outdim = xdim\n else:\n raise ValueError(f\"Dimension mismatch: {xdim}, {ydim}\")\n out_s.append(outdim)\n\n if outs[0] is None or outs[0].shape != out_s:\n outs[0] = np.empty(out_s, dtype=x.dtype)\n\n self._rec_perform(node, x, y, inverse, outs[0], curdim=0)\n\n def infer_shape(self, node, in_shapes):\n shp_x = in_shapes[0]\n shp_y = in_shapes[1]\n assert len(shp_x) == len(shp_y)\n out_shape = []\n for i in range(len(shp_x)):\n out_shape.append(maximum(shp_x[i], shp_y[i]))\n return [out_shape]\n\n def grad(self, inp, grads):\n x, y, inverse = inp\n (gz,) = grads\n # First, compute the gradient wrt the 
broadcasted x.\n # If 'inverse' is False (0), apply the inverse of y on gz.\n # Else, apply y on gz.\n gx = permute_row_elements(gz, y, eq(inverse, 0))\n\n # If x has been broadcasted along some axes, we need to sum\n # the gradient over these axes, but keep the dimension (as\n # broadcastable)\n broadcasted_dims = [\n dim\n for dim in range(gz.type.ndim)\n if x.type.broadcastable[dim] and not gz.type.broadcastable[dim]\n ]\n gx = Sum(axis=broadcasted_dims)(gx)\n\n # Sum(...) removed the dimensions in broadcasted_dims,\n # so we need to put them back.\n newdims = []\n i = 0\n for dim in range(gz.type.ndim):\n if dim in broadcasted_dims:\n newdims.append(\"x\")\n else:\n newdims.append(i)\n i += 1\n\n gx = DimShuffle(gx.type.broadcastable, newdims)(gx)\n assert gx.type.broadcastable == x.type.broadcastable\n\n # if x is an integer type, then so is the output.\n # this means f(x+eps) = f(x) so the gradient with respect\n # to x is zero\n if x.type.dtype in discrete_dtypes:\n gx = x.zeros_like()\n\n # The elements of y and of inverse both affect the output,\n # so they are connected to the output,\n # and the transformation isn't defined if their values\n # are non-integer, so the gradient with respect to them is\n # undefined\n\n return [gx, grad_undefined(self, 1, y), grad_undefined(self, 1, inverse)]\n\n\n_permute_row_elements = PermuteRowElements()\n\n\ndef permute_row_elements(x, y, inverse=0):\n return _permute_row_elements(x, y, inverse)\n\n\ndef inverse_permutation(perm):\n \"\"\"Computes the inverse of permutations.\n\n Each row of input should contain a permutation of the first integers.\n\n \"\"\"\n return permute_row_elements(\n arange(perm.shape[-1], dtype=perm.dtype), perm, inverse=True\n )\n\n\n#########################\n# Linalg : Dot\n#########################\n#\n# For BLAS-related ops see blas.py\n#\n# TODO: Dotinv should go here, Eigs, Svd, etc.\n\n\nclass Dot(Op):\n \"\"\"\n Computes the dot product of two variables. For two matrices, this is\n equivalent to matrix multiplication. For two vectors, this is the inner\n product.\n\n Notes\n -----\n Matrix-matrix products are sometimes optimized to Dot22 or Gemm ops\n (see tensor.blas).\n Vector-vector products are sometimes optimized to Ger or CGer (see\n tensor.blas).\n Matrix-vector products are sometimes optimized to Gemv, CGemv (see\n tensor.blas).\n\n \"\"\"\n\n __props__ = ()\n\n # the rationale for Dot22 is related to getting GEMM Ops into the\n # graph. See Dot22 in tensor.blas for details.\n\n def make_node(self, *inputs):\n inputs = list(map(as_tensor_variable, inputs))\n\n if len(inputs) != 2:\n raise TypeError(\n f\"theano.tensor.Dot: 2 arguments required, {len(inputs)} given \"\n )\n if inputs[0].ndim not in (1, 2):\n raise TypeError(\n \"theano.tensor.Dot: input 0 (0-indexed) must have ndim of \"\n f\"1 or 2, {int(inputs[0].ndim)} given. Consider calling \"\n \"theano.tensor.dot instead.\"\n )\n if inputs[1].ndim not in (1, 2):\n raise TypeError(\n \"theano.tensor.Dot: input 1 (0-indexed) must have ndim of \"\n f\"1 or 2, {int(inputs[1].ndim)} given. 
Consider calling \"\n \"theano.tensor.dot instead.\"\n )\n\n i_broadcastables = [input.type.broadcastable for input in inputs]\n bx, by = i_broadcastables\n if len(by) == 2: # y is a matrix\n bz = bx[:-1] + by[-1:]\n elif len(by) == 1: # y is vector\n bz = bx[:-1]\n\n i_dtypes = [input.type.dtype for input in inputs]\n outputs = [tensor(scal.upcast(*i_dtypes), bz)]\n return Apply(self, inputs, outputs)\n\n def perform(self, node, inp, out):\n x, y = inp\n (z,) = out\n\n # the asarray is here because dot between two vectors\n # gives a numpy float object but we need to return a 0d\n # ndarray\n z[0] = np.asarray(np.dot(x, y))\n\n def grad(self, inp, grads):\n\n x, y = inp\n (gz,) = grads\n xdim, ydim, gdim = x.type.ndim, y.type.ndim, gz.type.ndim\n\n # grad is scalar, so x is vector and y is vector\n if gdim == 0:\n xgrad = gz * y\n ygrad = gz * x\n\n # x is vector, y is matrix, grad is vector\n elif xdim == 1 and ydim == 2:\n xgrad = dot(gz, y.T)\n ygrad = outer(x.T, gz)\n\n # x is matrix, y is vector, grad is vector\n elif xdim == 2 and ydim == 1:\n xgrad = outer(gz, y.T)\n ygrad = dot(x.T, gz)\n\n # x is matrix, y is matrix, grad is matrix\n elif xdim == ydim == 2:\n xgrad = dot(gz, y.T)\n ygrad = dot(x.T, gz)\n\n # If x or y contain broadcastable dimensions but only one of\n # them know that a matching dimensions is broadcastable, the\n # above code don't always return the right broadcast pattern.\n # This cause problem down the road. See gh-1461.\n if xgrad.broadcastable != x.broadcastable:\n xgrad = patternbroadcast(xgrad, x.broadcastable)\n if ygrad.broadcastable != y.broadcastable:\n ygrad = patternbroadcast(ygrad, y.broadcastable)\n\n rval = xgrad, ygrad\n\n for elem in rval:\n assert elem.dtype.find(\"float\") != -1\n\n return rval\n\n def R_op(self, inputs, eval_points):\n # R_op for a \\dot b evaluted at c for a and d for b is\n # simply c \\dot b + a \\dot d\n\n assert len(inputs) == 2\n assert len(eval_points) == 2\n if eval_points[0] is None and eval_points[1] is None:\n return [None]\n\n if eval_points[0]:\n t1 = self(eval_points[0], inputs[1])\n if eval_points[1]:\n t2 = self(inputs[0], eval_points[1])\n\n if eval_points[0] and eval_points[1]:\n return [t1 + t2]\n elif eval_points[0]:\n return [t1]\n else:\n return [t2]\n\n def infer_shape(self, node, shapes):\n xshp, yshp = shapes\n x, y = node.inputs\n\n # vector / vector\n if x.ndim == 1 and y.ndim == 1:\n return [()]\n # matrix / vector\n if x.ndim == 2 and y.ndim == 1:\n return [xshp[:-1]]\n # vector / matrix\n if x.ndim == 1 and y.ndim == 2:\n return [yshp[-1:]]\n # matrix / matrix\n if x.ndim == 2 and y.ndim == 2:\n return [xshp[:-1] + yshp[-1:]]\n raise NotImplementedError()\n\n def __str__(self):\n return \"dot\"\n\n\n_dot = Dot()\npprint.assign(\n _dot, printing.OperatorPrinter(printing.special[\"middle_dot\"], -1, \"left\")\n)\n\n\ndef dot(l, r):\n \"\"\"Return a symbolic dot product.\n\n This is designed to work with both sparse and dense tensors types.\n \"\"\"\n\n if not isinstance(l, Variable):\n l = as_tensor_variable(l)\n\n if not isinstance(r, Variable):\n r = as_tensor_variable(r)\n\n try:\n res = l.__dot__(r)\n if res is NotImplemented:\n raise NotImplementedError\n except (NotImplementedError, AttributeError, TypeError):\n res = r.__rdot__(l)\n if res is NotImplemented:\n raise NotImplementedError()\n\n return res\n\n\ndef dense_dot(a, b):\n \"\"\"\n Computes the dot product of two variables.\n\n For two matrices, this is equivalent to matrix multiplication.\n For two vectors, this is the inner 
product.\n When one variable is a scalar, this is like elementwise multiplication.\n For N dimensions, this is a sum product over the last axis\n of the first array and the second-to-last axis of the second array:\n\n dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])\n\n Note that this dot function does one of three things, in the following\n sequence:\n\n 1. If either a or b is scalar, it returns the elementwise product\n without calling the Theano Dot op.\n\n 2. If either a or b has more than 2 dimensions, it calls Theano's\n tensordot function with appropriate axes. The tensordot function\n expresses high-dimensional dot products in terms of 2D matrix\n multiplications, so it may be possible to futherize optimize for\n performance.\n\n 3. If both a and b have either 1 or 2 dimensions, it calls Theano's\n Dot op on a and b.\n\n Notes\n -----\n Matrix-matrix products are sometimes optimized to Dot22 or Gemm ops\n (see tensor.blas).\n Vector-vector products are sometimes optimized to Ger or CGer (see\n tensor.blas).\n Matrix-vector products are sometimes optimized to Gemv, CGemv (see\n tensor.blas).\n\n \"\"\"\n a, b = as_tensor_variable(a), as_tensor_variable(b)\n\n if a.ndim == 0 or b.ndim == 0:\n return a * b\n elif a.ndim > 2 or b.ndim > 2:\n return tensordot(a, b, [[a.ndim - 1], [np.maximum(0, b.ndim - 2)]])\n else:\n return _dot(a, b)\n\n\n#########################\n# Linalg : TensorDot\n#########################\n\n\ndef _tensordot_as_dot(a, b, axes, dot, batched):\n \"\"\"\n Reduces a tensor dot product to a matrix or vector dot product. Based\n on code from Tijmen Tieleman's gnumpy\n (http://www.cs.toronto.edu/~tijmen/gnumpy.html).\n\n Please see the documentation of tensordot for the meaning of the a, b\n and axes arguments.\n\n :param dot: a function that accepts two symbolic variables and computes\n the appropriate dot product (e.g. dot, batched_dot)\n :type dot: function\n\n :param batched: whether to treat the first axis of a and b as a batch\n axis. 
If so, this axis will be preserved in the output,\n allowing this function to be used also for batched\n tensor dot products.\n :type batched: boolean\n\n :returns: a tensor with shape equal to the concatenation of a's shape\n (less any dimensions that were summed over) and b's shape\n (less the first dimension and any dimensions that were summed\n over).\n :rtype: symbolic tensor\n \"\"\"\n a, b = as_tensor_variable(a), as_tensor_variable(b)\n\n if not np.isscalar(axes) and len(axes) != 2:\n raise ValueError(\n \"Axes should be an integer or a \"\n \"list/tuple of len 2 ({axes} was provided)\"\n )\n\n # if 'axes' is a number of axes to multiply and sum over (trailing axes\n # of a, leading axes of b), we can just reshape and use dot.\n elif np.isscalar(axes):\n axes = int(axes)\n\n for operand_name, operand in ((\"a\", a), (\"b\", b)):\n if axes > operand.ndim:\n raise ValueError(\n f\"axes can not be larger than the dimension of {operand_name} \"\n f\"({operand_name}.ndim={operand.ndim}, axes={axes})\"\n )\n if batched and axes == operand.ndim:\n raise ValueError(\n \"axes to sum over must not include the batch axis \"\n f\"of {operand_name} ({operand_name}.ndim={operand.ndim}, axes={axes})\"\n )\n\n batch_axes = 1 if batched else 0\n a_outaxes = slice(0, a.ndim - axes)\n b_outaxes = slice(batch_axes + axes, b.ndim)\n outshape = concatenate([a.shape[a_outaxes], b.shape[b_outaxes]])\n outbcast = a.broadcastable[a_outaxes] + b.broadcastable[b_outaxes]\n outndim = len(outbcast)\n\n a_shape = [1] * 2\n b_shape = [1] * 2\n\n # compute total size of summed axes\n for i in range(0, axes):\n a_shape[1] *= a.shape[-(i + 1)]\n b_shape[0] *= b.shape[batch_axes + i]\n # compute total size of other axes\n for i in range(0, a.ndim - axes - batch_axes):\n a_shape[0] *= a.shape[batch_axes + i]\n for i in range(0, b.ndim - axes - batch_axes):\n b_shape[1] *= b.shape[-(i + 1)]\n\n if batched:\n a_shape.insert(0, a.shape[0])\n b_shape.insert(0, b.shape[0])\n\n a_reshaped = a.reshape(a_shape)\n b_reshaped = b.reshape(b_shape)\n\n out_reshaped = dot(a_reshaped, b_reshaped)\n out = out_reshaped.reshape(outshape, outndim)\n # Make sure the broadcastable pattern of the result is correct,\n # since some shape information can be lost in the reshapes.\n return patternbroadcast(out, outbcast)\n\n # if 'axes' is a list, transpose a and b such that the summed axes of a\n # are last and the summed axes of b are first.\n else:\n axes = [_pack(axes_) for axes_ in axes]\n\n if len(axes[0]) != len(axes[1]):\n raise ValueError(\"Axes elements must have the same length.\")\n\n for i, (operand_name, operand) in enumerate(((\"a\", a), (\"b\", b))):\n if len(axes[i]) > operand.ndim:\n raise ValueError(\n f\"axes[{i}] should be array_like with length less than \"\n f\"the dimensions of {operand_name} ({operand_name}.ndim={operand.ndim}, len(axes[0])={len(axes[i])}).\"\n )\n if len(axes[i]) > 0 and np.max(axes[i]) >= operand.ndim:\n raise ValueError(\n f\"axes[{i}] contains dimensions greater than or equal \"\n f\"to {operand_name}.ndim ({operand_name}.ndim={operand.ndim}, max(axes[0])={np.max(np.array(axes[i]))}).\"\n )\n if batched and 0 in axes[i]:\n raise ValueError(\n \"axes to sum over must not contain the batch axis \"\n f\"(axes[{i}]={axes[i]})\"\n )\n\n batch_axes = [0] if batched else []\n other_axes = [\n [x for x in range(operand.ndim) if x not in axes[i] and x not in batch_axes]\n for i, operand in enumerate((a, b))\n ]\n\n a_shuffled = a.dimshuffle(batch_axes + other_axes[0] + axes[0])\n b_shuffled = 
b.dimshuffle(batch_axes + axes[1] + other_axes[1])\n\n # now that a and b are in the right order, recur with integer axes\n return _tensordot_as_dot(\n a_shuffled, b_shuffled, len(axes[0]), dot=dot, batched=batched\n )\n\n\ndef tensordot(a, b, axes=2):\n \"\"\"\n Compute a generalized dot product over provided axes.\n\n Given two tensors a and b, tensordot computes a generalized dot product over\n the provided axes. Theano's implementation reduces all expressions to\n matrix or vector dot products and is based on code from Tijmen Tieleman's\n gnumpy (http://www.cs.toronto.edu/~tijmen/gnumpy.html).\n\n Parameters\n ----------\n a: symbolic tensor\n The first tensor variable.\n b: symbolic tensor\n The second tensor variable\n axes: int or array-like of length 2\n If an integer, the number of axes to sum over.\n If an array, it must have two array elements containing the axes\n to sum over in each tensor.\n\n Note that the default value of 2 is not guaranteed to work\n for all values of a and b, and an error will be raised if\n that is the case. The reason for keeping the default is to\n maintain the same signature as numpy's tensordot function\n (and np.tensordot raises analogous errors for non-compatible\n inputs).\n\n If an integer i, it is converted to an array containing\n the last i dimensions of the first tensor and the first\n i dimensions of the second tensor:\n axes = [list(range(a.ndim - i, b.ndim)), list(range(i))]\n\n If an array, its two elements must contain compatible axes\n of the two tensors. For example, [[1, 2], [2, 0]] means sum\n over the 2nd and 3rd axes of a and the 3rd and 1st axes of b.\n (Remember axes are zero-indexed!) The 2nd axis of a and the\n 3rd axis of b must have the same shape; the same is true for\n the 3rd axis of a and the 1st axis of b.\n\n Returns\n -------\n symbolic tensor\n A tensor with shape equal to the concatenation of a's shape\n (less any dimensions that were summed over) and b's shape\n (less any dimensions that were summed over).\n\n Examples\n --------\n It may be helpful to consider an example to see what tensordot does.\n Theano's implementation is identical to NumPy's. Here a has shape (2, 3, 4)\n and b has shape (5, 6, 4, 3). The axes to sum over are [[1, 2], [3, 2]] --\n note that a.shape[1] == b.shape[3] and a.shape[2] == b.shape[2]; these axes\n are compatible. The resulting tensor will have shape (2, 5, 6) -- the\n dimensions that are not being summed:\n\n >>> a = np.random.random((2,3,4))\n >>> b = np.random.random((5,6,4,3))\n\n #tensordot\n >>> c = np.tensordot(a, b, [[1,2],[3,2]])\n\n #loop replicating tensordot\n >>> a0, a1, a2 = a.shape\n >>> b0, b1, _, _ = b.shape\n >>> cloop = np.zeros((a0,b0,b1))\n\n #loop over non-summed indices -- these exist\n #in the tensor product.\n >>> for i in range(a0):\n ... for j in range(b0):\n ... for k in range(b1):\n ... #loop over summed indices -- these don't exist\n ... #in the tensor product.\n ... for l in range(a1):\n ... for m in range(a2):\n ... cloop[i,j,k] += a[i,l,m] * b[j,k,m,l]\n\n >>> np.allclose(c, cloop)\n true\n\n This specific implementation avoids a loop by transposing a and b such that\n the summed axes of a are last and the summed axes of b are first. The\n resulting arrays are reshaped to 2 dimensions (or left as vectors, if\n appropriate) and a matrix or vector dot product is taken. The result is\n reshaped back to the required output dimensions.\n\n In an extreme case, no axes may be specified. 
The resulting tensor\n will have shape equal to the concatenation of the shapes of a and b:\n\n >>> c = np.tensordot(a, b, 0)\n >>> print(a.shape)\n (2,3,4)\n >>> print(b.shape)\n (5,6,4,3)\n >>> print(c.shape)\n (2,3,4,5,6,4,3)\n\n See the documentation of numpy.tensordot for more examples.\n\n \"\"\"\n return _tensordot_as_dot(a, b, axes, dot=dot, batched=False)\n\n\ndef outer(x, y):\n \"\"\"Return vector-vector outer product.\n\n If an input isn't a vector, we flatten it first.\n\n \"\"\"\n if x.ndim != 1:\n x = x.flatten()\n if y.ndim != 1:\n y = y.flatten()\n return dot(x.dimshuffle(0, \"x\"), y.dimshuffle(\"x\", 0))\n\n\ndef any(x, axis=None, keepdims=False):\n out = elemwise.Any(axis)(x)\n\n if keepdims:\n out = makeKeepDims(x, out, axis)\n return out\n\n\ndef all(x, axis=None, keepdims=False):\n out = elemwise.All(axis)(x)\n\n if keepdims:\n out = makeKeepDims(x, out, axis)\n return out\n\n\n# Some NumPy version like 1.9.2 return a view for numpy.diagonal\nx = np.zeros((4, 4))\nnumpy_diagonal_return_view = np.may_share_memory(np.diagonal(x), x)\ndel x\n\n\nclass ExtractDiag(Op):\n \"\"\"\n Return specified diagonals.\n\n If x is 2-D, returns the diagonal of x with the given offset,\n i.e., the collection of elements of the form x[i, i+offset].\n If x has more than two dimensions, then the axes specified by\n axis1 and axis2 are used to determine the 2-D sub-array whose\n diagonal is returned. The shape of the resulting array can be\n determined by removing axis1 and axis2 and appending an index\n to the right equal to the size of the resulting diagonals.\n\n Parameters\n ----------\n x: A tensor variable with x.ndim >= 2.\n\n offset: Offset of the diagonal from the main diagonal.\n Can be positive or negative.\n Defaults to main diagonal (0).\n\n axis1: Axis to be used as the first axis of the 2-D\n sub-arrays from which the diagonals should be taken.\n Defaults to first axis (0).\n\n axis2: Axis to be used as the second axis of the 2-D\n sub-arrays from which the diagonals should be taken.\n Defaults to second axis (1).\n\n\n\n Returns\n -------\n array_of_diagonals:\n If x is 2-D, a 1-D array of the same type as a\n containing the diagonal is returned.\n If the dimension of x is greater than two, then an\n array of diagonals is returned, \"packed\" from left-most\n dimension to right-most (e.g., if x is 3-D, then the\n diagonals are \"packed\" along rows).\n\n\n\n Raises\n ------\n ValueError\n If the dimension of x is less than 2.\n\n\n See Also\n --------\n numpy.diagonal:\n https://docs.scipy.org/doc/numpy-dev/reference/generated/numpy.diagonal.html\n \"\"\"\n\n __props__ = (\"offset\", \"axis1\", \"axis2\", \"view\")\n\n def __init__(self, offset=0, axis1=0, axis2=1, view=False):\n self.view = view\n if self.view and not numpy_diagonal_return_view:\n warnings.warn(\n \"View will forced to False. ExtractDiag property view is \"\n f\"set to True but numpy version {np.version.version} and prior versions of \"\n \"numpy.diagonal() do not return a view. 
Update \"\n \"numpy to use ExtractDiag(view=True)\"\n )\n self.view = False\n if self.view:\n self.view_map = {0: [0]}\n self.offset = offset\n self.axis1 = axis1\n self.axis2 = axis2\n\n def make_node(self, x):\n x = as_tensor_variable(x)\n\n if x.ndim < 2:\n raise ValueError(\n \"ExtractDiag needs an input with 2 or more \" \"dimensions\", x\n )\n return Apply(\n self,\n [x],\n [x.type.__class__(dtype=x.dtype, broadcastable=[False] * (x.ndim - 1))()],\n )\n\n def perform(self, node, inputs, outputs):\n (x,) = inputs\n (z,) = outputs\n z[0] = x.diagonal(self.offset, self.axis1, self.axis2)\n if not self.view:\n z[0] = z[0].copy()\n\n def grad(self, inputs, gout):\n (x,) = inputs\n (gz,) = gout\n\n if x.ndim == 2:\n x = theano.tensor.zeros_like(x)\n xdiag = theano.tensor.AllocDiag(offset=self.offset)(gz)\n return [\n theano.tensor.set_subtensor(\n x[: xdiag.shape[0], : xdiag.shape[1]], xdiag\n )\n ]\n else:\n warnings.warn(\n \"gradient of theano.tensor.basic.ExtractDiag only\" \"works for matrices.\"\n )\n return [grad_not_implemented(self, 0, x)]\n\n def infer_shape(self, node, shapes):\n (in_shape,) = shapes\n dim1 = in_shape[self.axis1]\n dim2 = in_shape[self.axis2]\n out_shape = [\n d for i, d in enumerate(in_shape) if i not in (self.axis1, self.axis2)\n ]\n # The following logic is inspired by C code of PyArray_Diagonal().\n offset = self.offset\n if offset > 0:\n diag_size = clip(dim2 - offset, 0, dim1)\n elif offset < 0:\n diag_size = clip(dim1 + offset, 0, dim2)\n else:\n diag_size = minimum(dim1, dim2)\n out_shape.append(diag_size)\n return [tuple(out_shape)]\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n if self.view and not numpy_diagonal_return_view:\n warnings.warn(\n \"View will forced to False. ExtractDiag property view is \"\n f\"set to True but numpy version {np.version.version} and prior versions of \"\n \"set to True but numpy version %s and prior versions of \"\n \"numpy.diagonal() do not return a view. Update \"\n \"numpy to use ExtractDiag(view=True)\"\n )\n self.view = False\n\n if self.view:\n self.view_map = {0: [0]}\n\n if \"offset\" not in state:\n self.offset = 0\n if \"axis1\" not in state:\n self.axis1 = 0\n if \"axis2\" not in state:\n self.axis2 = 1\n\n\ndef diagonal(a, offset=0, axis1=0, axis2=1):\n \"\"\"\n A helper function for `theano.tensor.ExtractDiag`. It accepts tensor with\n `ndim >= 2` as input. The name `diagonal` is just meant to keep it\n consistent with numpy.\n\n Parameters\n ----------\n a : symbolic tensor\n offset : int\n offset\n axis1 : int\n axis2 : int\n\n Returns\n -------\n tensor : symbolic tensor\n\n \"\"\"\n return ExtractDiag(offset, axis1, axis2)(a)\n\n\nclass AllocDiag(Op):\n \"\"\"\n An op that copies a vector to the diagonal of an empty matrix. It does the\n inverse of ExtractDiag.\n\n Usage: T.AllocDiag()(x)\n\n `x` should be a tensor vector. The parenthesis in the front should indicate\n which main diagonal the vector value goes into. 
By default it is set to\n `0`, which corresponds to setting the values of x to the main diagonal in\n the returned matrix.\n\n Parameters\n ----------\n axis1: Axis to be used as the first axis of the 2-D\n sub-arrays to which the diagonals will be allocated.\n Defaults to first axis (0).\n\n axis2: Axis to be used as the second axis of the 2-D\n sub-arrays to which the diagonals will be allocated.\n Defaults to second axis (1).\n\n offset: Offset of the diagonal from the main diagonal defined by `axis1`\n and `axis2`.\n Can be positive or negative.\n Defaults to main diagonal (0).\n\n x: symbolic vector\n A tensor vector consists of diagonal values.\n\n Returns\n -------\n tensor : symbolic tenstor\n A tensor with passed tensor values at their corresponding diagonals.\n\n \"\"\"\n\n __props__ = (\"offset\", \"axis1\", \"axis2\")\n\n def __init__(self, offset=0, axis1=0, axis2=1):\n self.offset = offset\n self.axis1 = axis1\n self.axis2 = axis2\n\n def make_node(self, diag):\n diag = as_tensor_variable(diag)\n if diag.type.ndim < 1:\n raise ValueError(\n \"AllocDiag needs an input with 1 or more \" \"dimensions\", diag.type\n )\n return Apply(\n self,\n [diag],\n [\n diag.type.__class__(\n dtype=diag.dtype, broadcastable=[False] * (diag.ndim + 1)\n )()\n ],\n )\n\n def perform(self, node, inputs, outputs):\n (x,) = inputs\n (z,) = outputs\n\n axis1 = np.minimum(self.axis1, self.axis2)\n axis2 = np.maximum(self.axis1, self.axis2)\n offset = self.offset\n\n # Create array with one extra dimension for resulting matrix\n result_shape = x.shape[:-1] + (x.shape[-1] + abs(offset),) * 2\n result = np.zeros(result_shape, dtype=x.dtype)\n\n # Create slice for diagonal in final 2 axes\n idxs = np.arange(x.shape[-1])\n diagonal_slice = (len(result_shape) - 2) * [slice(None)] + [\n idxs + np.maximum(0, -offset),\n idxs + np.maximum(0, offset),\n ]\n\n # Fill in final 2 axes with x\n result[tuple(diagonal_slice)] = x\n\n if len(x.shape) > 1:\n # Re-order axes so they correspond to diagonals at axis1, axis2\n axes = list(range(len(x.shape[:-1])))\n last_idx = axes[-1]\n axes = axes[:axis1] + [last_idx + 1] + axes[axis1:]\n axes = axes[:axis2] + [last_idx + 2] + axes[axis2:]\n result = result.transpose(axes)\n\n z[0] = result\n\n def grad(self, inputs, gout):\n (gz,) = gout\n return [diagonal(gz, offset=self.offset, axis1=self.axis1, axis2=self.axis2)]\n\n def infer_shape(self, nodes, shapes):\n (x_shape,) = shapes\n axis1 = np.minimum(self.axis1, self.axis2)\n axis2 = np.maximum(self.axis1, self.axis2)\n\n result_shape = list(x_shape[:-1])\n diag_shape = x_shape[-1] + abs(self.offset)\n result_shape = result_shape[:axis1] + [diag_shape] + result_shape[axis1:]\n result_shape = result_shape[:axis2] + [diag_shape] + result_shape[axis2:]\n return [tuple(result_shape)]\n\n def __setstate__(self, state):\n if \"view_map\" in state:\n del state[\"view_map\"]\n\n self.__dict__.update(state)\n\n if \"offset\" not in state:\n self.offset = 0\n if \"axis1\" not in state:\n self.axis1 = 0\n if \"axis2\" not in state:\n self.axis2 = 1\n\n\ndef diag(v, k=0):\n \"\"\"\n A helper function for two ops: `theano.tensor.ExtractDiag` and\n `theano.tensor.AllocDiag`. The name `diag` is meant to keep it consistent\n with numpy. 
It both accepts tensor vector and tensor matrix.\n While the passed tensor variable `v` has `v.ndim>=2`, it builds a\n `ExtractDiag` instance, and returns a vector with its entries equal to\n `v`'s main diagonal; otherwise if `v.ndim` is `1`, it builds an `AllocDiag`\n instance, and returns a matrix with `v` at its k-th diaogonal.\n\n Parameters\n ----------\n v : symbolic tensor\n k : int\n offset\n\n Returns\n -------\n tensor : symbolic tensor\n\n \"\"\"\n\n if v.ndim == 1:\n return AllocDiag(k)(v)\n elif v.ndim >= 2:\n return diagonal(v, offset=k)\n else:\n raise ValueError(\"Input must has v.ndim >= 1.\")\n\n\ndef stacklists(arg):\n \"\"\"\n Recursively stack lists of tensors to maintain similar structure.\n\n This function can create a tensor from a shaped list of scalars:\n\n Examples\n --------\n >>> from theano.tensor import stacklists, scalars, matrices\n >>> from theano import function\n >>> a, b, c, d = scalars('abcd')\n >>> X = stacklists([[a, b], [c, d]])\n >>> f = function([a, b, c, d], X)\n >>> f(1, 2, 3, 4)\n array([[ 1., 2.],\n [ 3., 4.]], dtype=float32)\n\n We can also stack arbitrarily shaped tensors. Here we stack matrices into\n a 2 by 2 grid:\n\n >>> from numpy import ones\n >>> a, b, c, d = matrices('abcd')\n >>> X = stacklists([[a, b], [c, d]])\n >>> f = function([a, b, c, d], X)\n >>> x = ones((4, 4), 'float32')\n >>> f(x, x, x, x).shape\n (2, 2, 4, 4)\n\n \"\"\"\n if isinstance(arg, (tuple, list)):\n return stack(list(map(stacklists, arg)))\n else:\n return arg\n\n\ndef ptp(a, axis=None):\n \"\"\"\n Range of values (maximum - minimum) along an axis.\n\n The name of the function comes from the acronym for peak to peak.\n\n Parameters\n ----------\n a\n Input tensor.\n axis\n Axis along which to find the peaks. By default, flatten the array.\n\n Returns\n -------\n array\n A new array holding the result.\n\n \"\"\"\n\n a = as_tensor_variable(a)\n\n out = max(a, axis) - min(a, axis)\n\n return out\n\n\ndef power(x, y):\n return x ** y\n\n\ndef swapaxes(y, axis1, axis2):\n \"swap axes of inputted tensor\"\n y = as_tensor_variable(y)\n ndim = y.ndim\n li = list(range(0, ndim))\n li[axis1], li[axis2] = li[axis2], li[axis1]\n return y.dimshuffle(li)\n\n\ndef choose(a, choices, out=None, mode=\"raise\"):\n \"\"\"\n Construct an array from an index array and a set of arrays to choose from.\n\n First of all, if confused or uncertain, definitely look at the Examples -\n in its full generality, this function is less simple than it might seem\n from the following code description (below ndi = numpy.lib.index_tricks):\n\n np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)]).\n\n But this omits some subtleties. 
Here is a fully general summary:\n\n Given an ``index`` array (a) of integers and a sequence of n arrays\n (choices), a and each choice array are first broadcast, as necessary,\n to arrays of a common shape; calling these Ba and\n Bchoices[i], i = 0,...,n-1 we have that, necessarily,\n Ba.shape == Bchoices[i].shape for each i.\n Then, a new array with shape Ba.shape is created as follows:\n\n - if mode=raise (the default), then, first of all, each element of a\n (and thus Ba) must be in the range [0, n-1]; now, suppose that\n i (in that range) is the value at the (j0, j1, ..., jm) position in Ba -\n then the value at the same position in the new array is the value in\n Bchoices[i] at that same position;\n\n - if mode=wrap, values in a (and thus Ba) may be any (signed) integer;\n modular arithmetic is used to map integers outside the range [0, n-1]\n back into that range; and then the new array is constructed as above;\n\n - if mode=clip, values in a (and thus Ba) may be any (signed) integer;\n negative integers are mapped to 0; values greater than n-1 are mapped\n to n-1; and then the new array is constructed as above.\n\n Parameters\n ----------\n a : int array\n This array must contain integers in [0, n-1], where n is the number of\n choices, unless mode=wrap or mode=clip, in which cases any integers\n are permissible.\n choices : sequence of arrays\n Choice arrays. a and all of the choices must be broadcastable to\n the same shape. If choices is itself an array (not recommended),\n then its outermost dimension (i.e., the one corresponding to\n choices.shape[0]) is taken as defining the ``sequence``.\n out : array, optional\n If provided, the result will be inserted into this array.\n It should be of the appropriate shape and dtype.\n mode : {``raise`` (default), ``wrap``, ``clip``}, optional\n Specifies how indices outside [0, n-1] will be treated:\n ``raise`` : an exception is raised\n ``wrap`` : value becomes value mod n\n ``clip`` : values < 0 are mapped to 0, values > n-1 are mapped to n-1\n\n Returns\n -------\n merged_array - array\n The merged result.\n\n Raises\n ------\n ValueError - shape mismatch\n If a and each choice array are not all broadcastable to the same shape.\n\n \"\"\"\n # This is done to keep the same function signature then NumPy.\n assert out is None\n return Choose(mode)(a, choices)\n\n\nclass Choose(Op):\n __props__ = (\"mode\",)\n\n def __init__(self, mode):\n assert mode in (\"raise\", \"wrap\", \"clip\")\n self.mode = mode\n\n def infer_shape(self, node, shapes):\n\n a_shape, choices_shape = shapes\n out_shape = theano.tensor.extra_ops.broadcast_shape(\n a_shape, choices_shape[1:], arrays_are_shapes=True\n )\n\n return [out_shape]\n\n def make_node(self, a, choices):\n # Import here as it isn't imported by default and we can't\n # import at the top as it would cause circular import.\n import theano.typed_list\n\n a = as_tensor_variable(a)\n if a.dtype not in theano.tensor.discrete_dtypes:\n raise TypeError(\n f\"choose first argument must have an [u]int* dtype. 
Got {a.dtype}.\"\n )\n\n # Only use make_list if choices have inconsistent shapes\n # otherwise use as_tensor_variable\n if isinstance(choices, (tuple, list)):\n choice = theano.typed_list.make_list(choices)\n else:\n choice = as_tensor_variable(choices)\n (out_shape,) = self.infer_shape(\n None, [tuple(a.shape), tuple(theano.tensor.basic.shape(choice))]\n )\n\n bcast = []\n for s in out_shape:\n try:\n s_val = theano.get_scalar_constant_value(s)\n except (theano.tensor.basic.NotScalarConstantError, AttributeError):\n s_val = None\n\n if s_val == 1:\n bcast.append(True)\n else:\n bcast.append(False)\n\n o = TensorType(choice.dtype, bcast)\n return Apply(self, [a, choice], [o()])\n\n def perform(self, node, inputs, outputs):\n (z,) = outputs\n a = inputs[0]\n choice = inputs[1]\n # TODO reuse out?\n z[0] = np.choose(a, choice, mode=self.mode)\n\n\nclass AllocEmpty(gof.Op):\n \"\"\"Implement Alloc on the cpu, but without initializing memory.\"\"\"\n\n __props__ = (\"dtype\",)\n params_type = ParamsType(typecode=int32)\n\n # specify the type of the data\n def __init__(self, dtype):\n assert isinstance(dtype, str), dtype\n self.dtype = dtype.lower()\n\n @property\n def typecode(self):\n return np.dtype(self.dtype).num\n\n def make_node(self, *shape):\n shape, bcast = alloc_validate_shape(shape)\n otype = TensorType(dtype=self.dtype, broadcastable=bcast)\n output = otype()\n\n output.tag.values_eq_approx = values_eq_approx_always_true\n # The outut can contain nan/inf. output.type is a new\n # instance, so we can do this only for that variable.\n output.type.filter_checks_isfinite = False\n\n # We can't reuse filter_checks_isfinite as by default it is\n # False and it is set to true only in DebugMode.\n # We can't set it in the type as other make_node can reuse the type.\n # We can't set it in the variable as it isn't copied when we copy\n # the variale. 
So we set it in the tag.\n output.tag.nan_guard_mode_check = False\n return Apply(self, shape, [output])\n\n def debug_perform(self, node, inputs, out_, params):\n self.perform(node, inputs, out_, params)\n out_[0][0].fill(-123456789)\n\n def perform(self, node, inputs, out_, params):\n (out,) = out_\n sh = tuple([int(i) for i in inputs])\n if out[0] is None or out[0].shape != sh:\n out[0] = np.empty(sh, dtype=self.dtype)\n\n def c_code(self, node, name, inputs, out_, sub):\n (out,) = out_\n fail = sub[\"fail\"]\n shps = inputs\n nd = len(shps)\n params = sub[\"params\"]\n str = f\"npy_intp dims[{nd}];\\n\"\n for idx, sh in enumerate(shps):\n str += (\n \"dims[%(idx)s] =\"\n \"((npy_intp)((dtype_%(sh)s*)\"\n \" PyArray_DATA(%(sh)s))[0]);\\n\" % locals()\n )\n\n # Validate that the output storage exists\n str += f\"if({out}==NULL\\n\"\n for idx, sh in enumerate(shps):\n str += f\"||PyArray_DIMS({out})[{idx}]!=dims[{idx}]\"\n\n str += (\n \"\"\"){\n /* Reference received to invalid output variable.\n Decrease received reference's ref count and allocate new\n output variable */\n Py_XDECREF(%(out)s);\n %(out)s = (PyArrayObject*)PyArray_EMPTY(%(nd)s,\n dims,\n %(params)s->typecode,\n 0);\n if (!%(out)s)\n {\n PyErr_SetString(PyExc_MemoryError, \"alloc failed\");\n %(fail)s;\n }\n }\n \"\"\"\n % locals()\n )\n return str\n\n def infer_shape(self, node, input_shapes):\n return [node.inputs]\n\n def c_code_cache_version(self):\n return (4,)\n\n def do_constant_folding(self, node):\n return False\n\n def connection_pattern(self, node):\n return [[False] for i in node.inputs]\n\n def grad(self, inputs, grads):\n return [DisconnectedType()() for i in inputs]\n\n def R_op(self, inputs, eval_points):\n return [zeros(inputs, self.dtype)]\n"
] | [
[
"numpy.dot",
"numpy.minimum",
"numpy.asarray",
"numpy.dtype",
"numpy.concatenate",
"numpy.max",
"numpy.all",
"numpy.mean",
"numpy.tri",
"numpy.iinfo",
"numpy.allclose",
"numpy.may_share_memory",
"numpy.reshape",
"numpy.arange",
"numpy.eye",
"numpy.choose",
"numpy.argmax",
"numpy.zeros",
"numpy.nonzero",
"numpy.int64",
"numpy.array",
"numpy.diagonal",
"numpy.sum",
"numpy.maximum",
"numpy.tile",
"numpy.complex",
"numpy.isscalar",
"numpy.prod",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Beaver48/kaggle-chest-xray-abnormalities | [
"d41f32d1c59cb5c925795df3291e929b3ea6d5fd",
"d41f32d1c59cb5c925795df3291e929b3ea6d5fd",
"d41f32d1c59cb5c925795df3291e929b3ea6d5fd"
] | [
"vinbigdata/preprocess.py",
"vinbigdata/visualize.py",
"vinbigdata/postprocess.py"
] | [
"import glob\nimport shutil\nfrom abc import ABC, abstractmethod\nfrom pathlib import Path\nfrom typing import List, Optional, Tuple\n\nimport cv2\nimport numpy as np\nfrom albumentations import BboxParams, Compose, Resize\nfrom pascal_voc_writer import Writer\nfrom pydicom import dcmread\nfrom pydicom.pixel_data_handlers.util import apply_voi_lut\nfrom typing_extensions import TypedDict\nfrom vinbigdata import BoxCoordsFloat, BoxCoordsInt, classname2mmdetid\nfrom vinbigdata.utils import abs2rel\n\nImageMeta = TypedDict(\n 'ImageMeta', {\n 'image_id': str,\n 'class_name': str,\n 'rad_id': Optional[str],\n 'x_min': float,\n 'y_min': float,\n 'x_max': float,\n 'y_max': float\n })\n\n\nclass BaseTransform(ABC):\n \"\"\" Base transformation\n \"\"\"\n\n def __init__(self, fized_size: Tuple[int, int]) -> None:\n self.resize_transform = Compose([Resize(fized_size[0], fized_size[0], always_apply=True)],\n bbox_params=BboxParams(\n format='pascal_voc', min_visibility=0.0, label_fields=['classes']))\n\n @abstractmethod\n def __call__(self, img_name: str, img: np.array, bboxes: List[BoxCoordsInt],\n classes: List[str]) -> Tuple[np.array, List[BoxCoordsInt], List[str]]:\n raise NotImplementedError('call method not implemented')\n\n\nclass GrayscaleTransform(BaseTransform):\n \"\"\" Transformation for grayscale\n \"\"\"\n\n def __init__(self, fized_size: Tuple[int, int] = (1024, 1024)) -> None:\n super(GrayscaleTransform, self).__init__(fized_size)\n\n def __call__(self, img_name: str, img: np.array, bboxes: List[BoxCoordsInt],\n classes: List[str]) -> Tuple[np.array, List[BoxCoordsInt], List[str]]:\n assert len(img.shape) == 2\n res = self.resize_transform(image=cv2.cvtColor(img, cv2.COLOR_GRAY2RGB), bboxes=bboxes, classes=classes)\n return (res['image'], res['bboxes'], res['classes'])\n\n\nclass MaskTransform(BaseTransform):\n \"\"\" Transformation for grayscale\n \"\"\"\n\n def __init__(self, mask_path: str, fized_size: Tuple[int, int] = (1024, 1024)) -> None:\n super(MaskTransform, self).__init__(fized_size)\n self.masks = {Path(mask).name: mask for mask in glob.glob(mask_path + '/*')}\n\n def __call__(self, img_name: str, img: np.array, bboxes: List[BoxCoordsInt],\n classes: List[str]) -> Tuple[np.array, List[BoxCoordsInt], List[str]]:\n mask = cv2.resize(\n cv2.imread(self.masks[Path(img_name).name]), (img.shape[1], img.shape[0]),\n interpolation=cv2.INTER_LANCZOS4)[:, :, 0]\n assert len(mask.shape) == 2\n img = np.concatenate([img[:, :, np.newaxis], img[:, :, np.newaxis], mask[:, :, np.newaxis]], axis=2)\n res = self.resize_transform(image=img, bboxes=bboxes, classes=classes)\n assert len(img.shape) == 3\n return (res['image'], res['bboxes'], res['classes'])\n\n\nclass EqualizeTransform(BaseTransform):\n \"\"\" Transformation with equalization\n \"\"\"\n\n def __init__(\n self,\n clahe_clip_limit: float = 4.0,\n clahe_grid: Tuple[int, int] = (8, 8),\n fized_size: Tuple[int, int] = (1024, 1024)\n ) -> None:\n super(EqualizeTransform, self).__init__(fized_size)\n self.clahe_clip_limit = clahe_clip_limit\n self.clahe_grid = clahe_grid\n\n def __call__(self, img_name: str, img: np.array, bboxes: List[BoxCoordsInt],\n classes: List[str]) -> Tuple[np.array, List[BoxCoordsInt], List[str]]:\n assert len(img.shape) == 2\n clahe = cv2.createCLAHE(clipLimit=self.clahe_clip_limit, tileGridSize=self.clahe_grid)\n img = np.concatenate(\n [img[:, :, np.newaxis],\n cv2.equalizeHist(img)[:, :, np.newaxis],\n clahe.apply(img)[:, :, np.newaxis]],\n axis=2)\n res = self.resize_transform(image=img, bboxes=bboxes, 
classes=classes)\n return res['image'], res['bboxes'], res['classes']\n\n\nclass BaseWriter(ABC):\n \"\"\" Base class for writers\n \"\"\"\n\n def __init__(self, directory: str, clear: bool, image_prepocessor: BaseTransform) -> None:\n self.image_prepocessor = image_prepocessor\n self.annotations_dir, self.images_dir, self.image_sets_dir = self._create_dirs(directory, clear=clear)\n\n @abstractmethod\n def process_image(\n self,\n img_name: str,\n img: np.array,\n bboxes: List[BoxCoordsInt],\n classes: List[str],\n ) -> Tuple[int, int]:\n raise NotImplementedError()\n\n @abstractmethod\n def write_image_set(self, ids: List[str], file_name: str) -> None:\n raise NotImplementedError()\n\n @staticmethod\n @abstractmethod\n def _create_dirs(data_dir: str, clear: bool = False) -> Tuple[Path, Path, Path]:\n raise NotImplementedError()\n\n\nclass VocWriter(BaseWriter):\n \"\"\" Class for writing data in PASCALVOC 2012 format\n \"\"\"\n\n def process_image(\n self,\n img_name: str,\n img: np.array,\n bboxes: List[BoxCoordsInt],\n classes: List[str],\n ) -> Tuple[int, int]:\n image_path = self.images_dir / img_name\n xml_path = self.annotations_dir / img_name.replace('.jpg', '.xml').replace('.png', '.xml')\n img, bboxes, classes = self.image_prepocessor(image_path.name, img, bboxes, classes)\n if not (self.images_dir / img_name).exists():\n cv2.imwrite(str(self.images_dir / img_name), img)\n self.write_xml(xml_path, image_path, bboxes, classes, img.shape[0:2])\n return img.shape\n\n @staticmethod\n def write_xml(xml_path: Path, image_path: Path, bboxes: List[BoxCoordsInt], classes: List[str],\n img_shape: Tuple[int, int]) -> None:\n writer = Writer(image_path, img_shape[1], img_shape[0])\n if bboxes is not None:\n for bbox, class_name in zip(bboxes, classes):\n if bbox[3] > img_shape[0] or bbox[1] > img_shape[1]:\n continue\n writer.addObject(class_name, int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))\n writer.save(xml_path)\n\n @staticmethod\n def _create_dirs(data_dir: str, clear: bool = False) -> Tuple[Path, Path, Path]:\n base_dir = Path(data_dir)\n if clear and base_dir.exists():\n shutil.rmtree(base_dir)\n annotations = base_dir / 'Annotations'\n images = base_dir / 'JPEGImages'\n image_sets = base_dir / 'image_sets'\n\n annotations.mkdir(parents=True, exist_ok=True)\n images.mkdir(parents=True, exist_ok=True)\n image_sets.mkdir(parents=True, exist_ok=True)\n return (annotations, images, image_sets)\n\n def write_image_set(self, ids: List[str], file_name: str) -> None:\n with open(self.image_sets_dir / file_name, 'w') as writer:\n writer.write('\\n'.join(ids))\n\n\nclass ScaledYoloWriter(BaseWriter):\n \"\"\" Class for writing data in ScaledYolo format\n \"\"\"\n\n def process_image(\n self,\n img_name: str,\n img: np.array,\n bboxes: List[BoxCoordsInt],\n classes: List[str],\n ) -> Tuple[int, int]:\n image_path = self.images_dir / img_name\n ann_path = self.annotations_dir / img_name.replace('.jpg', '.txt').replace('.png', '.txt')\n img, bboxes, classes = self.image_prepocessor(image_path.name, img, bboxes, classes)\n if not (self.images_dir / img_name).exists():\n cv2.imwrite(str(self.images_dir / img_name), img)\n self.write_ann(ann_path, bboxes, classes, img.shape[0:2])\n return img.shape\n\n @staticmethod\n def write_ann(ann_path: Path, bboxes: List[BoxCoordsInt], classes: List[str], img_shape: Tuple[int, int]) -> None:\n class_ids = [classname2mmdetid[cls] for cls in classes]\n normalized_boxes = [abs2rel(box, img_shape) for box in bboxes]\n normalized_boxes = [((box[0] + 
box[2]) / 2, (box[1] + box[3]) / 2, box[2] - box[0], box[3] - box[1])\n for box in normalized_boxes]\n with open(ann_path, 'w') as writer:\n for bbox, class_id in zip(normalized_boxes, class_ids):\n writer.write(' '.join([str(class_id)] + [str(coord) for coord in bbox]) + '\\n')\n\n @staticmethod\n def _create_dirs(data_dir: str, clear: bool = False) -> Tuple[Path, Path, Path]:\n base_dir = Path(data_dir)\n if clear and base_dir.exists():\n shutil.rmtree(base_dir)\n annotations = base_dir / 'labels'\n images = base_dir / 'JPEGImages'\n image_sets = base_dir / 'yolo_image_sets'\n\n annotations.mkdir(parents=True, exist_ok=True)\n images.mkdir(parents=True, exist_ok=True)\n image_sets.mkdir(parents=True, exist_ok=True)\n return (annotations, images, image_sets)\n\n def write_image_set(self, ids: List[str], file_name: str) -> None:\n with open(self.image_sets_dir / file_name, 'w') as writer:\n writer.write('\\n'.join([str(self.images_dir / (id + '.png')) for id in ids]))\n\n\ndef read_dicom_img(path: str, apply_voi: bool = True) -> np.array:\n dicom_data = dcmread(path)\n if apply_voi:\n img_data = apply_voi_lut(dicom_data.pixel_array, dicom_data)\n else:\n img_data = dicom_data.pixel_array\n\n if dicom_data.PhotometricInterpretation == 'MONOCHROME1':\n img_data = np.amax(img_data) - img_data\n img_data = img_data - np.min(img_data)\n img_data = img_data / np.max(img_data)\n img_data = (img_data * 256).astype(np.uint8)\n return img_data\n\n\ndef convert_bboxmeta2arrays(bbox_metas: List[ImageMeta]) -> Tuple[List[BoxCoordsFloat], List[float], List[str]]:\n bboxes = [(bbox_meta['x_min'], bbox_meta['y_min'], bbox_meta['x_max'], bbox_meta['y_max'])\n for bbox_meta in bbox_metas if bbox_meta['class_name'] != 'No finding']\n labels = [bbox_meta['class_name'] for bbox_meta in bbox_metas if bbox_meta['class_name'] != 'No finding']\n scores = [1.0 for _ in range(len(bboxes))]\n return (bboxes, scores, labels)\n",
"import copy\nfrom typing import Dict, List, Tuple\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom vinbigdata import BoxesMeta\nfrom vinbigdata.preprocess import ImageMeta, convert_bboxmeta2arrays\n\nCOLOR_MAP: Dict[str, Tuple[int, int, int]] = {}\n\n\ndef plot_bboxes(img: np.array,\n bboxes: List[Tuple[float, float, float, float]],\n scores: List[float],\n labels: List[str],\n threshold: float = 0.0) -> np.array:\n for bbox, score, label in zip(bboxes, scores, labels):\n if score < threshold:\n continue\n if label not in COLOR_MAP:\n color = sns.color_palette('tab20')[len(COLOR_MAP)]\n COLOR_MAP[label] = (int(color[0] * 255), int(color[1] * 255), int(color[2] * 255))\n img = cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), COLOR_MAP[label], 7)\n img = cv2.putText(img, label + '%.2f' % score, (int(bbox[0]), int(bbox[1])), cv2.FONT_HERSHEY_SIMPLEX,\n 1 * img.shape[0] / 1000, COLOR_MAP[label], int(2 * img.shape[0] / 1000))\n return img\n\n\ndef visualize_label_suppression(img: np.array,\n bbox_set1: List[ImageMeta],\n bbox_set2: List[ImageMeta],\n fig_size: Tuple[float, float] = (28, 28)) -> None:\n bboxes, scores, labels = convert_bboxmeta2arrays(bbox_set1)\n img1 = plot_bboxes(copy.deepcopy(img), bboxes, scores, labels)\n\n bboxes, scores, labels = convert_bboxmeta2arrays(bbox_set2)\n img2 = plot_bboxes(copy.deepcopy(img), bboxes, scores, labels)\n fig = plt.figure(figsize=fig_size)\n fig.add_subplot(1, 2, 1)\n plt.imshow(img1)\n fig.add_subplot(1, 2, 2)\n plt.imshow(img2)\n return fig\n\n\ndef visualize_two_bbox_set(img: np.array,\n bbox_set1: BoxesMeta,\n bbox_set2: BoxesMeta,\n threshold: float,\n fig_size: Tuple[float, float] = (28, 28)) -> None:\n bboxes, scores, labels = bbox_set1\n img1 = plot_bboxes(copy.deepcopy(img), bboxes, scores, labels, threshold)\n\n bboxes, scores, labels = bbox_set2\n img2 = plot_bboxes(copy.deepcopy(img), bboxes, scores, labels, threshold)\n fig = plt.figure(figsize=fig_size)\n fig.add_subplot(1, 2, 1)\n plt.imshow(img1)\n fig.add_subplot(1, 2, 2)\n plt.imshow(img2)\n return fig\n",
"import copy\nfrom typing import Dict, List, Tuple\n\nimport numpy as np\nfrom ensemble_boxes import nms\nfrom vinbigdata import BoxCoordsFloat, BoxesMeta, ImageMeta, classname2mmdetid, mmdetid2classname\nfrom vinbigdata.utils import abs2rel\n\n\ndef nms_models(data: Dict[str, List[ImageMeta]], iou_threshold: float = 0.5) -> List[ImageMeta]:\n result_suppressed = []\n for img_index in range(len(data[list(data.keys())[0]])):\n bboxes, scores, labels, weights = [], [], [], []\n for model in data.keys():\n tupl = data[model][img_index][2]\n bboxes.append(tupl[0] if len(tupl[0]) > 0 else np.zeros((0, 4)))\n scores.append(tupl[1])\n labels.append([classname2mmdetid[label] for label in tupl[2]])\n weights.append(1.0)\n if sum([len(arr) for arr in bboxes]) != 0:\n boxes_final, scores_final, labels_final = nms(bboxes, scores, labels, iou_thr=iou_threshold)\n labels_final = [mmdetid2classname[label] for label in labels_final]\n else:\n boxes_final, scores_final, labels_final = [], [], []\n dat = data[list(data.keys())[0]][img_index]\n result_suppressed.append((dat[0], dat[1], (list(boxes_final), list(scores_final), list(labels_final))))\n return result_suppressed\n\n\ndef normal_by_boxes(bboxes: List[BoxCoordsFloat], scores: List[float], labels: List[str],\n img_shape: Tuple[int, int]) -> BoxesMeta:\n bboxes, scores, labels = copy.deepcopy(bboxes), copy.deepcopy(scores), copy.deepcopy(labels)\n bboxes.append(np.array(abs2rel((0, 0, 1, 1), img_shape)))\n if len(scores) == 0:\n scores.append(1.0)\n else:\n scores.append(1 - np.max(scores))\n labels.append('No finding')\n return (bboxes, scores, labels)\n"
] | [
[
"numpy.concatenate",
"numpy.max",
"numpy.amax",
"numpy.min"
],
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.figure"
],
[
"numpy.max",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
drib861204/rlkit | [
"08969d004c1be6eb2967174b48ab59b8bb33d11d"
] | [
"rlkit/core/eval_util.py"
] | [
"\"\"\"\nCommon evaluation utilities.\n\"\"\"\n\nfrom collections import OrderedDict\nfrom numbers import Number\n\nimport numpy as np\n\nimport rlkit.pythonplusplus as ppp\n\n\ndef get_generic_path_information(paths, stat_prefix=''):\n \"\"\"\n Get an OrderedDict with a bunch of statistic names and values.\n \"\"\"\n statistics = OrderedDict()\n if len(paths) == 0:\n return statistics\n returns = [sum(path[\"rewards\"]) for path in paths]\n\n rewards = np.vstack([path[\"rewards\"] for path in paths])\n statistics.update(create_stats_ordered_dict('Rewards', rewards,\n stat_prefix=stat_prefix))\n statistics.update(create_stats_ordered_dict('Returns', returns,\n stat_prefix=stat_prefix))\n actions = [path[\"actions\"] for path in paths]\n if len(actions[0].shape) == 1:\n actions = np.hstack([path[\"actions\"] for path in paths])\n else:\n actions = np.vstack([path[\"actions\"] for path in paths])\n statistics.update(create_stats_ordered_dict(\n 'Actions', actions, stat_prefix=stat_prefix\n ))\n statistics['Num Paths'] = len(paths)\n statistics[stat_prefix + 'Average Returns'] = get_average_returns(paths)\n\n for info_key in ['env_infos', 'agent_infos']:\n if info_key in paths[0]:\n all_env_infos = [\n ppp.list_of_dicts__to__dict_of_lists(p[info_key])\n for p in paths\n ]\n for k in all_env_infos[0].keys():\n final_ks = np.array([info[k][-1] for info in all_env_infos])\n first_ks = np.array([info[k][0] for info in all_env_infos])\n all_ks = np.concatenate([info[k] for info in all_env_infos])\n statistics.update(create_stats_ordered_dict(\n stat_prefix + k,\n final_ks,\n stat_prefix='{}/final/'.format(info_key),\n ))\n statistics.update(create_stats_ordered_dict(\n stat_prefix + k,\n first_ks,\n stat_prefix='{}/initial/'.format(info_key),\n ))\n statistics.update(create_stats_ordered_dict(\n stat_prefix + k,\n all_ks,\n stat_prefix='{}/'.format(info_key),\n ))\n\n return statistics\n\n\ndef get_average_returns(paths):\n returns = [sum(path[\"rewards\"]) for path in paths]\n return np.mean(returns)\n\n\ndef create_stats_ordered_dict(\n name,\n data,\n stat_prefix=None,\n always_show_all_stats=True,\n exclude_max_min=False,\n):\n if stat_prefix is not None:\n name = \"{}{}\".format(stat_prefix, name)\n if isinstance(data, Number):\n return OrderedDict({name: data})\n\n if len(data) == 0:\n return OrderedDict()\n\n if isinstance(data, tuple):\n ordered_dict = OrderedDict()\n for number, d in enumerate(data):\n sub_dict = create_stats_ordered_dict(\n \"{0}_{1}\".format(name, number),\n d,\n )\n ordered_dict.update(sub_dict)\n return ordered_dict\n\n if isinstance(data, list):\n try:\n iter(data[0])\n except TypeError:\n pass\n else:\n data = np.concatenate(data)\n\n if (isinstance(data, np.ndarray) and data.size == 1\n and not always_show_all_stats):\n return OrderedDict({name: float(data)})\n\n stats = OrderedDict([\n (name + ' Mean', np.mean(data)),\n (name + ' Std', np.std(data)),\n ])\n if not exclude_max_min:\n stats[name + ' Max'] = np.max(data)\n stats[name + ' Min'] = np.min(data)\n return stats\n"
] | [
[
"numpy.hstack",
"numpy.min",
"numpy.concatenate",
"numpy.max",
"numpy.std",
"numpy.mean",
"numpy.array",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
leonMatzner/geoopt | [
"4a7058e43bf78ab5012b862076a74bec175df221"
] | [
"tests/test_gyrovector_math.py"
] | [
"\"\"\"\nTests ideas are taken mostly from https://github.com/dalab/hyperbolic_nn/blob/master/util.py with some changes\n\"\"\"\nimport torch\nimport random\nimport numpy as np\nimport pytest\nimport warnings\nimport itertools\nimport geoopt\nfrom geoopt.manifolds import stereographic\n\n\[email protected](scope=\"function\", autouse=True, params=range(30, 40))\ndef seed(request):\n seed = request.param\n torch.manual_seed(seed)\n random.seed(seed)\n np.random.seed(seed)\n return seed\n\n\[email protected](\n scope=\"function\", params=[torch.float64, torch.float32], ids=[\"float64\", \"float32\"]\n)\ndef dtype(request):\n return request.param\n\n\ndef tolerant_allclose_check(a, b, strict=True, **tolerance):\n if strict:\n np.testing.assert_allclose(a.detach(), b.detach(), **tolerance)\n else:\n try:\n np.testing.assert_allclose(a.detach(), b.detach(), **tolerance)\n except AssertionError as e:\n assert not torch.isnan(a).any(), \"Found nans\"\n assert not torch.isnan(b).any(), \"Found nans\"\n warnings.warn(\n \"Unstable numerics: \" + \" | \".join(str(e).splitlines()[3:6]),\n RuntimeWarning,\n )\n\n\[email protected](params=[True, False], ids=[\"negative\", \"positive\"])\ndef negative(request):\n return request.param\n\n\[email protected]()\ndef strict(seed, dtype, negative):\n return seed in {30, 31} and dtype == torch.float64 or negative\n\n\n# c = -k\[email protected]\ndef c(seed, dtype, negative):\n # test broadcasted and non broadcasted versions\n if seed == 30: # strict seed\n c = torch.tensor(0.0).to(dtype)\n elif seed == 31: # strict seed too\n c = torch.tensor(1.0).to(dtype)\n elif seed == 39:\n c = 10 ** torch.arange(-15, 1, dtype=dtype)[:, None]\n elif seed == 35:\n c = torch.zeros(100, 1, dtype=dtype)\n elif seed > 35:\n c = torch.rand(100, 1, dtype=dtype)\n else:\n c = torch.tensor(random.random()).to(dtype)\n if not negative:\n c = -c\n return c.requires_grad_(True)\n\n\[email protected]\ndef k(c):\n return -c\n\n\[email protected]\ndef manifold(k):\n return stereographic.Stereographic(k=k, learnable=True)\n\n\[email protected]\ndef B(c):\n if c.dim() > 1:\n return c.shape[0]\n else:\n return 100\n\n\[email protected]\ndef a(seed, c, manifold, B, dtype):\n r = manifold.radius\n a = torch.empty(B, 10, dtype=dtype).normal_(-1, 1)\n a /= a.norm(dim=-1, keepdim=True)\n a *= torch.where(torch.isfinite(r), r, torch.ones((), dtype=dtype)).clamp_max_(100)\n a *= torch.rand_like(a)\n return manifold.projx(a).detach().requires_grad_(True)\n\n\[email protected]\ndef b(seed, c, manifold, B, dtype):\n r = manifold.radius\n a = torch.empty(B, 10, dtype=dtype).normal_(-1, 1)\n a /= a.norm(dim=-1, keepdim=True)\n a *= torch.where(torch.isfinite(r), r, torch.ones((), dtype=dtype)).clamp_max_(100)\n a *= torch.rand_like(a)\n return manifold.projx(a).detach().requires_grad_(True)\n\n\[email protected]\ndef logunif_input(dtype):\n inp = 10 ** torch.arange(-15, 1, dtype=dtype)\n inp = torch.cat([-inp.flip(0), torch.zeros([1], dtype=dtype), inp])\n return inp.requires_grad_(True)\n\n\ndef test_tanh_grad(logunif_input):\n stereographic.math.tanh(logunif_input).sum().backward()\n assert torch.isfinite(logunif_input.grad).all()\n\n\ndef test_artanh_grad(logunif_input):\n stereographic.math.artanh(logunif_input).sum().backward()\n assert torch.isfinite(logunif_input.grad).all()\n\n\ndef test_arsinh_grad(logunif_input):\n stereographic.math.arsinh(logunif_input).sum().backward()\n assert torch.isfinite(logunif_input.grad).all()\n\n\ndef test_tan_k_grad(logunif_input):\n k = 
logunif_input.detach().clone().requires_grad_()\n stereographic.math.tan_k(logunif_input[None], k[:, None]).sum().backward()\n assert torch.isfinite(logunif_input.grad).all()\n assert torch.isfinite(k.grad).all()\n\n\ndef test_artan_k_grad(logunif_input):\n k = logunif_input.detach().clone().requires_grad_()\n stereographic.math.artan_k(logunif_input[None], k[:, None]).sum().backward()\n assert torch.isfinite(logunif_input.grad).all()\n assert torch.isfinite(k.grad).all()\n\n\ndef test_arsin_k_grad(logunif_input):\n k = logunif_input.detach().clone().requires_grad_()\n stereographic.math.arsin_k(logunif_input[None], k[:, None]).sum().backward()\n assert torch.isfinite(logunif_input.grad).all()\n assert torch.isfinite(k.grad).all()\n\n\ndef test_sin_k_grad(logunif_input):\n k = logunif_input.detach().clone().requires_grad_()\n stereographic.math.sin_k(logunif_input[None], k[:, None]).sum().backward()\n assert torch.isfinite(logunif_input.grad).all()\n assert torch.isfinite(k.grad).all()\n\n\ndef test_project_k_grad(logunif_input):\n vec = logunif_input[:, None] * torch.ones(logunif_input.shape[0], 10)\n k = logunif_input.detach().clone().requires_grad_()\n stereographic.math.project(vec, k=k[:, None]).sum().backward()\n assert torch.isfinite(logunif_input.grad).all()\n assert torch.isfinite(k.grad).all()\n\n\ndef test_mobius_addition_left_cancelation(a, b, manifold, dtype):\n res = manifold.mobius_add(-a, manifold.mobius_add(a, b))\n tolerance = {torch.float32: dict(atol=5e-5, rtol=5e-4), torch.float64: dict()}\n np.testing.assert_allclose(res.detach(), b.detach(), **tolerance[dtype])\n res.sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(b.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\ndef test_mobius_addition_zero_a(b, manifold):\n a = torch.zeros_like(b)\n res = manifold.mobius_add(a, b)\n np.testing.assert_allclose(res.detach(), b.detach())\n res.sum().backward()\n assert torch.isfinite(b.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\ndef test_mobius_addition_zero_b(a, c, manifold):\n b = torch.zeros_like(a)\n res = manifold.mobius_add(a, b)\n np.testing.assert_allclose(res.detach(), a.detach())\n res.sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\ndef test_mobius_addition_negative_cancellation(a, manifold, dtype):\n res = manifold.mobius_add(a, -a)\n tolerance = {\n torch.float32: dict(atol=1e-4, rtol=1e-6),\n torch.float64: dict(atol=1e-6),\n }\n np.testing.assert_allclose(res.detach(), torch.zeros_like(res), **tolerance[dtype])\n res.sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\ndef test_mobius_negative_addition(a, b, manifold, dtype):\n res = manifold.mobius_add(-b, -a)\n res1 = -manifold.mobius_add(b, a)\n tolerance = {\n torch.float32: dict(atol=1e-7, rtol=1e-6),\n torch.float64: dict(atol=1e-10),\n }\n\n np.testing.assert_allclose(res.detach(), res1.detach(), **tolerance[dtype])\n res.sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(b.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\[email protected](\"n\", list(range(5)))\ndef test_n_additions_via_scalar_multiplication(n, a, dtype, negative, manifold, strict):\n n = torch.as_tensor(n, dtype=a.dtype).requires_grad_()\n y = torch.zeros_like(a)\n for _ in range(int(n.item())):\n y = manifold.mobius_add(a, y)\n ny = manifold.mobius_scalar_mul(n, a)\n if negative:\n tolerance = {\n torch.float32: 
dict(atol=4e-5, rtol=1e-3),\n torch.float64: dict(atol=1e-5, rtol=1e-3),\n }\n else:\n tolerance = {\n torch.float32: dict(atol=2e-6, rtol=1e-3),\n torch.float64: dict(atol=1e-5, rtol=1e-3),\n }\n tolerant_allclose_check(y, ny, strict=strict, **tolerance[dtype])\n ny.sum().backward()\n assert torch.isfinite(n.grad).all()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\[email protected]\ndef r1(seed, dtype, B):\n if seed % 3 == 0:\n return (\n torch.tensor(random.uniform(-1, 1), dtype=dtype)\n .detach()\n .requires_grad_(True)\n )\n else:\n return (torch.rand(B, 1, dtype=dtype) * 2 - 1).detach().requires_grad_(True)\n\n\[email protected]\ndef r2(seed, dtype, B):\n if seed % 3 == 1:\n return (\n torch.tensor(random.uniform(-1, 1), dtype=dtype)\n .detach()\n .requires_grad_(True)\n )\n else:\n return (torch.rand(B, 1, dtype=dtype) * 2 - 1).detach().requires_grad_(True)\n\n\ndef test_scalar_multiplication_distributive(a, r1, r2, manifold, dtype):\n res = manifold.mobius_scalar_mul(r1 + r2, a)\n res1 = manifold.mobius_add(\n manifold.mobius_scalar_mul(r1, a),\n manifold.mobius_scalar_mul(r2, a),\n )\n res2 = manifold.mobius_add(\n manifold.mobius_scalar_mul(r1, a),\n manifold.mobius_scalar_mul(r2, a),\n )\n tolerance = {\n torch.float32: dict(atol=5e-6, rtol=1e-4),\n torch.float64: dict(atol=1e-7, rtol=1e-4),\n }\n np.testing.assert_allclose(res1.detach(), res.detach(), **tolerance[dtype])\n np.testing.assert_allclose(res2.detach(), res.detach(), **tolerance[dtype])\n res.sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(r1.grad).all()\n assert torch.isfinite(r2.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\ndef test_scalar_multiplication_associative(a, r1, r2, manifold, dtype):\n res = manifold.mobius_scalar_mul(r1 * r2, a)\n res1 = manifold.mobius_scalar_mul(r1, manifold.mobius_scalar_mul(r2, a))\n res2 = manifold.mobius_scalar_mul(r2, manifold.mobius_scalar_mul(r1, a))\n tolerance = {\n torch.float32: dict(atol=1e-5, rtol=1e-5),\n torch.float64: dict(atol=1e-7, rtol=1e-7),\n }\n np.testing.assert_allclose(res1.detach(), res.detach(), **tolerance[dtype])\n np.testing.assert_allclose(res2.detach(), res.detach(), **tolerance[dtype])\n res.sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(r1.grad).all()\n assert torch.isfinite(r2.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\ndef test_scaling_property(a, r1, manifold, dtype):\n x1 = a / a.norm(dim=-1, keepdim=True)\n ra = manifold.mobius_scalar_mul(r1, a)\n x2 = manifold.mobius_scalar_mul(abs(r1), a) / ra.norm(dim=-1, keepdim=True)\n tolerance = {\n torch.float32: dict(rtol=1e-5, atol=1e-6),\n torch.float64: dict(atol=1e-10),\n }\n np.testing.assert_allclose(x1.detach(), x2.detach(), **tolerance[dtype])\n x2.sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(r1.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\ndef test_geodesic_borders(a, b, manifold, dtype):\n geo0 = manifold.geodesic(torch.tensor(0.0, dtype=dtype), a, b)\n geo1 = manifold.geodesic(torch.tensor(1.0, dtype=dtype), a, b)\n tolerance = {\n torch.float32: dict(rtol=1e-5, atol=5e-5),\n torch.float64: dict(atol=1e-10),\n }\n np.testing.assert_allclose(geo0.detach(), a.detach(), **tolerance[dtype])\n np.testing.assert_allclose(geo1.detach(), b.detach(), **tolerance[dtype])\n (geo0 + geo1).sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(b.grad).all()\n assert 
torch.isfinite(manifold.k.grad).all()\n\n\ndef test_geodesic_segment_length_property(a, b, manifold, dtype):\n extra_dims = len(a.shape)\n segments = 12\n t = torch.linspace(0, 1, segments + 1, dtype=dtype).view(\n (segments + 1,) + (1,) * extra_dims\n )\n gamma_ab_t = manifold.geodesic(t, a, b)\n gamma_ab_t0 = gamma_ab_t[:-1]\n gamma_ab_t1 = gamma_ab_t[1:]\n dist_ab_t0mt1 = manifold.dist(gamma_ab_t0, gamma_ab_t1, keepdim=True)\n speed = manifold.dist(a, b, keepdim=True).unsqueeze(0).expand_as(dist_ab_t0mt1)\n # we have exactly 12 line segments\n tolerance = {\n torch.float32: dict(rtol=1e-5, atol=5e-3),\n torch.float64: dict(rtol=1e-5, atol=5e-3),\n }\n length = speed / segments\n np.testing.assert_allclose(\n dist_ab_t0mt1.detach(), length.detach(), **tolerance[dtype]\n )\n (length + dist_ab_t0mt1).sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(b.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\ndef test_geodesic_segement_unit_property(a, b, manifold, dtype):\n extra_dims = len(a.shape)\n segments = 12\n t = torch.linspace(0, 1, segments + 1, dtype=dtype).view(\n (segments + 1,) + (1,) * extra_dims\n )\n gamma_ab_t = manifold.geodesic_unit(t, a, b)\n gamma_ab_t0 = gamma_ab_t[:1]\n gamma_ab_t1 = gamma_ab_t\n dist_ab_t0mt1 = manifold.dist(gamma_ab_t0, gamma_ab_t1, keepdim=True)\n true_distance_travelled = t.expand_as(dist_ab_t0mt1)\n # we have exactly 12 line segments\n tolerance = {\n torch.float32: dict(atol=2e-4, rtol=5e-5),\n torch.float64: dict(atol=1e-10),\n }\n np.testing.assert_allclose(\n dist_ab_t0mt1.detach(), true_distance_travelled.detach(), **tolerance[dtype]\n )\n (true_distance_travelled + dist_ab_t0mt1).sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(b.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\ndef test_expmap_logmap(a, b, manifold, dtype):\n # this test appears to be numerical unstable once a and b may appear on the opposite sides\n bh = manifold.expmap(x=a, u=manifold.logmap(a, b))\n tolerance = {torch.float32: dict(rtol=1e-5, atol=5e-5), torch.float64: dict()}\n np.testing.assert_allclose(bh.detach(), b.detach(), **tolerance[dtype])\n bh.sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(b.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\ndef test_expmap0_logmap0(a, manifold, dtype):\n # this test appears to be numerical unstable once a and b may appear on the opposite sides\n v = manifold.logmap0(a)\n norm = manifold.norm(torch.zeros_like(v), v, keepdim=True)\n dist = manifold.dist0(a, keepdim=True)\n bh = manifold.expmap0(v)\n tolerance = {torch.float32: dict(atol=1e-5, rtol=1e-5), torch.float64: dict()}\n np.testing.assert_allclose(bh.detach(), a.detach(), **tolerance[dtype])\n np.testing.assert_allclose(norm.detach(), dist.detach(), **tolerance[dtype])\n (bh.sum() + dist.sum()).backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\ndef test_matvec_zeros(a, manifold):\n mat = a.new_zeros((3, a.shape[-1]))\n z = manifold.mobius_matvec(mat, a)\n np.testing.assert_allclose(z.detach(), 0.0)\n z.sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\ndef test_matvec_via_equiv_fn_apply(a, negative, manifold, strict, dtype):\n mat = a.new(3, a.shape[-1]).normal_()\n y = manifold.mobius_fn_apply(lambda x: x @ mat.transpose(-1, -2), a)\n y1 = manifold.mobius_matvec(mat, a)\n tolerance = {torch.float32: dict(atol=1e-5, rtol=1e-5), 
torch.float64: dict()}\n\n tolerant_allclose_check(y, y1, strict=strict, **tolerance[dtype])\n y.sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\ndef test_mobiusify(a, c, negative, strict, dtype):\n mat = a.new(3, a.shape[-1]).normal_()\n\n @stereographic.math.mobiusify\n def matvec(x):\n return x @ mat.transpose(-1, -2)\n\n y = matvec(a, k=-c)\n y1 = stereographic.math.mobius_matvec(mat, a, k=-c)\n tolerance = {torch.float32: dict(atol=1e-5, rtol=1e-5), torch.float64: dict()}\n\n tolerant_allclose_check(y, y1, strict=strict, **tolerance[dtype])\n y.sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(c.grad).all()\n\n\ndef test_matvec_chain_via_equiv_fn_apply(a, negative, manifold, dtype):\n mat1 = a.new(a.shape[-1], a.shape[-1]).normal_()\n mat2 = a.new(a.shape[-1], a.shape[-1]).normal_()\n y = manifold.mobius_fn_apply_chain(\n a,\n lambda x: x @ mat1.transpose(-1, -2),\n lambda x: x @ mat2.transpose(-1, -2),\n )\n y1 = manifold.mobius_matvec(mat1, a)\n y1 = manifold.mobius_matvec(mat2, y1)\n tolerance = {torch.float32: dict(atol=1e-5, rtol=1e-5), torch.float64: dict()}\n\n tolerant_allclose_check(y, y1, strict=negative, **tolerance[dtype])\n y.sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\ndef test_transp0_preserves_inner_products(a, manifold):\n # pointing to the center\n v_0 = torch.rand_like(a) + 1e-5\n u_0 = torch.rand_like(a) + 1e-5\n zero = torch.zeros_like(a)\n v_a = manifold.transp0(a, v_0)\n u_a = manifold.transp0(a, u_0)\n # compute norms\n vu_0 = manifold.inner(zero, v_0, u_0, keepdim=True)\n vu_a = manifold.inner(a, v_a, u_a, keepdim=True)\n np.testing.assert_allclose(vu_a.detach(), vu_0.detach(), atol=1e-6, rtol=1e-6)\n (vu_0 + vu_a).sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\ndef test_transp0_is_same_as_usual(a, manifold):\n # pointing to the center\n v_0 = torch.rand_like(a) + 1e-5\n zero = torch.zeros_like(a)\n v_a = manifold.transp0(a, v_0)\n v_a1 = manifold.transp(zero, a, v_0)\n # compute norms\n np.testing.assert_allclose(v_a.detach(), v_a1.detach(), atol=1e-6, rtol=1e-6)\n (v_a + v_a1).sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\ndef test_transp_a_b(a, b, manifold):\n # pointing to the center\n v_0 = torch.rand_like(a)\n u_0 = torch.rand_like(a)\n v_1 = manifold.transp(a, b, v_0)\n u_1 = manifold.transp(a, b, u_0)\n # compute norms\n vu_1 = manifold.inner(b, v_1, u_1, keepdim=True)\n vu_0 = manifold.inner(a, v_0, u_0, keepdim=True)\n np.testing.assert_allclose(vu_0.detach(), vu_1.detach(), atol=1e-6, rtol=1e-6)\n (vu_0 + vu_1).sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(b.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\ndef test_add_infinity_and_beyond(a, b, c, negative, manifold, dtype):\n _a = a\n if torch.isclose(c, c.new_zeros(())).any():\n pytest.skip(\"zero not checked\")\n infty = b * 10000000\n for i in range(100):\n z = manifold.expmap(a, infty, project=False)\n z = manifold.projx(z)\n assert not torch.isnan(z).any(), (\"Found nans\", i, z)\n assert torch.isfinite(z).all(), (\"Found Infs\", i, z)\n z = manifold.mobius_scalar_mul(\n torch.tensor(1000.0, dtype=z.dtype), z, project=False\n )\n z = manifold.projx(z)\n assert not torch.isnan(z).any(), (\"Found nans\", i, z)\n assert torch.isfinite(z).all(), (\"Found Infs\", i, z)\n\n infty = 
manifold.transp(a, z, infty)\n assert torch.isfinite(infty).all(), (i, infty)\n a = z\n z = manifold.expmap(a, -infty)\n # they just need to be very far, exact answer is not supposed\n tolerance = {\n torch.float32: dict(rtol=3e-1, atol=2e-1),\n torch.float64: dict(rtol=1e-1, atol=1e-3),\n }\n if negative:\n np.testing.assert_allclose(z.detach(), -a.detach(), **tolerance[dtype])\n else:\n assert not torch.isnan(z).any(), \"Found nans\"\n assert not torch.isnan(a).any(), \"Found nans\"\n\n\ndef test_mobius_coadd(a, b, negative, manifold, strict):\n # (a \\boxplus_c b) \\ominus_c b = a\n ah = manifold.mobius_sub(manifold.mobius_coadd(a, b), b)\n tolerant_allclose_check(a, ah, strict=strict, atol=5e-5)\n ah.sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(b.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\ndef test_mobius_cosub(a, b, negative, manifold, strict):\n # (a \\oplus_c b) \\boxminus b = a\n ah = manifold.mobius_cosub(manifold.mobius_add(a, b), b)\n tolerant_allclose_check(a, ah, strict=strict, atol=1e-5)\n ah.sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(b.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\ndef test_distance2plane(a, manifold):\n v = torch.rand_like(a).requires_grad_()\n vr = v / manifold.norm(a, v, keepdim=True)\n z = manifold.expmap(a, vr)\n dist1 = manifold.dist(a, z)\n dist = manifold.dist2plane(z, a, vr)\n\n np.testing.assert_allclose(dist.detach(), dist1.detach(), atol=2e-4, rtol=1e-4)\n (dist + dist1).sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(v.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\ndef test_sproj(manifold, a):\n ma = manifold.sproj(manifold.inv_sproj(a))\n np.testing.assert_allclose(ma.detach(), a.detach(), atol=1e-5)\n ma.sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\ndef test_antipode(manifold, negative, a, dtype, seed):\n if seed == 39:\n pytest.skip(\"This is amazingly unstable when tested against extreme values\")\n ma = manifold.antipode(a)\n if manifold.k.le(0).all():\n np.testing.assert_allclose(ma.detach(), -a.detach())\n else:\n s = manifold.inv_sproj(a)\n ms = manifold.inv_sproj(ma)\n tolerance = {torch.float32: dict(atol=1e-5), torch.float64: dict(atol=1e-6)}\n np.testing.assert_allclose(ms.detach(), -s.detach(), **tolerance[dtype])\n ma.sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\[email protected](\"_k,lincomb\", itertools.product([-1, 0, 1], [True, False]))\ndef test_weighted_midpoint(_k, lincomb):\n manifold = stereographic.Stereographic(_k, learnable=True)\n a = geoopt.ManifoldParameter(manifold.random(2, 3, 10))\n mid = manifold.weighted_midpoint(a, lincomb=lincomb)\n assert torch.isfinite(mid).all()\n assert mid.shape == (a.shape[-1],)\n mid.sum().backward()\n assert torch.isfinite(a.grad).all()\n assert not torch.isclose(manifold.k.grad, manifold.k.new_zeros(()))\n\n\[email protected](\"_k,lincomb\", itertools.product([-1, 0, 1], [True, False]))\ndef test_weighted_midpoint_reduce_dim(_k, lincomb):\n manifold = stereographic.Stereographic(_k, learnable=True)\n a = geoopt.ManifoldParameter(manifold.random(2, 3, 10))\n mid = manifold.weighted_midpoint(a, reducedim=[0], lincomb=lincomb)\n assert mid.shape == a.shape[-2:]\n assert torch.isfinite(mid).all()\n mid.sum().backward()\n assert torch.isfinite(a.grad).all()\n assert not torch.isclose(manifold.k.grad, 
manifold.k.new_zeros(()))\n\n\[email protected](\"_k,lincomb\", itertools.product([-1, 0, 1], [True, False]))\ndef test_weighted_midpoint_weighted(_k, lincomb):\n manifold = stereographic.Stereographic(_k, learnable=True)\n a = geoopt.ManifoldParameter(manifold.random(2, 3, 10))\n mid = manifold.weighted_midpoint(\n a, reducedim=[0], lincomb=lincomb, weights=torch.rand_like(a[..., 0])\n )\n assert mid.shape == a.shape[-2:]\n assert torch.isfinite(mid).all()\n mid.sum().backward()\n assert torch.isfinite(a.grad).all()\n assert not torch.isclose(manifold.k.grad, manifold.k.new_zeros(()))\n\n\[email protected](\"_k,lincomb\", itertools.product([-1, 0, 1], [True, False]))\ndef test_weighted_midpoint_zero(_k, lincomb):\n manifold = stereographic.Stereographic(_k, learnable=True)\n a = geoopt.ManifoldParameter(manifold.random(2, 3, 10))\n mid = manifold.weighted_midpoint(\n a, reducedim=[0], lincomb=lincomb, weights=torch.zeros_like(a[..., 0])\n )\n assert mid.shape == a.shape[-2:]\n assert torch.allclose(mid, torch.zeros_like(mid))\n mid.sum().backward()\n assert torch.isfinite(a.grad).all()\n assert torch.isfinite(manifold.k.grad).all()\n\n\[email protected](\"lincomb\", [True, False])\ndef test_weighted_midpoint_euclidean(lincomb):\n manifold = stereographic.Stereographic(0)\n a = geoopt.ManifoldParameter(manifold.random(2, 3, 10))\n mid = manifold.weighted_midpoint(a, reducedim=[0], lincomb=lincomb)\n assert mid.shape == a.shape[-2:]\n if lincomb:\n assert torch.allclose(mid, a.sum(0))\n else:\n assert torch.allclose(mid, a.mean(0))\n\n\[email protected](\"_k,lincomb\", itertools.product([-1, 0, 1], [True, False]))\ndef test_weighted_midpoint_weighted_zero_sum(_k, lincomb):\n manifold = stereographic.Stereographic(_k, learnable=True)\n a = geoopt.ManifoldParameter(\n manifold.expmap0(torch.eye(3, 10)).detach(), manifold=manifold\n )\n weights = torch.rand_like(a[..., 0])\n weights = weights - weights.sum() / weights.numel()\n mid = manifold.weighted_midpoint(\n a, lincomb=lincomb, weights=weights, posweight=True\n )\n if _k == 0 and lincomb:\n np.testing.assert_allclose(\n mid.detach(),\n torch.cat([weights, torch.zeros(a.size(-1) - a.size(0))]),\n atol=1e-6,\n )\n assert mid.shape == a.shape[-1:]\n assert torch.isfinite(mid).all()\n mid.sum().backward()\n assert torch.isfinite(a.grad).all()\n"
] | [
[
"torch.linspace",
"torch.ones",
"torch.empty",
"torch.rand_like",
"numpy.random.seed",
"torch.zeros",
"torch.manual_seed",
"torch.isnan",
"torch.zeros_like",
"torch.eye",
"torch.tensor",
"torch.isfinite",
"torch.rand",
"torch.arange",
"torch.as_tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JohnGiorgi/seq2rel | [
"5ada6d3914c0156e93467ab38f29212458f84d53"
] | [
"tests/modules/attention/test_multihead_attention.py"
] | [
"import numpy\nimport torch\nfrom allennlp.common import Params\nfrom allennlp.common.testing.test_case import AllenNlpTestCase\nfrom allennlp.modules.attention.attention import Attention\nfrom numpy.testing import assert_almost_equal\nfrom seq2rel.modules.attention.multihead_attention import MultiheadAttention\n\n\nclass TestMultiheadAttention(AllenNlpTestCase):\n def test_can_init_multihead(self):\n legacy_attention = Attention.from_params(\n Params({\"type\": \"multihead_attention\", \"embed_dim\": 4, \"num_heads\": 2})\n )\n isinstance(legacy_attention, MultiheadAttention)\n\n def test_multihead_similarity(self):\n attn = MultiheadAttention(embed_dim=4, num_heads=2)\n vector = torch.FloatTensor([[0, 0, 0, 0], [1, 1, 1, 1]])\n matrix = torch.FloatTensor(\n [[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]]]\n )\n with torch.no_grad():\n output = attn(vector, matrix)\n\n assert_almost_equal(\n output.sum(dim=-1).numpy(),\n numpy.array([1.0, 1.0]),\n decimal=2,\n )\n"
] | [
[
"torch.no_grad",
"numpy.array",
"torch.FloatTensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
schrodingho/new_v_Gnet | [
"ec7f25bfdc2eec86334f37340d775335a81b6106"
] | [
"code/models.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom dnc import DNC\nfrom layers import GraphConvolution\n\n'''\nOur model\n'''\nclass GCN(nn.Module):\n def __init__(self, voc_size, emb_dim, adj, device=torch.device('cpu:0')):\n super(GCN, self).__init__()\n self.voc_size = voc_size\n self.emb_dim = emb_dim\n self.device = device\n\n adj = self.normalize(adj + np.eye(adj.shape[0]))\n\n self.adj = torch.FloatTensor(adj).to(device)\n self.x = torch.eye(voc_size).to(device)\n\n self.gcn1 = GraphConvolution(voc_size, emb_dim)\n self.dropout = nn.Dropout(p=0.3)\n self.gcn2 = GraphConvolution(emb_dim, emb_dim)\n\n def forward(self):\n node_embedding = self.gcn1(self.x, self.adj)\n node_embedding = F.relu(node_embedding)\n node_embedding = self.dropout(node_embedding)\n node_embedding = self.gcn2(node_embedding, self.adj)\n return node_embedding\n\n def normalize(self, mx):\n \"\"\"Row-normalize sparse matrix\"\"\"\n rowsum = np.array(mx.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = np.diagflat(r_inv)\n mx = r_mat_inv.dot(mx)\n return mx\n\nclass GAMENet(nn.Module):\n def __init__(self, vocab_size, ehr_adj, ddi_adj, emb_dim=64, device=torch.device('cpu:0'), ddi_in_memory=True):\n super(GAMENet, self).__init__()\n K = len(vocab_size)\n self.K = K\n self.vocab_size = vocab_size\n self.device = device\n self.tensor_ddi_adj = torch.FloatTensor(ddi_adj).to(device)\n self.ddi_in_memory = ddi_in_memory\n self.embeddings = nn.ModuleList(\n [nn.Embedding(vocab_size[i], emb_dim) for i in range(K-1)])\n self.dropout = nn.Dropout(p=0.4)\n\n self.encoders = nn.ModuleList([nn.GRU(emb_dim, emb_dim*2, batch_first=True) for _ in range(K-1)])\n\n self.query = nn.Sequential(\n nn.ReLU(),\n nn.Linear(emb_dim * 4, emb_dim),\n )\n\n self.ehr_gcn = GCN(voc_size=vocab_size[2], emb_dim=emb_dim, adj=ehr_adj, device=device)\n self.ddi_gcn = GCN(voc_size=vocab_size[2], emb_dim=emb_dim, adj=ddi_adj, device=device)\n self.inter = nn.Parameter(torch.FloatTensor(1))\n\n self.output = nn.Sequential(\n nn.ReLU(),\n nn.Linear(emb_dim * 3, emb_dim * 2),\n nn.ReLU(),\n nn.Linear(emb_dim * 2, vocab_size[2])\n )\n\n self.init_weights()\n\n def forward(self, input):\n # input (adm, 3, codes)\n\n # generate medical embeddings and queries\n i1_seq = []\n i2_seq = []\n def mean_embedding(embedding):\n return embedding.mean(dim=1).unsqueeze(dim=0) # (1,1,dim)\n for adm in input:\n i1 = mean_embedding(self.dropout(self.embeddings[0](torch.LongTensor(adm[0]).unsqueeze(dim=0).to(self.device)))) # (1,1,dim)\n i2 = mean_embedding(self.dropout(self.embeddings[1](torch.LongTensor(adm[1]).unsqueeze(dim=0).to(self.device))))\n i1_seq.append(i1)\n i2_seq.append(i2)\n i1_seq = torch.cat(i1_seq, dim=1) #(1,seq,dim)\n i2_seq = torch.cat(i2_seq, dim=1) #(1,seq,dim)\n\n o1, h1 = self.encoders[0](\n i1_seq\n ) # o1:(1, seq, dim*2) hi:(1,1,dim*2)\n o2, h2 = self.encoders[1](\n i2_seq\n )\n patient_representations = torch.cat([o1, o2], dim=-1).squeeze(dim=0) # (seq, dim*4)\n queries = self.query(patient_representations) # (seq, dim)\n\n # graph memory module\n '''I:generate current input'''\n query = queries[-1:] # (1,dim)\n\n '''G:generate graph memory bank and insert history information'''\n if self.ddi_in_memory:\n drug_memory = self.ehr_gcn() - self.ddi_gcn() * self.inter # (size, dim)\n else:\n drug_memory = self.ehr_gcn()\n\n if len(input) > 1:\n history_keys = queries[:(queries.size(0)-1)] # (seq-1, dim)\n\n history_values = np.zeros((len(input)-1, 
self.vocab_size[2]))\n for idx, adm in enumerate(input):\n if idx == len(input)-1:\n break\n history_values[idx, adm[2]] = 1\n history_values = torch.FloatTensor(history_values).to(self.device) # (seq-1, size)\n\n '''O:read from global memory bank and dynamic memory bank'''\n key_weights1 = F.softmax(torch.mm(query, drug_memory.t()), dim=-1) # (1, size)\n fact1 = torch.mm(key_weights1, drug_memory) # (1, dim)\n\n if len(input) > 1:\n visit_weight = F.softmax(torch.mm(query, history_keys.t())) # (1, seq-1)\n weighted_values = visit_weight.mm(history_values) # (1, size)\n fact2 = torch.mm(weighted_values, drug_memory) # (1, dim)\n else:\n fact2 = fact1\n '''R:convert O and predict'''\n output = self.output(torch.cat([query, fact1, fact2], dim=-1)) # (1, dim)\n\n if self.training:\n neg_pred_prob = F.sigmoid(output)\n neg_pred_prob = neg_pred_prob.t() * neg_pred_prob # (voc_size, voc_size)\n batch_neg = neg_pred_prob.mul(self.tensor_ddi_adj).mean()\n\n return output, batch_neg\n else:\n return output\n\n def init_weights(self):\n \"\"\"Initialize weights.\"\"\"\n initrange = 0.1\n for item in self.embeddings:\n item.weight.data.uniform_(-initrange, initrange)\n\n self.inter.data.uniform_(-initrange, initrange)\n\n'''\nDMNC\n'''\nclass DMNC(nn.Module):\n def __init__(self, vocab_size, emb_dim=64, device=torch.device('cpu:0')):\n super(DMNC, self).__init__()\n K = len(vocab_size)\n self.K = K\n self.vocab_size = vocab_size\n self.device = device\n\n self.token_start = vocab_size[2]\n self.token_end = vocab_size[2] + 1\n\n self.embeddings = nn.ModuleList(\n [nn.Embedding(vocab_size[i] if i != 2 else vocab_size[2] + 2, emb_dim) for i in range(K)])\n self.dropout = nn.Dropout(p=0.3)\n\n self.encoders = nn.ModuleList([DNC(\n input_size=emb_dim,\n hidden_size=emb_dim,\n rnn_type='gru',\n num_layers=1,\n num_hidden_layers=1,\n nr_cells=16,\n cell_size=emb_dim,\n read_heads=1,\n batch_first=True,\n gpu_id=0,\n independent_linears=False\n ) for _ in range(K - 1)])\n\n self.decoder = nn.GRU(emb_dim + emb_dim * 2, emb_dim * 2,\n batch_first=True) # input: (y, r1, r2,) hidden: (hidden1, hidden2)\n self.interface_weighting = nn.Linear(emb_dim * 2, 2 * (emb_dim + 1 + 3)) # 2 read head (key, str, mode)\n self.decoder_r2o = nn.Linear(2 * emb_dim, emb_dim * 2)\n\n self.output = nn.Linear(emb_dim * 2, vocab_size[2] + 2)\n\n def forward(self, input, i1_state=None, i2_state=None, h_n=None, max_len=20):\n # input (3, code)\n i1_input_tensor = self.embeddings[0](\n torch.LongTensor(input[0]).unsqueeze(dim=0).to(self.device)) # (1, seq, codes)\n i2_input_tensor = self.embeddings[1](\n torch.LongTensor(input[1]).unsqueeze(dim=0).to(self.device)) # (1, seq, codes)\n\n o1, (ch1, m1, r1) = \\\n self.encoders[0](i1_input_tensor, (None, None, None) if i1_state is None else i1_state)\n o2, (ch2, m2, r2) = \\\n self.encoders[1](i2_input_tensor, (None, None, None) if i2_state is None else i2_state)\n\n # save memory state\n i1_state = (ch1, m1, r1)\n i2_state = (ch2, m2, r2)\n\n predict_sequence = [self.token_start] + input[2]\n if h_n is None:\n h_n = torch.cat([ch1[0], ch2[0]], dim=-1)\n\n output_logits = []\n r1 = r1.unsqueeze(dim=0)\n r2 = r2.unsqueeze(dim=0)\n\n if self.training:\n for item in predict_sequence:\n # teacher force predict drug\n item_tensor = self.embeddings[2](\n torch.LongTensor([item]).unsqueeze(dim=0).to(self.device)) # (1, seq, codes)\n\n o3, h_n = self.decoder(torch.cat([item_tensor, r1, r2], dim=-1), h_n)\n read_keys, read_strengths, read_modes = self.decode_read_variable(h_n.squeeze(0))\n\n # read 
from i1_mem, i2_mem and i3_mem\n r1, _ = self.read_from_memory(self.encoders[0],\n read_keys[:, 0, :].unsqueeze(dim=1),\n read_strengths[:, 0].unsqueeze(dim=1),\n read_modes[:, 0, :].unsqueeze(dim=1), i1_state[1])\n\n r2, _ = self.read_from_memory(self.encoders[1],\n read_keys[:, 1, :].unsqueeze(dim=1),\n read_strengths[:, 1].unsqueeze(dim=1),\n read_modes[:, 1, :].unsqueeze(dim=1), i2_state[1])\n\n output = self.decoder_r2o(torch.cat([r1, r2], dim=-1))\n output = self.output(output + o3).squeeze(dim=0)\n output_logits.append(output)\n else:\n item_tensor = self.embeddings[2](\n torch.LongTensor([self.token_start]).unsqueeze(dim=0).to(self.device)) # (1, seq, codes)\n for idx in range(max_len):\n # predict\n # teacher force predict drug\n o3, h_n = self.decoder(torch.cat([item_tensor, r1, r2], dim=-1), h_n)\n read_keys, read_strengths, read_modes = self.decode_read_variable(h_n.squeeze(0))\n\n # read from i1_mem, i2_mem and i3_mem\n r1, _ = self.read_from_memory(self.encoders[0],\n read_keys[:, 0, :].unsqueeze(dim=1),\n read_strengths[:, 0].unsqueeze(dim=1),\n read_modes[:, 0, :].unsqueeze(dim=1), i1_state[1])\n\n r2, _ = self.read_from_memory(self.encoders[1],\n read_keys[:, 1, :].unsqueeze(dim=1),\n read_strengths[:, 1].unsqueeze(dim=1),\n read_modes[:, 1, :].unsqueeze(dim=1), i2_state[1])\n\n output = self.decoder_r2o(torch.cat([r1, r2], dim=-1))\n output = self.output(output + o3).squeeze(dim=0)\n output = F.softmax(output, dim=-1)\n output_logits.append(output)\n\n input_token = torch.argmax(output, dim=-1)\n input_token = input_token.item()\n item_tensor = self.embeddings[2](\n torch.LongTensor([input_token]).unsqueeze(dim=0).to(self.device)) # (1, seq, codes)\n\n return torch.cat(output_logits, dim=0), i1_state, i2_state, h_n\n\n def read_from_memory(self, dnc, read_key, read_str, read_mode, m_hidden):\n read_vectors, hidden = dnc.memories[0].read(read_key, read_str, read_mode, m_hidden)\n return read_vectors, hidden\n\n def decode_read_variable(self, input):\n w = 64\n r = 2\n b = input.size(0)\n\n input = self.interface_weighting(input)\n # r read keys (b * w * r)\n read_keys = F.tanh(input[:, :r * w].contiguous().view(b, r, w))\n # r read strengths (b * r)\n read_strengths = F.softplus(input[:, r * w:r * w + r].contiguous().view(b, r))\n # read modes (b * 3*r)\n read_modes = F.softmax(input[:, (r * w + r):].contiguous().view(b, r, 3), -1)\n return read_keys, read_strengths, read_modes\n\n\n'''\nLeap\n'''\nclass Leap(nn.Module):\n def __init__(self, voc_size, emb_dim=128, device=torch.device('cpu:0')):\n super(Leap, self).__init__()\n self.voc_size = voc_size\n self.device = device\n self.SOS_TOKEN = voc_size[2]\n self.END_TOKEN = voc_size[2]+1\n\n self.enc_embedding = nn.Sequential(\n nn.Embedding(voc_size[0], emb_dim, ),\n nn.Dropout(0.3)\n )\n self.dec_embedding = nn.Sequential(\n nn.Embedding(voc_size[2] + 2, emb_dim, ),\n nn.Dropout(0.3)\n )\n\n self.dec_gru = nn.GRU(emb_dim*2, emb_dim, batch_first=True)\n\n self.attn = nn.Linear(emb_dim*2, 1)\n\n self.output = nn.Linear(emb_dim, voc_size[2]+2)\n\n\n def forward(self, input, max_len=20):\n device = self.device\n # input (3, codes)\n input_tensor = torch.LongTensor(input[0]).to(device)\n # (len, dim)\n input_embedding = self.enc_embedding(input_tensor.unsqueeze(dim=0)).squeeze(dim=0)\n\n output_logits = []\n hidden_state = None\n if self.training:\n for med_code in [self.SOS_TOKEN] + input[2]:\n dec_input = torch.LongTensor([med_code]).unsqueeze(dim=0).to(device)\n dec_input = self.dec_embedding(dec_input).squeeze(dim=0) # 
(1,dim)\n\n if hidden_state is None:\n hidden_state = dec_input\n\n hidden_state_repeat = hidden_state.repeat(input_embedding.size(0), 1) # (len, dim)\n combined_input = torch.cat([hidden_state_repeat, input_embedding], dim=-1) # (len, dim*2)\n attn_weight = F.softmax(self.attn(combined_input).t(), dim=-1) # (1, len)\n input_embedding = attn_weight.mm(input_embedding) # (1, dim)\n\n _, hidden_state = self.dec_gru(torch.cat([input_embedding, dec_input], dim=-1).unsqueeze(dim=0), hidden_state.unsqueeze(dim=0))\n hidden_state = hidden_state.squeeze(dim=0) # (1,dim)\n\n output_logits.append(self.output(F.relu(hidden_state)))\n\n return torch.cat(output_logits, dim=0)\n\n else:\n for di in range(max_len):\n if di == 0:\n dec_input = torch.LongTensor([[self.SOS_TOKEN]]).to(device)\n dec_input = self.dec_embedding(dec_input).squeeze(dim=0) # (1,dim)\n if hidden_state is None:\n hidden_state = dec_input\n hidden_state_repeat = hidden_state.repeat(input_embedding.size(0), 1) # (len, dim)\n combined_input = torch.cat([hidden_state_repeat, input_embedding], dim=-1) # (len, dim*2)\n attn_weight = F.softmax(self.attn(combined_input).t(), dim=-1) # (1, len)\n input_embedding = attn_weight.mm(input_embedding) # (1, dim)\n _, hidden_state = self.dec_gru(torch.cat([input_embedding, dec_input], dim=-1).unsqueeze(dim=0),\n hidden_state.unsqueeze(dim=0))\n hidden_state = hidden_state.squeeze(dim=0) # (1,dim)\n output = self.output(F.relu(hidden_state))\n topv, topi = output.data.topk(1)\n output_logits.append(F.softmax(output, dim=-1))\n dec_input = topi.detach()\n return torch.cat(output_logits, dim=0)\n\n'''\nRetain\n'''\nclass Retain(nn.Module):\n def __init__(self, voc_size, emb_size=64, device=torch.device('cpu:0')):\n super(Retain, self).__init__()\n self.device = device\n self.voc_size = voc_size\n self.emb_size = emb_size\n self.input_len = voc_size[0] + voc_size[1] + voc_size[2]\n self.output_len = voc_size[2]\n\n self.embedding = nn.Sequential(\n nn.Embedding(self.input_len + 1, self.emb_size, padding_idx=self.input_len),\n nn.Dropout(0.3)\n )\n\n self.alpha_gru = nn.GRU(emb_size, emb_size, batch_first=True)\n self.beta_gru = nn.GRU(emb_size, emb_size, batch_first=True)\n\n self.alpha_li = nn.Linear(emb_size, 1)\n self.beta_li = nn.Linear(emb_size, emb_size)\n\n self.output = nn.Linear(emb_size, self.output_len)\n\n def forward(self, input):\n device = self.device\n # input: (visit, 3, codes )\n max_len = max([(len(v[0]) + len(v[1]) + len(v[2])) for v in input])\n input_np = []\n for visit in input:\n input_tmp = []\n input_tmp.extend(visit[0])\n input_tmp.extend(list(np.array(visit[1]) + self.voc_size[0]))\n input_tmp.extend(list(np.array(visit[2]) + self.voc_size[0] + self.voc_size[1]))\n if len(input_tmp) < max_len:\n input_tmp.extend( [self.input_len]*(max_len - len(input_tmp)) )\n\n input_np.append(input_tmp)\n\n visit_emb = self.embedding(torch.LongTensor(input_np).to(device)) # (visit, max_len, emb)\n visit_emb = torch.sum(visit_emb, dim=1) # (visit, emb)\n\n g, _ = self.alpha_gru(visit_emb.unsqueeze(dim=0)) # g: (1, visit, emb)\n h, _ = self.beta_gru(visit_emb.unsqueeze(dim=0)) # h: (1, visit, emb)\n\n g = g.squeeze(dim=0) # (visit, emb)\n h = h.squeeze(dim=0) # (visit, emb)\n attn_g = F.softmax(self.alpha_li(g), dim=-1) # (visit, 1)\n attn_h = F.tanh(self.beta_li(h)) # (visit, emb)\n\n c = attn_g * attn_h * visit_emb # (visit, emb)\n c = torch.sum(c, dim=0).unsqueeze(dim=0) # (1, emb)\n\n return self.output(c)\n\n'''\nRF in train_LR.py\n'''\n\n\n"
] | [
[
"torch.nn.functional.softmax",
"torch.cat",
"torch.nn.GRU",
"torch.sum",
"torch.nn.Embedding",
"torch.FloatTensor",
"torch.device",
"torch.nn.Dropout",
"torch.mm",
"numpy.eye",
"torch.eye",
"torch.nn.functional.sigmoid",
"torch.nn.functional.relu",
"torch.LongTensor",
"numpy.power",
"numpy.diagflat",
"torch.nn.Linear",
"numpy.array",
"torch.nn.ReLU",
"numpy.isinf",
"torch.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
danieltao1993/2021Q2-hackathon | [
"975141e1d6402eed8f0f4673f409e9013718cf1f"
] | [
"projects/24-Orama/orama-neural-net.py"
] | [
"#! /usr/bin/env python3\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\ndef gen_normal(size, shape=[8, 8]) :\n w1 = np.random.normal(0, 0.1, size)\n w1 *= 1000\n np.mod(w1, 128)\n w1 = w1.astype(int)\n w1 = np.reshape(w1, shape)\n print(w1)\n return w1\n\nif __name__ == \"__main__\":\n DIM = 32\n NUM_NEURONS = DIM ** 2\n NUM_WEIGHTS = NUM_NEURONS ** 2\n\n HIDDEN0 = [0 for x in range(NUM_NEURONS)]\n HIDDEN1 = [0 for x in range(NUM_NEURONS)]\n\n init_pic = gen_normal(NUM_NEURONS, [NUM_NEURONS])\n weights0 = gen_normal(NUM_WEIGHTS, [NUM_WEIGHTS])\n weights1 = gen_normal(NUM_WEIGHTS, [NUM_WEIGHTS])\n\n #print(HIDDEN0)\n IN = init_pic\n for nid, _ in enumerate(HIDDEN0):\n for ins, _ in enumerate(init_pic):\n HIDDEN0[nid] = init_pic[ins] * weights0[nid * NUM_NEURONS + ins]\n if HIDDEN0[nid] < 0:\n HIDDEN0[nid] = 0\n\n #print(HIDDEN0)\n\n #print(HIDDEN1)\n for nid, _ in enumerate(HIDDEN1):\n for ins, _ in enumerate(HIDDEN0):\n HIDDEN1[nid] = HIDDEN0[ins] * weights0[nid * NUM_NEURONS + ins]\n if HIDDEN1[nid] < 0:\n HIDDEN1[nid] = 0\n\n #print(HIDDEN1)\n OUT = np.mod(HIDDEN1, 256)\n print(OUT)\n plt.imsave(\"a.bmp\", OUT.reshape(DIM, DIM))#, cmap=cm.gray)\n"
] | [
[
"numpy.mod",
"numpy.random.normal",
"numpy.reshape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kouohhashi/ctcdecode | [
"a40e197c30ebbb3e4a3296e0f761542bc258960a"
] | [
"ctcdecode/__init__.py"
] | [
"import torch\nfrom ._ext import ctc_decode\n\n\nclass CTCBeamDecoder(object):\n \"\"\"\n PyTorch wrapper for DeepSpeech PaddlePaddle Beam Search Decoder.\n Args:\n labels (list): The tokens/vocab used to train your model.\n They should be in the same order as they are in your model's outputs.\n model_path (basestring): The path to your external KenLM language model(LM)\n alpha (float): Weighting associated with the LMs probabilities.\n A weight of 0 means the LM has no effect.\n beta (float): Weight associated with the number of words within our beam.\n cutoff_top_n (int): Cutoff number in pruning. Only the top cutoff_top_n characters\n with the highest probability in the vocab will be used in beam search.\n cutoff_prob (float): Cutoff probability in pruning. 1.0 means no pruning.\n beam_width (int): This controls how broad the beam search is. Higher values are more likely to find top beams,\n but they also will make your beam search exponentially slower.\n num_processes (int): Parallelize the batch using num_processes workers.\n blank_id (int): Index of the CTC blank token (probably 0) used when training your model.\n log_probs_input (bool): False if your model has passed through a softmax and output probabilities sum to 1.\n \"\"\"\n\n def __init__(self, labels, model_path=None, alpha=0, beta=0, cutoff_top_n=40, cutoff_prob=1.0, beam_width=100,\n num_processes=4, blank_id=0, log_probs_input=False):\n self.cutoff_top_n = cutoff_top_n\n self._beam_width = beam_width\n self._scorer = None\n self._num_processes = num_processes\n self._labels = list(labels) # Ensure labels are a list\n self._num_labels = len(labels)\n self._blank_id = blank_id\n self._log_probs = 1 if log_probs_input else 0\n if model_path:\n self._scorer = ctc_decode.paddle_get_scorer(alpha, beta, model_path.encode(), self._labels,\n self._num_labels)\n self._cutoff_prob = cutoff_prob\n\n def decode(self, probs, seq_lens=None, funnels={}):\n \"\"\"\n Conducts the beamsearch on model outputs and return results.\n Args:\n probs (Tensor) - A rank 3 tensor representing model outputs. Shape is batch x num_timesteps x num_labels.\n seq_lens (Tensor) - A rank 1 tensor representing the sequence length of the items in the batch. 
Optional,\n if not provided the size of axis 1 (num_timesteps) of `probs` is used for all items\n\n Returns:\n tuple: (beam_results, beam_scores, timesteps, out_lens)\n\n beam_results (Tensor): A 3-dim tensor representing the top n beams of a batch of items.\n Shape: batchsize x num_beams x num_timesteps.\n Results are still encoded as ints at this stage.\n beam_scores (Tensor): A 3-dim tensor representing the likelihood of each beam in beam_results.\n Shape: batchsize x num_beams x num_timesteps\n timesteps (Tensor): A 2-dim tensor representing the timesteps at which the nth output character\n has peak probability.\n To be used as alignment between audio and transcript.\n Shape: batchsize x num_beams\n out_lens (Tensor): A 2-dim tensor representing the length of each beam in beam_results.\n Shape: batchsize x n_beams.\n\n \"\"\"\n probs = probs.cpu().float()\n batch_size, max_seq_len = probs.size(0), probs.size(1)\n if seq_lens is None:\n seq_lens = torch.IntTensor(batch_size).fill_(max_seq_len)\n else:\n seq_lens = seq_lens.cpu().int()\n\n output = torch.IntTensor(batch_size, self._beam_width, max_seq_len).cpu().int()\n timesteps = torch.IntTensor(batch_size, self._beam_width, max_seq_len).cpu().int()\n scores = torch.FloatTensor(batch_size, self._beam_width).cpu().float()\n out_seq_len = torch.zeros(batch_size, self._beam_width).cpu().int()\n if self._scorer:\n ctc_decode.paddle_beam_decode_lm(probs,\n seq_lens,\n self._labels,\n funnels,\n self._num_labels,\n self._beam_width,\n self._num_processes,\n self._cutoff_prob,\n self.cutoff_top_n,\n self._blank_id,\n self._log_probs,\n self._scorer,\n output,\n timesteps,\n scores,\n out_seq_len)\n else:\n ctc_decode.paddle_beam_decode(probs, seq_lens, self._labels, self._num_labels, self._beam_width,\n self._num_processes,\n self._cutoff_prob, self.cutoff_top_n, self._blank_id, self._log_probs,\n output, timesteps, scores, out_seq_len)\n\n return output, scores, timesteps, out_seq_len\n\n def character_based(self):\n return ctc_decode.is_character_based(self._scorer) if self._scorer else None\n\n def max_order(self):\n return ctc_decode.get_max_order(self._scorer) if self._scorer else None\n\n def dict_size(self):\n return ctc_decode.get_dict_size(self._scorer) if self._scorer else None\n\n def reset_params(self, alpha, beta):\n if self._scorer is not None:\n ctc_decode.reset_params(self._scorer, alpha, beta)\n\n def __del__(self):\n if self._scorer is not None:\n ctc_decode.paddle_release_scorer(self._scorer)\n"
] | [
[
"torch.zeros",
"torch.FloatTensor",
"torch.IntTensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MathGaron/py_rgbd_grabber | [
"8920d2eb8327f5f2a66a341df0cf214184d1d84e"
] | [
"tests/sensor_recorder_simple.py"
] | [
"#from py_rgbd_grabber.realsense import Realsense\n\nfrom py_rgbd_grabber.kinect2 import Kinect2\nimport time\nimport cv2\nimport numpy as np\nimport os\n\n\nif __name__ == '__main__':\n sensor = Kinect2()\n\n # will manage the other process automagically\n with sensor:\n # main loop\n while True:\n start_time = time.time()\n frame = sensor.pop_frame()\n\n # show and handle keyboard entries\n rgb = cv2.resize(frame.rgb, (int(frame.rgb.shape[1]/2), int(frame.rgb.shape[0]/2)))\n depth = cv2.resize(frame.depth, (int(frame.depth.shape[1]/2), int(frame.depth.shape[0]/2)))\n\n import matplotlib.pyplot as plt\n plt.imshow(depth, vmin=1200, vmax=1500)\n break\n cv2.imshow(\"rgb\", rgb[:, :, ::-1])\n cv2.imshow(\"depth\", (depth/np.max(depth)*255).astype(np.uint8))\n key = cv2.waitKey(1)\n if key == 1048603: # ESC\n break\n print(\"FPS : {}\".format(1./(time.time() - start_time)))\n\n plt.show()\n"
] | [
[
"numpy.max",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
krontzo/nume.py | [
"9d1e576fb3474333a8e2cf4f26f4236ee4f9deea",
"9d1e576fb3474333a8e2cf4f26f4236ee4f9deea"
] | [
"src/book/polyFit.py",
"src/book/example8_8.py"
] | [
"## module polyFit\r\n''' c = polyFit(xData,yData,m).\r\n Returns coefficients of the polynomial\r\n p(x) = c[0] + c[1]x + c[2]x^2 +...+ c[m]x^m\r\n that fits the specified data in the least\r\n squares sense.\r\n\r\n sigma = stdDev(c,xData,yData).\r\n Computes the std. deviation between p(x)\r\n and the data.\r\n''' \r\nimport numpy as np\r\nimport math\r\nfrom gaussPivot import *\r\n\r\ndef polyFit(xData,yData,m):\r\n a = np.zeros((m+1,m+1))\r\n b = np.zeros(m+1)\r\n s = np.zeros(2*m+1)\r\n for i in range(len(xData)):\r\n temp = yData[i]\r\n for j in range(m+1):\r\n b[j] = b[j] + temp\r\n temp = temp*xData[i]\r\n temp = 1.0\r\n for j in range(2*m+1):\r\n s[j] = s[j] + temp\r\n temp = temp*xData[i]\r\n for i in range(m+1):\r\n for j in range(m+1):\r\n a[i,j] = s[i+j]\r\n return gaussPivot(a,b)\r\n\r\ndef stdDev(c,xData,yData):\r\n \r\n def evalPoly(c,x):\r\n m = len(c) - 1\r\n p = c[m]\r\n for j in range(m):\r\n p = p*x + c[m-j-1]\r\n return p \r\n \r\n n = len(xData) - 1\r\n m = len(c) - 1\r\n sigma = 0.0\r\n for i in range(n+1):\r\n p = evalPoly(c,xData[i])\r\n sigma = sigma + (yData[i] - p)**2\r\n sigma = math.sqrt(sigma/(n - m))\r\n return sigma\r\n\r\n\r\n \r\n \r\n\r\n",
"#!/usr/bin/python\r\n## example8_8\r\nimport numpy as np\r\nfrom LUdecomp5 import *\r\n\r\ndef equations(x,h,m): # Set up finite difference eqs.\r\n h4 = h**4\r\n d = np.ones(m + 1)*6.0\r\n e = np.ones(m)*(-4.0)\r\n f = np.ones(m-1)\r\n b = np.zeros(m+1)\r\n d[0] = 1.0 \r\n d[1] = 7.0\r\n e[0] = 0.0\r\n f[0] = 0.0\r\n d[m-1] = 7.0\r\n d[m] = 3.0\r\n b[m] = 0.5*h**3\r\n return d,e,f,b\r\n\r\nxStart = 0.0 # x at left end\r\nxStop = 0.5 # x at right end\r\nm = 20 # Number of mesh spaces\r\nh = (xStop - xStart)/m\r\nx = np.arange(xStart,xStop + h,h)\r\nd,e,f,b = equations(x,h,m)\r\nd,e,f = LUdecomp5(d,e,f)\r\ny = LUsolve5(d,e,f,b)\r\nprint('\\n x y')\r\nprint('{:14.5e} {:14.5e}'.format(x[m-1],y[m-1]))\r\nprint('{:14.5e} {:14.5e}'.format(x[m],y[m]))\r\ninput(\"\\nPress return to exit\")\r\n\r\n\r\n"
] | [
[
"numpy.zeros"
],
[
"numpy.arange",
"numpy.zeros",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
qnano/simflux | [
"4f149d4e6c997954ac862cc5a7a404855b2a0be9",
"4f149d4e6c997954ac862cc5a7a404855b2a0be9"
] | [
"python/smlmlib/simflux.py",
"python/smlmlib/util.py"
] | [
"# -*- coding: utf-8 -*-\n\n\nimport ctypes\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport numpy.ctypeslib as ctl\nimport scipy.stats\nfrom enum import Enum\n\nfrom .base import SMLM, NullableFloatArrayType\nfrom smlmlib import gaussian\nfrom smlmlib.context import Context\nfrom smlmlib.psf import PSF\nfrom smlmlib.calib import sCMOS_Calib\n\nTheta = ctypes.c_float * 4\nFisherMatrix = ctypes.c_float * 16\nEstimationResult = gaussian.EstimationResult\nModulation = ctypes.c_float * 4\n\n\n\n\nclass SIMFLUX_ASW_Params(ctypes.Structure):\n _fields_ = [\n (\"imgw\", ctypes.c_int32),\n (\"numep\", ctypes.c_int32),\n (\"sigma\", ctypes.c_float),\n (\"levMarMaxIt\", ctypes.c_int32),\n (\"levMarLambdaStep\", ctypes.c_float)\n ]\n\n def make(imgw, numep, sigma, levMarIt=100, startLambdaStep=0.1):\n return SIMFLUX_ASW_Params(imgw, numep, sigma, levMarIt, startLambdaStep)\n\n\nclass SIMFLUX:\n def __init__(self, ctx:Context):\n self.ctx = ctx\n smlmlib = ctx.smlm.lib\n self._SIMFLUX_ASW_ComputeMLE = smlmlib.SIMFLUX_ASW_ComputeMLE\n self._SIMFLUX_ASW_ComputeMLE.argtypes = [\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # img\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # modulation\n ctypes.POINTER(EstimationResult), # results\n ctypes.c_int32, # numspots\n ctypes.c_int32, # numframes\n ctypes.POINTER(SIMFLUX_ASW_Params), # p\n NullableFloatArrayType, # initialValue\n ctl.ndpointer(np.int32, flags=\"aligned, c_contiguous\"), # roipos\n ctypes.c_int32, # flags\n NullableFloatArrayType, # tracebuf\n ctypes.c_int32, # tracebuflen per spot\n ]\n\n self._SIMFLUX_ASW_ComputeFisherMatrix = smlmlib.SIMFLUX_ASW_ComputeFisherMatrix\n self._SIMFLUX_ASW_ComputeFisherMatrix.argtypes = [\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # mu\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # fi\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # phi\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # theta\n ctl.ndpointer(np.int32, flags=\"aligned, c_contiguous\"), # theta\n ctypes.c_int32, # numspots\n ctypes.c_int32, # numframes\n ctypes.POINTER(SIMFLUX_ASW_Params),\n ]\n # CDLL_EXPORT void SIMFLUX_DFT2D_Points(const Vector3f* xyI, int numpts, const Vector2f* k, \n # int numk, Vector2f* output, bool useCuda);\n\n self._SIMFLUX_DFT2D_Points = smlmlib.SIMFLUX_DFT2D_Points\n self._SIMFLUX_DFT2D_Points.argtypes = [\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # xyI\n ctypes.c_int32, # numpts\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # k\n ctypes.c_int32, # numk\n ctl.ndpointer(np.complex64, flags=\"aligned, c_contiguous\"), # output\n ctypes.c_bool # useCuda\n ]\n\n # CDLL_EXPORT void FFT(const cuFloatComplex* src, cuFloatComplex* dst, int batchsize, int siglen, int forward)\n self._FFT = smlmlib.FFT\n self._FFT.argtypes = [\n ctl.ndpointer(np.complex64, flags=\"aligned, c_contiguous\"), # src\n ctl.ndpointer(np.complex64, flags=\"aligned, c_contiguous\"), # dst\n ctypes.c_int32, # batchsize\n ctypes.c_int32, # numsigA\n ctypes.c_int32, # forward\n ]\n \n self._SIMFLUX_ASW_ComputeOnOffProb = smlmlib.SIMFLUX_ASW_ComputeOnOffProb \n self._SIMFLUX_ASW_ComputeOnOffProb.argtypes = [\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # rois [numspots,numframes,roisize,roisize]\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # mod[numep]\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # gaussFits [numspots]\n ctl.ndpointer(np.float32, flags=\"aligned, 
c_contiguous\"), # IBg[out]\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # probOnOff[numspots,numframes,2]\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # crlbVariances[numspots,numframes,2]\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # expectedIntensities[numspots,numframes]\n ctypes.POINTER(SIMFLUX_ASW_Params), # p\n ctypes.c_int32, # numframes\n ctypes.c_int32, # numspots\n ctl.ndpointer(np.int32, flags=\"aligned, c_contiguous\"), # startPatterns[numspots]\n ctl.ndpointer(np.int32, flags=\"aligned, c_contiguous\"), # roipos[numspots]\n ctypes.c_bool, # useCuda\n ]\n \n self._SIMFLUX_ProjectPointData = smlmlib.SIMFLUX_ProjectPointData\n self._SIMFLUX_ProjectPointData.argtypes = [\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # xyI\n ctypes.c_int32, # numpts\n ctypes.c_int32, # projectionWidth\n ctypes.c_float, # scale\n ctypes.c_int32, # numProjAngles\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # projectionAngles\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # output\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # output\n ]\n \n#CDLL_EXPORT PSF* SIMFLUX2D_PSF_Create(PSF* original, SIMFLUX_Modulation* mod, int num_patterns, \n#\tconst int * xyIBg_indices)\n \n self._SIMFLUX2D_PSF_Create = smlmlib.SIMFLUX2D_PSF_Create\n self._SIMFLUX2D_PSF_Create.argtypes = [\n ctypes.c_void_p,\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # mod[numep]\n ctypes.c_int,\n ctl.ndpointer(np.int32, flags=\"aligned, c_contiguous\"), \n ctypes.c_bool,\n ctypes.c_void_p\n ]\n self._SIMFLUX2D_PSF_Create.restype = ctypes.c_void_p\n \n#CDLL_EXPORT PSF* SIMFLUX2D_Gauss2D_PSF_Create(SIMFLUX_Modulation* mod, int num_patterns, \n# float sigma, int roisize, int numframes, bool simfluxFit, Context* ctx);\n \n self._SIMFLUX2D_Gauss2D_PSF_Create = smlmlib.SIMFLUX2D_Gauss2D_PSF_Create \n self._SIMFLUX2D_Gauss2D_PSF_Create.argtypes= [\n \n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # mod[numep]\n ctypes.c_int, # numpatterns\n ctypes.c_float, # sigma_x\n ctypes.c_float, # sigma_y\n ctypes.c_int, # roisize\n ctypes.c_int, # nframes\n ctypes.c_bool, # simfluxfit\n ctypes.c_bool, # defineStartEnd\n ctypes.c_void_p, # scmos\n ctypes.c_void_p # context\n ]\n self._SIMFLUX2D_Gauss2D_PSF_Create.restype = ctypes.c_void_p\n \n #\n#(int* spotToLinkedIdx, int *startframes, int *ontime,\n#int numspots, int numlinked, int numpatterns, SpotToExtract* result)\n\n self._SIMFLUX_GenerateROIExtractionList = smlmlib.SIMFLUX_GenerateROIExtractionList\n self._SIMFLUX_GenerateROIExtractionList.argtypes= [\n ctl.ndpointer(np.int32, flags=\"aligned, c_contiguous\"), # startframes\n ctl.ndpointer(np.int32, flags=\"aligned, c_contiguous\"), # ontime\n ctypes.c_int, #maxresults\n ctypes.c_int, # numlinked\n ctypes.c_int, # numpatterns\n ctl.ndpointer(np.int32, flags=\"aligned, c_contiguous\") # results\n ]\n\n def GenerateROIExtractionList(self, startframes, ontime, numpatterns):\n \"\"\"\n returns linkedIndex, numroi and firstframe\n \"\"\"\n maxresults = np.sum(ontime)//numpatterns\n numlinked = len(startframes)\n startframes= np.ascontiguousarray(startframes,dtype=np.int32)\n ontime = np.ascontiguousarray(ontime, dtype=np.int32)\n results = np.zeros((maxresults,3),dtype=np.int32)\n resultcount = self._SIMFLUX_GenerateROIExtractionList(startframes,ontime,maxresults,numlinked,numpatterns,results)\n results =results[:resultcount]\n return results[:,0],results[:,1],results[:,2]\n\n \n def 
CreateSIMFLUX2DPSF(self, psf:PSF, mod, xyIBgIndices, simfluxEstim=False) -> PSF:\n xyIBgIndices=np.ascontiguousarray(xyIBgIndices,dtype=np.int32)\n assert(len(xyIBgIndices)==4)\n inst = self._SIMFLUX2D_PSF_Create(psf.inst, mod.astype(np.float32), len(mod), xyIBgIndices, \n simfluxEstim, self.ctx.inst if self.ctx else None)\n return PSF(self.ctx,inst)\n \n class SIMFLUX_PSF(PSF):\n def __init__(self, ctx:Context, psfInst, mod):\n self.mod = mod\n super().__init__(self, ctx, psfInst)\n\n def ComputeExcitation(self,x,y):\n return self.mod[...,4]*(1+self.mod[...,2]*np.sin(self.mod[...,0]*x + self.mod[...,1]*y - self.mod[...,3]))\n\n def CreateSIMFLUX2D_Gauss2D_PSF(self, sigma, mod_or_num_patterns, roisize, \n numframes, simfluxEstim=False, defineStartEnd=False, scmos_calib=None) -> PSF:\n if scmos_calib is not None:\n assert(isinstance(scmos_calib,sCMOS_Calib))\n scmos_calib = scmos_calib.inst\n\n mod = mod_or_num_patterns\n\n if mod is None:\n mod = 1\n if np.isscalar(mod):\n mod = np.zeros((mod,5))\n else:\n mod = np.ascontiguousarray(mod)\n assert(mod.shape[1] == 5)\n\n if np.isscalar(sigma):\n sigma_x, sigma_y = sigma,sigma\n else:\n sigma_x, sigma_y = sigma\n \n inst = self._SIMFLUX2D_Gauss2D_PSF_Create(mod.astype(np.float32), len(mod), sigma_x, sigma_y,\n roisize, numframes, simfluxEstim, defineStartEnd,\n scmos_calib, self.ctx.inst if self.ctx else None)\n return PSF(self.ctx,inst)\n\n # Convert an array of phases to an array of alternating XY modulation parameters\n def phase_to_mod(self, phases, omega, depth=1):\n mod = np.zeros((*phases.shape, 5), dtype=np.float32)\n mod[..., 0::2, 0] = omega # kx\n mod[..., 1::2, 1] = omega # ky\n mod[..., 2] = depth\n mod[..., 3] = phases\n mod[..., 4] = 1/len(mod)\n return mod\n\n#CDLL_EXPORT void SIMFLUX_ASW_ComputeOnOffProb(const float* rois, \n#const SIMFLUX_Modulation* modulation, Vector4f* gaussFits, \n#\tVector2f* IBg, Vector2f* probOnOff, const SIMFLUX_ASW_Params& params, int numframes, \n#\tint numspots, const int* startPatterns, const int2* roipos, bool useCuda)\n def SIMFLUX_ASW_ComputeOnOffProb(self, images, mod, xyIBg_gauss, silmParams: SIMFLUX_ASW_Params, \n startPatterns, roipos, useCuda):\n mod = np.ascontiguousarray(mod, dtype=np.float32)\n images = np.ascontiguousarray(images, dtype=np.float32)\n xyIBg_gauss = np.ascontiguousarray(xyIBg_gauss,dtype=np.float32)\n numframes = images.shape[1]\n numspots = images.shape[0]\n \n probOnOff = np.zeros((numspots,numframes,2),dtype=np.float32)\n crlbVariances = np.zeros((numspots,numframes,2),dtype=np.float32)\n expectedIntensity = np.zeros((numspots,numframes),dtype=np.float32)\n IBg = np.zeros((numspots,numframes,2),dtype=np.float32)\n startPatterns = np.ascontiguousarray(startPatterns,dtype=np.int32)\n roipos = np.ascontiguousarray(roipos, dtype=np.int32)\n \n self._SIMFLUX_ASW_ComputeOnOffProb(\n images, mod, xyIBg_gauss, IBg, probOnOff, crlbVariances, expectedIntensity, silmParams, \n numframes, numspots, startPatterns,roipos, useCuda)\n \n return probOnOff, IBg, crlbVariances, expectedIntensity\n \n\n def Params(self, imgw, numep, sigma, levMarIt=100, startLambdaStep=0.1):\n return SIMFLUX_ASW_Params(imgw, numep, sigma, levMarIt, startLambdaStep)\n \n \n def SIMFLUX_DFT2D_Points(self, xyI, k, useCuda=True):\n xyI = np.ascontiguousarray(xyI, dtype=np.float32)\n numpts = len(xyI)\n k = np.ascontiguousarray(k, dtype=np.float32)\n output = np.zeros( len(k), dtype=np.complex64)\n self._SIMFLUX_DFT2D_Points(xyI, numpts, k, len(k), output, useCuda)\n return output\n\n # CDLL_EXPORT void 
SIMFLUX_ProjectPointData(const Vector3f *xyI, int numpts, int projectionWidth,\n # \tfloat scale, int numProjAngles, const float *projectionAngles, float* output)\n def ProjectPoints(self, xyI, projectionWidth, scale, projectionAngles):\n numProjAngles = len(projectionAngles)\n assert xyI.shape[1] == 3\n xyI = np.ascontiguousarray(xyI, dtype=np.float32)\n output = np.zeros((numProjAngles, projectionWidth), dtype=np.float32)\n shifts = np.zeros((numProjAngles), dtype=np.float32)\n\n self._SIMFLUX_ProjectPointData(\n xyI,\n len(xyI),\n projectionWidth,\n scale,\n numProjAngles,\n np.array(projectionAngles, dtype=np.float32),\n output,\n shifts,\n )\n return output, shifts\n\n ##CDLL_EXPORT void FFT(const cuFloatComplex* src, cuFloatComplex* dst, int batchsize, int siglen, int forward)\n\n def FFT(self, src, forward=True):\n batchsize = len(src)\n src = np.ascontiguousarray(src, dtype=np.complex64)\n dst = np.zeros(src.shape, dtype=np.complex64)\n self._FFT(src, dst, batchsize, src.shape[1], forward)\n return dst\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 26 13:06:46 2018\n\n@author: jcnossen1\n\"\"\"\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport sys\n\n# smp = [numspots, width, height]\ndef compute_com(smp):\n w = smp.shape[1]\n h = smp.shape[2]\n X, Y = np.meshgrid(np.arange(w), np.arange(h))\n X = np.tile(X, (smp.shape[0], 1, 1))\n Y = np.tile(Y, (smp.shape[0], 1, 1))\n moment_x = np.sum(smp * X, (1, 2))\n moment_y = np.sum(smp * Y, (1, 2))\n sums = np.sum(smp, (1, 2))\n\n bgfraction = 0.2\n\n theta = np.ascontiguousarray(\n np.array([moment_x / sums, moment_y / sums, sums * (1 - bgfraction), bgfraction * sums / (w * h)], np.float32).T\n )\n return theta\n\n\ndef imshow_many(imgs, title=None, cols=6, maxrows=5):\n n = imgs.shape[0]\n imgh = imgs.shape[1]\n imgw = imgs.shape[2]\n\n nrows = math.ceil(n / cols)\n\n if nrows > maxrows:\n nrows = maxrows\n\n fig, axes = plt.subplots(nrows, cols, True, True,squeeze=False)\n\n for y in range(nrows):\n for x in range(cols):\n if y * nrows + x < len(imgs):\n axes[y][x].imshow(imgs[y * nrows + x])\n\n if title:\n fig.suptitle(title)\n\n return fig\n\n\ndef imshow_hstack(imgs, title=None, max_num_img=10,colorbar=False):\n n = np.minimum(max_num_img, len(imgs))\n fig=plt.figure()\n plt.imshow(np.hstack(tuple(imgs[:n])))\n if title:\n plt.title(title)\n return fig\n\ndef imshow_rois(imgs, title=None, cols=10, maxrows=10,colorbar=False):\n n = imgs.shape[0]\n imgh = imgs.shape[1]\n imgw = imgs.shape[2]\n\n nrows = (n+cols-1)//cols\n \n if nrows > maxrows:\n nrows = maxrows\n n=maxrows*cols\n \n img = np.zeros((imgh*nrows,imgw*cols))\n for k in range(n):\n y = k//cols\n x = k%cols\n img[imgh*y:imgh*(y+1),imgw*x:imgw*(x+1)]=imgs[k]\n\n fig=plt.figure()\n plt.imshow(img)\n if title:\n plt.title(title)\n plt.show()\n return fig\n\ndef save_movie(imgs, fn, fps=15):\n import matplotlib.animation as manimation\n\n print(f\"saving {len(imgs)} images to {fn}\")\n FFMpegWriter = manimation.writers[\"ffmpeg\"]\n metadata = dict(title=\"Movie Test\", artist=\"Matplotlib\", comment=\"Movie support!\")\n writer = FFMpegWriter(fps=fps, metadata=metadata)\n\n fig = plt.figure()\n with writer.saving(fig, fn, dpi=100):\n for i in range(len(imgs)):\n sys.stdout.write(\".\")\n plt.imshow(imgs[i])\n writer.grab_frame()\n plt.clf()\n\n print(f\"done\")\n\n\ndef sum_epp(img):\n return np.sum(img, 1)\n\n\ndef chisq(mu, smp):\n return np.sum(mu - smp - smp * np.log(mu / np.maximum(smp, 1)))\n\n\ndef loglikelihood(mu, smp):\n return np.sum(smp * np.log(np.maximum(mu, 1e-9)) - mu)\n\n\ndef extract_roi(images, x, y, roisize):\n hs = roisize // 2\n r = np.zeros((len(images), roisize, roisize))\n for f in range(len(images)):\n r[f] = images[f, y - hs : y + hs, x - hs : x + hs]\n return r\n\n\n# x and y are arrays\ndef extract_rois(images, x, y, roisize):\n images = np.array(images)\n r = np.zeros((len(x), len(images), roisize, roisize), dtype=images.dtype)\n halfroi = roisize // 2\n for s in range(len(x)):\n for f in range(len(images)):\n r[s, f] = images[f, y[s] - halfroi : y[s] - halfroi + roisize, x[s] - halfroi : x[s] - halfroi + roisize]\n\n return r\n\n\n"
] | [
[
"numpy.ascontiguousarray",
"numpy.sin",
"numpy.isscalar",
"numpy.ctypeslib.ndpointer",
"numpy.array",
"numpy.zeros",
"numpy.sum"
],
[
"matplotlib.pyplot.imshow",
"numpy.maximum",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.subplots",
"numpy.tile",
"matplotlib.pyplot.clf",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.sum",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
metantonio/AEMET-pythonclient-to-Excel | [
"3516592d4b04904fdfd0b9d6f2579e34fdd176d5"
] | [
"app.py"
] | [
"import http.client\nimport os\nimport requests\nimport json\nimport csv\nimport pandas as pd\nfrom base64 import b64encode\n\nconn = http.client.HTTPSConnection(\"opendata.aemet.es\")\nprint(\"Coloque su API KEY de opendata.aemet.es\\n\")\napi = input()\nprint(f\"API, {api} \\n\")\nprint(\"Coloque el código de la estación, ejemplo de la estación Hinojosa del Duque en Córdoba (con mayúsculas): 4267X\\n\")\nestacion = input()\nprint(\"Tenga en cuenta que la siguiente API solo puede abarcar un rango de fechas de 5 anios \\n\")\nprint(\"a Continuación coloque la fecha de inicio con el siguiente formato: 2015-08-23 \\n\")\nfechaIni=input()\nprint(\"a Continuación coloque la fecha de cierre con el siguiente formato: 2016-08-23 \\n\")\nfechaFin=input()\n\nheaders = {\n 'cache-control': \"no-cache\"\n }\n\nconn.request(\"GET\", \"/opendata/api/valores/climatologicos/diarios/datos/fechaini/\"+fechaIni+\"T00:00:00UTC/fechafin/\"+fechaFin+\"T23:59:59UTC/estacion/\"+estacion+\"/?api_key=\"+api, headers=headers)\n\nres = conn.getresponse()\ndata = res.read()\n\nprint(data.decode(\"utf-8\"))\n#respuestaUrl=data.decode.datos\n\ndef serialize(self):\n return {\n \"datos\": self.datos,\n # do not serialize the password, its a security breach\n }\n\nprint(\"Ahora copie a continuación la URL sin comillas que aparece en la etiqueta datos, y péguela a continuación \\n\")\nurl2=input()\nresponse2=requests.get(url2)\n#x = requests.get(url2)\nprint(response2)\n#print(response2.content)\n\n# def save_users(users):\n# \"\"\"\n# abre/crea el archivo users_lists.json y guarda\n# la lista de diccionarios que representan a los\n# usuarios.\n# \"\"\"\n# with open(os.path.join(os.getcwd(), \"users_lists.json\"), \"w\") as users_file:\n# users_as_dictionaries = []\n# for user in users:\n# users_as_dictionaries.append(user.serialize())\n# json.dump(users_as_dictionaries, users_file)\n\n## Write API Results to CSV\n# Write to .CSV\nf = open('newfile.json', \"w\")\nf.write(response2.text)\nf.close()\n\n\n\n# with open('newfile.txt', 'r') as in_file:\n# stripped = (line.strip() for line in in_file)\n# lines = (line.split(\",\") for line in stripped if line)\n# with open('log.csv', 'w') as out_file:\n# writer = csv.writer(out_file)\n# writer.writerow(('title', 'intro'))\n# writer.writerows(lines)\n\ndf = pd.read_json (r'newfile.json')\ndf.to_csv (r'Final.csv', index = None)\n\nprint(\"\"\"\narchivo .cvs y .txt creados\n\nApp creada por:\nAntonio Martínez\[email protected]\n\"\"\")"
] | [
[
"pandas.read_json"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
andrrizzi/tfep | [
"a98ec870007a2ceb72cab147d9e0dfffb7dc8849",
"a98ec870007a2ceb72cab147d9e0dfffb7dc8849"
] | [
"tfep/tests/nn/flows/test_maf.py",
"tfep/nn/transformers/sos.py"
] | [
"#!/usr/bin/env python\n\n\n# =============================================================================\n# MODULE DOCSTRING\n# =============================================================================\n\n\"\"\"\nTest MAF layer in tfep.nn.flows.maf.\n\"\"\"\n\n\n# =============================================================================\n# GLOBAL IMPORTS\n# =============================================================================\n\nimport numpy as np\nimport pytest\nimport torch\n\nfrom tfep.nn.utils import generate_block_sizes\nfrom tfep.nn.transformers import (\n AffineTransformer, SOSPolynomialTransformer,\n NeuralSplineTransformer, MobiusTransformer\n)\nfrom tfep.nn.flows import MAF\nfrom ..utils import create_random_input\n\n\n# =============================================================================\n# TESTS\n# =============================================================================\n\[email protected]('dimensions_hidden', [1, 4])\[email protected]('conditioning_indices', [\n [],\n [0, 1],\n [0, 3],\n [1, 3],\n [0, 4],\n [2, 4],\n [3, 4]\n])\[email protected]('degrees_in', ['input', 'reversed'])\[email protected]('weight_norm', [False, True])\[email protected]('split_conditioner', [True, False])\[email protected]('transformer', [\n AffineTransformer(),\n SOSPolynomialTransformer(2),\n SOSPolynomialTransformer(3),\n NeuralSplineTransformer(x0=torch.tensor(-2), xf=torch.tensor(2), n_bins=3),\n MobiusTransformer(blocks=3, shorten_last_block=True)\n])\ndef test_identity_initialization_MAF(dimensions_hidden, conditioning_indices, degrees_in,\n weight_norm, split_conditioner, transformer):\n \"\"\"Test that the identity initialization of MAF works.\n\n This tests that the flow layers can be initialized to perform the\n identity function.\n\n \"\"\"\n dimension = 5\n batch_size = 2\n\n # We don't initialize as the identity function to make the test meaningful.\n maf = MAF(\n dimension,\n dimensions_hidden,\n conditioning_indices=conditioning_indices,\n degrees_in=degrees_in,\n weight_norm=weight_norm,\n split_conditioner=split_conditioner,\n transformer=transformer,\n initialize_identity=True\n )\n\n # Create random input.\n if isinstance(transformer, NeuralSplineTransformer):\n x = create_random_input(batch_size, dimension, x_func=torch.rand)\n x = x * (transformer.xf - transformer.x0) + transformer.x0\n else:\n x = create_random_input(batch_size, dimension)\n\n y, log_det_J = maf.forward(x)\n\n assert torch.allclose(x, y)\n assert torch.allclose(log_det_J, torch.zeros(batch_size), atol=1e-6)\n\n\[email protected]('conditioning_indices', [\n [],\n [0, 1],\n [0, 3],\n [1, 3],\n [0, 4],\n [2, 4],\n [3, 4]\n])\[email protected]('degrees_in', ['input', 'reversed', 'random'])\[email protected]('split_conditioner', [True, False])\[email protected]('transformer', [\n AffineTransformer(),\n MobiusTransformer(blocks=3, shorten_last_block=True)\n])\[email protected]('weight_norm', [False, True])\ndef test_round_trip_MAF(conditioning_indices, degrees_in, weight_norm, split_conditioner, transformer):\n \"\"\"Test that the MAF.inverse(MAF.forward(x)) equals the identity.\"\"\"\n dimension = 5\n dimensions_hidden = 2\n batch_size = 2\n n_conditioning_dofs = len(conditioning_indices)\n\n # Temporarily set default precision to double to improve comparisons.\n old_dtype = torch.get_default_dtype()\n torch.set_default_dtype(torch.double)\n\n # With the Mobius transformer, we need block dependencies.\n if isinstance(transformer, MobiusTransformer):\n blocks = 
generate_block_sizes(dimension-n_conditioning_dofs, transformer.blocks,\n transformer.shorten_last_block)\n shorten_last_block = transformer.shorten_last_block\n n_blocks = len(blocks)\n else:\n blocks = 1\n shorten_last_block = False\n n_blocks = dimension - n_conditioning_dofs\n\n # Make sure the permutation is reproducible.\n if degrees_in == 'random':\n random_state = np.random.RandomState(0)\n degrees_in = random_state.permutation(range(n_blocks))\n\n # We don't initialize as the identity function to make the test meaningful.\n maf = MAF(\n dimension, dimensions_hidden,\n conditioning_indices=conditioning_indices,\n degrees_in=degrees_in,\n weight_norm=weight_norm,\n blocks=blocks,\n shorten_last_block=shorten_last_block,\n split_conditioner=split_conditioner,\n transformer=transformer,\n initialize_identity=False\n )\n\n # Create random input.\n x = create_random_input(batch_size, dimension)\n\n # The conditioning features are always left unchanged.\n y, log_det_J = maf.forward(x)\n assert torch.allclose(x[:, conditioning_indices], y[:, conditioning_indices])\n\n # Inverting the transformation produces the input vector.\n x_inv, log_det_J_inv = maf.inverse(y)\n assert torch.allclose(x, x_inv)\n assert torch.allclose(log_det_J + log_det_J_inv, torch.zeros(batch_size), atol=1e-04)\n\n # Restore default dtype.\n torch.set_default_dtype(old_dtype)\n",
"#!/usr/bin/env python\n\n\n# =============================================================================\n# MODULE DOCSTRING\n# =============================================================================\n\n\"\"\"\nSum-of-squares polynomial transformer for autoregressive normalizing flows.\n\"\"\"\n\n\n# =============================================================================\n# GLOBAL IMPORTS\n# =============================================================================\n\nimport numpy as np\nimport torch\nimport torch.autograd\n\n\n# =============================================================================\n# SUM-OF-SQUARES POLYNOMIAL TRANSFORMER\n# =============================================================================\n\nclass SOSPolynomialTransformer(torch.nn.Module):\n \"\"\"Sum-of-squares polynomial transformer module for autoregressive normalizing flows.\n\n This is an implementation of the polynomial transformer proposed in [1].\n\n :math:`y_i = a_0 + \\int_0^{x_i} \\sum_{k=1}^K \\left( \\sum_{l=0}^L a_{kl} z^l \\right)^2 dz`\n\n where :math:`K` and :math:`L` are the total number and degree of the polynomials\n respectively, and :math:`a_X` represent the parameters of the transformer.\n\n Only sums of squared first-degree polynomials (i.e., L=1) are currently\n supported as they are the only one with an analytic inverse and sum of\n zeroth degree polynomials (i.e., L=0) are equivalent to affine transformer.\n\n Parameters\n ----------\n n_polynomials : int\n The functional form of this transformer is a sum of squared polynomials.\n This is the number of such polynomials, which must be greater than 1.\n The more polynomials, the greater the number of parameters. Default is 2.\n\n See Also\n --------\n nets.functions.transformer.sos_polynomial_transformer\n\n References\n ----------\n [1] Jaini P, Selby KA, Yu Y. Sum-of-Squares Polynomial Flow. arXiv\n preprint arXiv:1905.02325. 
2019 May 7.\n\n \"\"\"\n def __init__(self, n_polynomials=2):\n super().__init__()\n if n_polynomials < 2:\n raise ValueError('n_polynomials must be strictly greater than 1.')\n self.n_polynomials = n_polynomials\n\n @property\n def degree_polynomials(self):\n \"\"\"The degree of each squared polynomial.\"\"\"\n return 1\n\n @property\n def parameters_per_polynomial(self):\n \"\"\"Numer of parameters needed by the transformer for each squared polynomial.\"\"\"\n return self.degree_polynomials + 1\n\n @property\n def n_parameters_per_input(self):\n \"\"\"Number of parameters needed by the transformer for each input dimension.\"\"\"\n return self.parameters_per_polynomial * self.n_polynomials + 1\n\n def forward(self, x, parameters):\n \"\"\"Apply the transformation to the input.\n\n See the documentation of :func:`~tfep.nn.transformers.sos_polynomial_transformer`\n for details.\n\n \"\"\"\n return sos_polynomial_transformer(x, parameters)\n\n def inverse(self, y, parameters):\n \"\"\"Currently not implemented.\"\"\"\n raise NotImplementedError(\n 'Inversion of SOS polynomial transformer has not been implemented yet.')\n\n def get_identity_parameters(self, n_features):\n \"\"\"Return the value of the parameters that makes this the identity function.\n\n This can be used to initialize the normalizing flow to perform the identity\n transformation.\n\n Parameters\n ----------\n n_features : int\n The dimension of the input vector of the transformer.\n\n Returns\n -------\n parameters : torch.Tensor\n A tensor of shape ``(1+K*L, n_features)`` where ``K`` and ``L`` are\n the number and degree of the polynomials.\n\n \"\"\"\n id_conditioner = torch.zeros(size=(self.n_parameters_per_input, n_features))\n # The sum of the squared linear parameters must be 1.\n id_conditioner[1::self.parameters_per_polynomial].fill_(np.sqrt(1 / self.n_polynomials))\n return id_conditioner\n\n\n# =============================================================================\n# FUNCTIONAL API\n# =============================================================================\n\nclass SOSPolynomialTransformerFunc(torch.autograd.Function):\n r\"\"\"Implement the sum-of-squares polynomial transformer for triangular maps.\n\n This provides a functional API for the :class:`~tfep.nn.transformers.SOSPolynomialTransformer`\n layer. It implements the polynomial transformer proposed in [1].\n\n :math:`y_i = a_0 + \\int_0^{x_i} \\sum_{k=1}^K \\left( \\sum_{l=0}^L a_{kl} z^l \\right)^2 dz`\n\n where :math:`K` and :math:`L` are the total number and degree of the polynomials\n respectively, and :math:`a_X` represent the parameters of the transformer.\n\n The function returns the transformed feature as a ``Tensor`` of shape\n ``(batch_size, n_features)`` and the log absolute determinant of its\n Jacobian as a ``Tensor`` of shape ``(batch_size,)``.\n\n Only sums of squared first-degree polynomials (i.e., L=1) are currently\n supported as they are the only one with an analytic inverse and sum of\n zeroth degree polynomials (i.e., L=0) are equivalent to affine transformer.\n\n Parameters\n ----------\n x : torch.Tensor\n Input tensor x of shape ``(batch_size, n_features)``.\n parameters : torch.Tensor\n The coefficients of the squared polynomials obtained from the\n conditioner. 
Each ``Tensor`` has shape ``(batch_size, 1+K*L, n_features)``.\n The coefficients are ordered by polynomial so that ``parameters[:,0]``\n is :math:`a_0` followed by :math:`a_{10}, a_{11}, ..., a_{K0}, a_{K1}`.\n\n Returns\n -------\n y : torch.Tensor\n Output tensor of shape ``(batch_size, n_features)``.\n log_det_J : torch.Tensor\n The logarithm of the absolute value of the determinant of the Jacobian\n of the transformation with shape ``(batch_size,)``.\n\n References\n ----------\n [1] Jaini P, Selby KA, Yu Y. Sum-of-Squares Polynomial Flow. arXiv\n preprint arXiv:1905.02325. 2019 May 7.\n\n \"\"\"\n\n @staticmethod\n def forward(ctx, x, parameters):\n # Compute the parameters of the sos polynomial.\n sos_degree_coefficients = SOSPolynomialTransformerFunc.get_sos_poly_coefficients(parameters)\n\n # Compute the power of x.\n x_powers = [x, x*x]\n\n # Compute y and the gradient of y w.r.t. x.\n y = sos_degree_coefficients[1].clone()\n grad_x = sos_degree_coefficients[1].clone()\n\n for degree, coef in enumerate(sos_degree_coefficients[2:]):\n term = coef * x_powers[degree]\n y += term\n grad_x += (degree+2) * term\n\n y *= x\n y += sos_degree_coefficients[0]\n\n log_det_J = torch.sum(torch.log(grad_x), dim=1)\n\n # Save tensor used for backward() before returning.\n ctx.save_for_backward(grad_x, parameters, *x_powers)\n\n # We don't need to compute gradients of log_det_J.\n ctx.mark_non_differentiable(log_det_J)\n return y, log_det_J\n\n @staticmethod\n def backward(ctx, grad_y, grad_log_det_J):\n saved_grad_x, parameters, x, x2 = ctx.saved_tensors\n grad_x = grad_parameters = None\n batch_size, n_features = saved_grad_x.shape\n\n # Compute gradients w.r.t. input parameters.\n if ctx.needs_input_grad[0]:\n grad_x = saved_grad_x * grad_y\n\n if ctx.needs_input_grad[1]:\n grad_parameters = torch.empty_like(parameters)\n\n # The first coefficient is the constant term.\n grad_parameters[:, 0] = torch.ones(\n size=(batch_size, n_features), dtype=saved_grad_x.dtype)\n\n # Zeroth and first degree terms of the inner polynomials.\n zeroth_degree_terms = parameters[:, 1::2]\n first_degree_terms = parameters[:, 2::2]\n\n # We need to add a dimension corresponding to the number of\n # coefficients in the power of x for them to be broadcastable.\n x = x.unsqueeze(1)\n x2 = x2.unsqueeze(1)\n x3 = x2 * x\n\n grad_parameters[:, 1::2] = first_degree_terms*x2 + 2*zeroth_degree_terms*x\n grad_parameters[:, 2::2] = 2/3*first_degree_terms*x3 + zeroth_degree_terms*x2\n\n grad_parameters = grad_parameters * grad_y.unsqueeze(1)\n\n return grad_x, grad_parameters\n\n @staticmethod\n def get_sos_poly_coefficients(parameters):\n \"\"\"Compute the coefficient of the SOS polynomial.\n\n Parameters\n ----------\n parameters : torch.Tensor\n The coefficients of the squared polynomials obtained from the\n conditioner. Each ``Tensor`` has shape ``(batch_size, 1+K*L, n_features)``.\n The coefficients are ordered by polynomial so that ``parameters[:,0]``\n is :math:`a_0` followed by :math:`a_{10}, a_{11}, ..., a_{K0}, a_{K1}`.\n\n Returns\n -------\n sos_poly_coefficients : List[torch.Tensor]\n ``sos_poly_coefficients[i]`` is a tensor of shape ``(batch_size, n_features)``\n with the coefficients of the term of the SOS polynomial of degree ``i``.\n\n \"\"\"\n # We support only L=1 for now. 
Number of coefficients in\n # each summed polynomials include also the constant term.\n coeff_per_inner_poly = 2\n batch_size, _, n_features = parameters.shape\n\n # inner_degree_parameters[d][b][p] is the parameter for the term of\n # the p-th inner polynomial of degree d for the b-th batch sample.\n inner_degree_coefficients = []\n for degree in range(coeff_per_inner_poly):\n inner_degree_coefficients.append(parameters[:, 1+degree::coeff_per_inner_poly])\n\n # Find the coefficients of the integrated polynomial.\n sos_degree_coefficients = [parameters[:, 0]]\n sos_degree_coefficients.append(torch.sum(inner_degree_coefficients[0]**2, dim=1))\n sos_degree_coefficients.append(torch.sum(inner_degree_coefficients[0]*inner_degree_coefficients[1], dim=1))\n sos_degree_coefficients.append(torch.sum(inner_degree_coefficients[1]**2, dim=1) / 3)\n\n return sos_degree_coefficients\n\n\n# Functional notation.\nsos_polynomial_transformer = SOSPolynomialTransformerFunc.apply\n"
] | [
[
"torch.zeros",
"torch.set_default_dtype",
"torch.tensor",
"torch.allclose",
"torch.get_default_dtype",
"numpy.random.RandomState"
],
[
"torch.empty_like",
"torch.ones",
"numpy.sqrt",
"torch.zeros",
"torch.sum",
"torch.log"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pandamax/carrier-of-tricks-for-classification-pytorch | [
"283a9f644b43d4800217bd10c1ab2accf1a787c6"
] | [
"converter/pytorch_to_caffe_git.py"
] | [
"'''\nsource:https://github.com/WolffyChen/PytorchToCaffe/blob/master/pytorch_to_caffe.py\n'''\n\nimport torch\nimport torch.nn as nn\nimport traceback\nfrom Caffe import caffe_net\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom Caffe import layer_param\nfrom torch.nn.modules.utils import _pair\nimport numpy as np\n\n\"\"\"\nHow to support a new layer type:\n layer_name=log.add_layer(layer_type_name)\n top_blobs=log.add_blobs(<output of that layer>)\n layer=caffe_net.Layer_param(xxx)\n <set layer parameters>\n [<layer.add_data(*datas)>]\n log.cnet.add_layer(layer)\nPlease MUTE the inplace operations to avoid not find in graph\n注意:只有torch.nn.functional中的函数才能转换为caffe中的层\n\"\"\"\n\n# TODO: support the inplace output of the layers\n\nclass Blob_LOG():\n def __init__(self):\n self.data = {}\n def __setitem__(self, key, value):\n self.data[key] = value\n def __getitem__(self, key):\n return self.data[key]\n def __len__(self):\n return len(self.data)\n\nNET_INITTED = False\n\n# 转换原理解析:通过记录\nclass TransLog(object):\n def __init__(self):\n \"\"\"\n doing init() with inputs Variable before using it\n \"\"\"\n self.layers = {}\n self.detail_layers = {}\n self.detail_blobs = {}\n self._blobs = Blob_LOG()\n self._blobs_data = []\n self.cnet = caffe_net.Caffemodel('')\n self.debug = True\n\n def init(self, inputs):\n \"\"\"\n :param inputs: is a list of input variables\n \"\"\"\n self.layers['data'] = 'data'\n self.add_blobs(inputs, 'data', False)\n\n def add_layer(self, name='layer'):\n if name in self.layers:\n return self.layers[name]\n if name not in self.detail_layers.keys():\n self.detail_layers[name] = 0\n self.detail_layers[name] += 1\n name = '{}{}'.format(name,self.detail_layers[name])\n self.layers[name] = name\n if self.debug:\n print('{} was added to layers'.format(self.layers[name]))\n return self.layers[name]\n\n def add_blobs(self, blobs, name='blob', with_num=True):\n rst=[]\n for blob in blobs:\n self._blobs_data.append(blob) # to block the memory address be rewrited\n blob_id=int(id(blob))\n if name not in self.detail_blobs.keys():\n self.detail_blobs[name] = 0\n self.detail_blobs[name] += 1\n if with_num:\n rst.append('{}{}'.format(name, self.detail_blobs[name]))\n else:\n rst.append('{}'.format(name))\n if self.debug:\n print(\"{}:{} was added to blobs\".format(blob_id, rst[-1]))\n #print('Add blob {} : {}'.format(rst[-1].center(21),blob.size()))\n self._blobs[blob_id] = rst[-1]\n return rst\n\n def blobs(self, var):\n var = id(var)\n #if self.debug:\n # print(\"{}:{} getting\".format(var, self._blobs[var]))\n try:\n return self._blobs[var]\n except:\n print(\"WARNING: CANNOT FOUND blob {}\".format(var))\n return None\n\n def inplace_flag(self, name='layer'):\n key_list = ['add', 'sub', 'mul']\n for kl in key_list:\n if kl in name:\n return False\n key_num = 3\n vl = list(self.layers.values())\n if vl.count(name) >= key_num:\n return False\n\n return True\n\nlog = TransLog()\nlayer_names = {}\n\ndef _conv2d(raw,input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):\n print('conv: ', log.blobs(input))\n x = raw(input, weight, bias, stride, padding, dilation, groups)\n name = log.add_layer(name='conv')\n log.add_blobs([x], name='conv')\n layer = caffe_net.Layer_param(name=name, type='Convolution', bottom=[log.layers[log.blobs(input)]], top=[log.blobs(x)])\n layer.conv_param(x.size()[1], weight.size()[2:], stride=_pair(stride),\n pad=_pair(padding), dilation=_pair(dilation), bias_term=bias is not None, groups=groups)\n if bias is not None:\n 
layer.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())\n else:\n layer.param.convolution_param.bias_term = False\n layer.add_data(weight.cpu().data.numpy())\n log.cnet.add_layer(layer)\n return x\n\ndef _conv_transpose2d(raw,input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):\n x = raw(input, weight, bias, stride, padding, output_padding, groups, dilation)\n name = log.add_layer(name='conv_transpose')\n log.add_blobs([x], name='conv_transpose')\n layer = caffe_net.Layer_param(name=name, type='Deconvolution', bottom=[log.layers[log.blobs(input)]], top=[log.blobs(x)])\n layer.conv_param(x.size()[1], weight.size()[2:], stride=_pair(stride),\n pad=_pair(padding), dilation=_pair(dilation), bias_term=bias is not None, groups=groups)\n if bias is not None:\n layer.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())\n else:\n layer.param.convolution_param.bias_term = False\n layer.add_data(weight.cpu().data.numpy())\n log.cnet.add_layer(layer)\n return x\n\ndef _linear(raw,input, weight, bias=None):\n x = raw(input,weight,bias)\n layer_name = log.add_layer(name='fc')\n top_blobs = log.add_blobs([x],name='fc')\n layer = caffe_net.Layer_param(name=layer_name, type='InnerProduct', bottom=[log.layers[log.blobs(input)]], top=top_blobs)\n layer.fc_param(x.size()[1], has_bias=bias is not None)\n if bias is not None:\n layer.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())\n else:\n layer.add_data(weight.cpu().data.numpy())\n log.cnet.add_layer(layer)\n return x\n\ndef _split(raw,input, split_size, dim=0):\n # split in pytorch is slice in caffe\n x = raw(input, split_size, dim)\n layer_name = log.add_layer('split')\n top_blobs = log.add_blobs([x], name='split')\n layer = caffe_net.Layer_param(name=layer_name, type='Slice', bottom=[log.layers[log.blobs(input)]], top=top_blobs)\n slice_num = int(np.floor(input.size()[dim] / split_size))\n slice_param = caffe_net.pb.SliceParameter(axis=dim, slice_point=[split_size * i for i in range(1, slice_num)])\n layer.param.slice_param.CopyFrom(slice_param)\n log.cnet.add_layer(layer)\n return x\n\ndef _pool(type,raw,input, x, kernel_size, stride, padding, ceil_mode):\n # TODO dilation,ceil_mode,return indices\n layer_name = log.add_layer(name='{}_pool'.format(type))\n top_blobs = log.add_blobs([x], name='{}_pool'.format(type))\n layer = caffe_net.Layer_param(name=layer_name, type='Pooling', bottom=[log.layers[log.blobs(input)]], top=top_blobs)\n # TODO w,h different kernel, stride and padding\n # processing ceil mode\n layer.pool_param(kernel_size=kernel_size, stride=kernel_size if stride is None else stride,\n pad=padding, type=type.upper(), ceil_mode=ceil_mode)\n log.cnet.add_layer(layer)\n if ceil_mode == False and stride is not None:\n oheight = (input.size()[2] - _pair(kernel_size)[0] + 2 * _pair(padding)[0]) % (_pair(stride)[0])\n owidth = (input.size()[3] - _pair(kernel_size)[1] + 2 * _pair(padding)[1]) % (_pair(stride)[1])\n if oheight != 0 or owidth != 0:\n caffe_out = raw(input, kernel_size, stride, padding, ceil_mode=True)\n print(\"WARNING: the output shape miss match at {}: \"\n \"input {} output---Pytorch:{}---Caffe:{}\\n\"\n \"This is caused by the different implementation that ceil mode in caffe and the floor mode in pytorch.\\n\"\n \"You can add the clip layer in caffe prototxt manually if shape mismatch error is caused in caffe. 
\".format(layer_name, input.size(), x.size(), caffe_out.size()))\n\ndef _max_pool2d(raw,input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False):\n x = raw(input, kernel_size, stride, padding, dilation,ceil_mode, return_indices)\n _pool('max',raw,input, x, kernel_size, stride, padding,ceil_mode)\n return x\n\ndef _avg_pool2d(raw,input, kernel_size, stride = None, padding = 0, ceil_mode = False, count_include_pad=True):\n x = raw(input, kernel_size, stride, padding, ceil_mode, count_include_pad)\n _pool('ave',raw,input, x, kernel_size, stride, padding,ceil_mode)\n return x\n\n\ndef _adaptive_pool(type,raw,input, x, kernel_size, stride):\n layer_name = log.add_layer(name='{}_pool'.format(type))\n top_blobs = log.add_blobs([x], name='{}_pool'.format(type))\n layer = caffe_net.Layer_param(name=layer_name, type='Pooling', bottom=[log.layers[log.blobs(input)]], top=top_blobs)\n # TODO w,h different kernel, stride and padding\n # processing ceil mode\n layer.pool_param(kernel_size=kernel_size, stride=kernel_size if stride is None else stride, pad=0, type=type.upper(), ceil_mode='ceil')\n log.cnet.add_layer(layer)\n\ndef _adaptive_max_pool2d(raw,input, output_size=(1, 1)):\n x = raw(input, output_size)\n _adaptive_pool('max',raw,input, x, input.size(2), 1)\n return x\n\ndef _adaptive_avg_pool2d(raw,input, output_size=(1, 1)):\n x = raw(input, output_size)\n _adaptive_pool('ave',raw,input, x, input.size(2), 1)\n return x\n\n\ndef _flatten(raw,*args):\n x = raw(*args)\n if len(args) == 1:\n # TODO\n assert NotImplementedError\n else:\n layer_name = log.add_layer(name='flatten')\n top_blobs = log.add_blobs([x],name='flatten')\n layer = caffe_net.Layer_param(name=layer_name, type='Reshape', bottom=[log.layers[log.blobs(args[0])]], top=top_blobs)\n dims = list([0, 1])\n dims[0] = 0 # the first dim should be batch_size\n for s in x.size()[1:]:\n dims[1] *= s\n layer.param.reshape_param.shape.CopyFrom(caffe_net.pb.BlobShape(dim=dims))\n log.cnet.add_layer(layer)\n return x\n\ndef _max(raw,*args):\n x = raw(*args)\n if len(args) == 1:\n # TODO max in one tensor\n assert NotImplementedError\n else:\n bottom_blobs = []\n for arg in args:\n bottom_blobs.append(log.layers[log.blobs(arg)])\n layer_name = log.add_layer(name='max')\n top_blobs = log.add_blobs([x], name='max')\n layer = caffe_net.Layer_param(name=layer_name, type='Eltwise', bottom=bottom_blobs, top=top_blobs)\n layer.param.eltwise_param.operation = 2\n log.cnet.add_layer(layer)\n return x\n\ndef _cat(raw,inputs, dimension=0):\n x = raw(inputs, dimension)\n bottom_blobs = []\n for input in inputs:\n bottom_blobs.append(log.layers[log.blobs(input)])\n layer_name = log.add_layer(name='cat')\n top_blobs = log.add_blobs([x], name='cat')\n layer = caffe_net.Layer_param(name=layer_name, type='Concat', bottom=bottom_blobs, top=top_blobs)\n layer.param.concat_param.axis = dimension\n log.cnet.add_layer(layer)\n return x\n\ndef _dropout(raw,input, p=0.5, training=False, inplace=False):\n x = raw(input, p, training)\n bottom_blobs = [log.layers[log.blobs(input)]]\n layer_name = log.add_layer(name='dropout')\n top_blobs = log.add_blobs([x], name='dropout')\n layer = caffe_net.Layer_param(name=layer_name, type='Dropout', bottom=bottom_blobs, top=bottom_blobs)\n layer.param.dropout_param.dropout_ratio = p\n layer.param.include.extend([caffe_net.pb.NetStateRule(phase=1)]) # 1 for test, 0 for train\n log.cnet.add_layer(layer)\n log.layers[log.blobs(x)] = log.layers[log.blobs(input)]\n return x\n\ndef _threshold(raw,input, 
threshold, value, inplace=False):\n # for threshold or relu\n if threshold == 0 and value == 0:\n x = raw(input, threshold, value)\n bottom_blobs = [log.layers[log.blobs(input)]]\n name = log.add_layer(name='relu')\n log.add_blobs([x], name='relu')\n layer = caffe_net.Layer_param(name=name, type='ReLU', bottom=bottom_blobs, top=bottom_blobs)\n log.cnet.add_layer(layer)\n log.layers[log.blobs(x)] = log.layers[log.blobs(input)]\n return x\n if value!=0:\n raise NotImplemented(\"value !=0 not implemented in caffe\")\n x = raw(input, threshold, value, inplace)\n bottom_blobs = [log.layers[log.blobs(input)]]\n layer_name = log.add_layer(name='threshold')\n top_blobs = log.add_blobs([x], name='threshold')\n layer = caffe_net.Layer_param(name=layer_name, type='Threshold', bottom=bottom_blobs, top=bottom_blobs)\n layer.param.threshold_param.threshold = threshold\n log.cnet.add_layer(layer)\n log.layers[log.blobs(x)] = log.layers[log.blobs(input)]\n return x\n\ndef _relu(raw,input, inplace=False):\n # for threshold or prelu\n x = raw(input)\n name = log.add_layer(name='relu')\n log.add_blobs([x], name='relu')\n layer = caffe_net.Layer_param(name=name, type='ReLU', bottom=[log.layers[log.blobs(input)]], top=[log.layers[log.blobs(input)]])\n log.cnet.add_layer(layer)\n log.layers[log.blobs(x)] = log.layers[log.blobs(input)]\n return x\n\ndef _prelu(raw,input, weight):\n # for threshold or prelu\n x = raw(input, weight)\n bottom_blobs = [log.layers[log.blobs(input)]]\n name = log.add_layer(name='prelu')\n log.add_blobs([x], name='prelu')\n layer = caffe_net.Layer_param(name=name, type='PReLU', bottom=bottom_blobs, top=bottom_blobs)\n if weight.size()[0] == 1:\n layer.param.prelu_param.channel_shared = True\n layer.add_data(weight.cpu().data.numpy()[0])\n else:\n layer.add_data(weight.cpu().data.numpy())\n log.cnet.add_layer(layer)\n log.layers[log.blobs(x)] = log.layers[log.blobs(input)]\n return x\n\ndef _leaky_relu(raw,input, negative_slope=0.01, inplace=False):\n x = raw(input, negative_slope)\n name = log.add_layer(name='leaky_relu')\n log.add_blobs([x], name='leaky_relu')\n layer = caffe_net.Layer_param(name=name, type='ReLU', bottom=[log.layers[log.blobs(input)]], top=[log.layers[log.blobs(input)]])\n layer.param.relu_param.negative_slope = negative_slope\n log.cnet.add_layer(layer)\n log.layers[log.blobs(x)] = log.layers[log.blobs(input)]\n return x\n\ndef _tanh(raw,input):\n # for tanh activation\n x = raw(input)\n name = log.add_layer(name='tanh')\n log.add_blobs([x], name='tanh')\n layer = caffe_net.Layer_param(name=name, type='TanH', bottom=[log.layers[log.blobs(input)]], top=[log.layers[log.blobs(input)]])\n log.cnet.add_layer(layer)\n log.layers[log.blobs(x)] = log.layers[log.blobs(input)]\n return x\n\ndef _softmax(raw,input, dim=None, _stacklevel=3):\n # for F.softmax\n x = raw(input, dim=dim)\n if dim is None:\n dim = F._get_softmax_dim('softmax', input.dim(), _stacklevel)\n bottom_blobs = [log.layers[log.blobs(input)]]\n name = log.add_layer(name='softmax')\n log.add_blobs([x], name='softmax')\n layer = caffe_net.Layer_param(name=name, type='Softmax', bottom=bottom_blobs, top=[log.blobs(x)])\n layer.param.softmax_param.axis = dim\n log.cnet.add_layer(layer)\n return x\n\ndef _batch_norm(raw,input, running_mean, running_var, weight=None, bias=None, training=False, momentum=0.1, eps=1e-5):\n # because the runing_mean and runing_var will be changed after the _batch_norm operation, we first save the parameters\n x = raw(input, running_mean, running_var, weight, bias, training, momentum, 
eps)\n bottom_blobs = [log.layers[log.blobs(input)]]\n layer_name1 = log.add_layer(name='batch_norm')\n top_blobs = log.add_blobs([x], name='batch_norm')\n if log.inplace_flag(log.layers[log.blobs(input)]):\n top_blobs = bottom_blobs\n layer1 = caffe_net.Layer_param(name=layer_name1, type='BatchNorm', bottom=bottom_blobs, top=top_blobs)\n if running_mean is None or running_var is None:\n # not use global_stats, normalization is performed over the current mini-batch\n layer1.batch_norm_param(use_global_stats=0, eps=eps)\n else:\n layer1.batch_norm_param(use_global_stats=1, eps=eps)\n running_mean_clone = running_mean.clone()\n running_var_clone = running_var.clone()\n layer1.add_data(running_mean_clone.cpu().numpy(), running_var_clone.cpu().numpy(), np.array([1.0]))\n log.cnet.add_layer(layer1)\n if weight is not None and bias is not None:\n layer_name2 = log.add_layer(name='bn_scale')\n layer2 = caffe_net.Layer_param(name=layer_name2, type='Scale', bottom=top_blobs, top=top_blobs)\n layer2.param.scale_param.bias_term = True\n layer2.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())\n log.cnet.add_layer(layer2)\n if log.inplace_flag(log.layers[log.blobs(input)]):\n log.layers[layer_name2] = log.layers[log.blobs(input)]\n else:\n log.layers[layer_name2] = log.layers[log.blobs(x)]\n if log.inplace_flag(log.layers[log.blobs(input)]):\n log.layers[log.blobs(x)] = log.layers[log.blobs(input)]\n return x\n\ndef _instance_norm(raw,input, running_mean=None, running_var=None, weight=None, bias=None, use_input_stats=True, momentum=0.1, eps=1e-5):\n # TODO: the batch size!=1 view operations\n print(\"WARNING: The Instance Normalization transfers to Caffe using BatchNorm, so the batch size should be 1\")\n if running_var is not None or weight is not None:\n # TODO: the affine=True or track_running_stats=True case\n raise NotImplementedError(\"not implement the affine=True or track_running_stats=True case InstanceNorm\")\n x= torch.batch_norm(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps,torch.backends.cudnn.enabled)\n bottom_blobs = [log.layers[log.blobs(input)]]\n layer_name1 = log.add_layer(name='instance_norm')\n top_blobs = log.add_blobs([x], name='instance_norm')\n if log.inplace_flag(log.layers[log.blobs(input)]):\n top_blobs = bottom_blobs\n layer1 = caffe_net.Layer_param(name=layer_name1, type='BatchNorm', bottom=bottom_blobs, top=top_blobs)\n if running_mean is None or running_var is None:\n # not use global_stats, normalization is performed over the current mini-batch\n layer1.batch_norm_param(use_global_stats=0,eps=eps)\n running_mean = torch.zeros(input.size()[1])\n running_var = torch.ones(input.size()[1])\n else:\n layer1.batch_norm_param(use_global_stats=1, eps=eps)\n running_mean_clone = running_mean.clone()\n running_var_clone = running_var.clone()\n layer1.add_data(running_mean_clone.cpu().numpy(), running_var_clone.cpu().numpy(), np.array([1.0]))\n log.cnet.add_layer(layer1)\n if weight is not None and bias is not None:\n layer_name2 = log.add_layer(name='bn_scale')\n layer2 = caffe_net.Layer_param(name=layer_name2, type='Scale', bottom=top_blobs, top=top_blobs)\n layer2.param.scale_param.bias_term = True\n layer2.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())\n log.cnet.add_layer(layer2)\n if log.inplace_flag(log.layers[log.blobs(input)]):\n log.layers[layer_name2] = log.layers[log.blobs(input)]\n else:\n log.layers[layer_name2] = log.layers[log.blobs(x)]\n if log.inplace_flag(log.layers[log.blobs(input)]):\n 
log.layers[log.blobs(x)] = log.layers[log.blobs(input)]\n return x\n\n#upsample layer\ndef _interpolate(raw,input, size=None, scale_factor=None, mode='nearest', align_corners=None):\n # 定义的参数包括 scale,即输出与输入的尺寸比例,如 2;scale_h、scale_w,\n # 同 scale,分别为 h、w 方向上的尺寸比例;pad_out_h、pad_out_w,仅在 scale 为 2 时\n # 有用,对输出进行额外 padding 在 h、w 方向上的数值;upsample_h、upsample_w,输\n # 出图像尺寸的数值。在 Upsample 的相关代码中,推荐仅仅使用 upsample_h、\n # upsample_w 准确定义 Upsample 层的输出尺寸,其他所有的参数都不推荐继续使用。\n # for nearest _interpolate\n if mode != \"nearest\" or align_corners != None:\n raise NotImplementedError(\"not implement F.interpolate totoaly\")\n x = raw(input,size , scale_factor ,mode)\n layer_name = log.add_layer(name='upsample')\n top_blobs = log.add_blobs([x], name='upsample'.format(type))\n layer = caffe_net.Layer_param(name=layer_name, type='Upsample', bottom=[log.layers[log.blobs(input)]], top=top_blobs)\n layer.upsample_param(size =(input.size(2),input.size(3)), scale_factor= scale_factor)\n log.cnet.add_layer(layer)\n return x\n\n\n#sigmid layer\ndef _sigmoid(raw,input):\n # Applies the element-wise function:\n # Sigmoid(x)= 1/(1+exp(−x))\n x = raw(input)\n name = log.add_layer(name='sigmoid')\n log.add_blobs([x], name='sigmoid')\n layer = caffe_net.Layer_param(name=name, type='Sigmoid', bottom=[log.layers[log.blobs(input)]], top=[log.layers[log.blobs(input)]])\n log.cnet.add_layer(layer)\n log.layers[log.blobs(x)] = log.layers[log.blobs(input)]\n\n#tanh layer\ndef _tanh(raw,input):\n # Applies the element-wise function:\n # torch.nn.Tanh\n x = raw(input)\n name = log.add_layer(name='tanh')\n log.add_blobs([x], name='tanh')\n layer = caffe_net.Layer_param(name=name, type='TanH', bottom=[log.layers[log.blobs(input)]], top=[log.layers[log.blobs(input)]])\n log.cnet.add_layer(layer)\n log.layers[log.blobs(x)] = log.layers[log.blobs(input)]\n\ndef _hardtanh(raw, input, min_val, max_val, inplace):\n # Applies the element-wise function:\n # torch.nn.ReLu6\n print('relu6: ', log.blobs(input))\n x = raw(input, min_val, max_val)\n name = log.add_layer(name='relu6')\n log.add_blobs([x], name='relu6_blob')\n layer = caffe_net.Layer_param(name=name, type='ReLU6', bottom=[log.blobs(input)], top=[log.blobs(x)])\n log.cnet.add_layer(layer)\n return x\n\n#L2Norm layer\ndef _l2Norm(raw, input, weight, eps):\n # Applies the element-wise function:\n # L2Norm in vgg_ssd\n x = raw(input, weight, eps)\n name = log.add_layer(name='normalize')\n log.add_blobs([x], name='normalize_blob')\n layer = caffe_net.Layer_param(name=name, type='Normalize', bottom=[log.blobs(input)], top=[log.blobs(x)])\n layer.norm_param(eps)\n\n layer.add_data(weight.cpu().data.numpy())\n log.cnet.add_layer(layer)\n return x\n\ndef _div(raw,inputs, inputs2):\n x=raw(inputs, inputs2)\n log.add_blobs([x],name='div_blob')\n return x\n\n\n# ----- for Variable operations --------\n\ndef _view(input,*args):\n x = raw_view(input, *args)\n if not NET_INITTED:\n return x\n layer_name = log.add_layer(name='view')\n top_blobs = log.add_blobs([x],name='view')\n layer = caffe_net.Layer_param(name=layer_name, type='Reshape', bottom=[log.layers[log.blobs(input)]], top=top_blobs)\n # TODO: reshpae added to nn_tools layer\n dims = list(args)\n dims[0] = 0 # the first dim should be batch_size\n layer.param.reshape_param.shape.CopyFrom(caffe_net.pb.BlobShape(dim=dims))\n log.cnet.add_layer(layer)\n return x\n\ndef _mean(input,*args,**kwargs):\n x = raw_mean(input, *args,**kwargs)\n if not NET_INITTED:\n return x\n layer_name = log.add_layer(name='mean')\n top_blobs = 
log.add_blobs([x],name='mean')\n layer = caffe_net.Layer_param(name=layer_name, type='Reduction', bottom=[log.layers[log.blobs(input)]], top=top_blobs)\n if len(args)==1:\n dim = args[0]\n elif 'dim' in kwargs:\n dim = kwargs['dim']\n else:\n raise NotImplementedError('mean operation must specify a dim')\n layer.param.reduction_param.operation = 4\n layer.param.reduction_param.axis = dim\n log.cnet.add_layer(layer)\n return x\n\ndef _add(input,*args):\n x = raw__add__(input, *args)\n if not NET_INITTED:\n return x\n layer_name = log.add_layer(name='add')\n top_blobs = log.add_blobs([x], name='add')\n if log.blobs(args[0]) == None:\n log.add_blobs([args[0]], name='extra')\n else:\n layer = caffe_net.Layer_param(name=layer_name, type='Eltwise', bottom=[log.layers[log.blobs(input)],log.layers[log.blobs(args[0])]], top=top_blobs)\n layer.param.eltwise_param.operation = 1 # sum is 1\n log.cnet.add_layer(layer)\n return x\n\ndef _iadd(input,*args):\n x = raw__iadd__(input, *args)\n if not NET_INITTED:\n return x\n x = x.clone()\n layer_name = log.add_layer(name='add')\n top_blobs = log.add_blobs([x], name='add')\n layer = caffe_net.Layer_param(name=layer_name, type='Eltwise', bottom=[log.layers[log.blobs(input)],log.layers[log.blobs(args[0])]], top=top_blobs)\n layer.param.eltwise_param.operation = 1 # sum is 1\n log.cnet.add_layer(layer)\n return x\n\ndef _sub(input,*args):\n x = raw__sub__(input, *args)\n if not NET_INITTED:\n return x\n layer_name = log.add_layer(name='sub')\n top_blobs = log.add_blobs([x], name='sub')\n layer = caffe_net.Layer_param(name=layer_name, type='Eltwise', bottom=[log.layers[log.blobs(input)],log.layers[log.blobs(args[0])]], top=top_blobs)\n layer.param.eltwise_param.operation = 1 # sum is 1\n layer.param.eltwise_param.coeff.extend([1.,-1.])\n log.cnet.add_layer(layer)\n return x\n\ndef _isub(input,*args):\n x = raw__isub__(input, *args)\n if not NET_INITTED:\n return x\n x = x.clone()\n layer_name = log.add_layer(name='sub')\n top_blobs = log.add_blobs([x], name='sub')\n layer = caffe_net.Layer_param(name=layer_name, type='Eltwise', bottom=[log.layers[log.blobs(input)],log.layers[log.blobs(args[0])]], top=top_blobs)\n layer.param.eltwise_param.operation = 1 # sum is 1\n log.cnet.add_layer(layer)\n return x\n\ndef _mul(input,*args):\n x = raw__mul__(input, *args)\n if not NET_INITTED:\n return x\n layer_name = log.add_layer(name='mul')\n top_blobs = log.add_blobs([x], name='mul')\n layer = caffe_net.Layer_param(name=layer_name, type='Eltwise', bottom=[log.layers[log.blobs(input)], log.layers[log.blobs(args[0])]], top=top_blobs)\n layer.param.eltwise_param.operation = 0 # product is 1\n log.cnet.add_layer(layer)\n return x\n\ndef _imul(input,*args):\n x = raw__imul__(input, *args)\n if not NET_INITTED:\n return x\n x = x.clone()\n layer_name = log.add_layer(name='mul')\n top_blobs = log.add_blobs([x], name='mul')\n layer = caffe_net.Layer_param(name=layer_name, type='Eltwise', bottom=[log.layers[log.blobs(input)], log.layers[log.blobs(args[0])]], top=top_blobs)\n layer.param.eltwise_param.operation = 0 # product is 1\n layer.param.eltwise_param.coeff.extend([1., -1.])\n log.cnet.add_layer(layer)\n return x\n\n\n#Permute layer\ndef _permute(input,*args):\n x = raw__permute__(input, *args)\n name = log.add_layer(name='permute')\n log.add_blobs([x], name='permute')\n layer = caffe_net.Layer_param(name=name, type='Permute', bottom=[log.blobs(input)], top=[log.blobs(x)])\n order1 = args[0]\n order2 = args[1]\n order3 = args[2]\n order4 = args[3]\n\n 
layer.permute_param(order1, order2, order3, order4)\n log.cnet.add_layer(layer)\n return x\n\n#contiguous\ndef _contiguous(input,*args):\n x = raw__contiguous__(input, *args)\n name = log.add_layer(name='contiguous')\n log.add_blobs([x], name='contiguous')\n layer = caffe_net.Layer_param(name=name, type='NeedRemove', bottom=[log.blobs(input)], top=[log.blobs(x)])\n log.cnet.add_layer(layer)\n return x\n\n#pow\ndef _pow(input,*args):\n x = raw__pow__(input, *args)\n log.add_blobs([x], name='pow')\n return x\n\n#sum\ndef _sum(input,*args):\n x = raw__sum__(input, *args)\n log.add_blobs([x], name='sum')\n return x\n\n# sqrt\ndef _sqrt(input,*args):\n x = raw__sqrt__(input, *args)\n log.add_blobs([x], name='sqrt')\n return x\n\n# unsqueeze\ndef _unsqueeze(input,*args):\n x = raw__unsqueeze__(input, *args)\n log.add_blobs([x], name='unsqueeze')\n return x\n\n# sqrt\ndef _expand_as(input,*args):\n x = raw__expand_as__(input, *args)\n log.add_blobs([x], name='expand_as')\n return x\n\n\n\n# 核心组件,通过该类,实现对torch的function中的operators的输入,输出以及参数的读取\nclass Rp(object):\n def __init__(self, raw, replace, **kwargs):\n # replace the raw function to replace function\n self.obj = replace\n self.raw = raw\n\n def __call__(self, *args, **kwargs):\n if not NET_INITTED:\n return self.raw(*args, **kwargs)\n for stack in traceback.walk_stack(None):\n if 'self' in stack[0].f_locals:\n layer = stack[0].f_locals['self']\n if layer in layer_names:\n log.pytorch_layer_name = layer_names[layer]\n print(layer_names[layer])\n break\n out = self.obj(self.raw,*args,**kwargs)\n # if isinstance(out,Variable):\n # out = [out]\n return out\n\n\n\nF.conv2d = Rp(F.conv2d, _conv2d)\nF.linear = Rp(F.linear, _linear)\nF.relu = Rp(F.relu, _relu)\nF.leaky_relu = Rp(F.leaky_relu, _leaky_relu)\nF.max_pool2d = Rp(F.max_pool2d, _max_pool2d)\nF.avg_pool2d = Rp(F.avg_pool2d, _avg_pool2d)\nF.dropout = Rp(F.dropout, _dropout)\nF.threshold = Rp(F.threshold, _threshold)\nF.prelu = Rp(F.prelu, _prelu)\nF.batch_norm = Rp(F.batch_norm, _batch_norm)\nF.instance_norm = Rp(F.instance_norm, _instance_norm)\nF.softmax = Rp(F.softmax, _softmax)\nF.conv_transpose2d = Rp(F.conv_transpose2d, _conv_transpose2d)\nF.interpolate = Rp(F.interpolate, _interpolate)\nF.sigmoid = Rp(F.sigmoid, _sigmoid)\nF.tanh = Rp(F.tanh, _tanh)\nF.hardtanh = Rp(F.hardtanh, _hardtanh)\n# F.l2norm = Rp(F.l2norm, _l2Norm)\nF.adaptive_max_pool2d = Rp(F.adaptive_max_pool2d, _adaptive_max_pool2d)\nF.adaptive_avg_pool2d = Rp(F.adaptive_avg_pool2d, _adaptive_avg_pool2d)\n\ntorch.split = Rp(torch.split, _split)\ntorch.max = Rp(torch.max, _max)\ntorch.cat = Rp(torch.cat, _cat)\ntorch.div = Rp(torch.div, _div)\ntorch.flatten = Rp(torch.flatten, _flatten)\n\n# TODO: other types of the view function\ntry:\n raw_view = Variable.view\n Variable.view = _view\n raw_mean = Variable.mean\n Variable.mean = _mean\n raw__add__ = Variable.__add__\n Variable.__add__ = _add\n raw__iadd__ = Variable.__iadd__\n Variable.__iadd__ = _iadd\n raw__sub__ = Variable.__sub__\n Variable.__sub__ = _sub\n raw__isub__ = Variable.__isub__\n Variable.__isub__ = _isub\n raw__mul__ = Variable.__mul__\n Variable.__mul__ = _mul\n raw__imul__ = Variable.__imul__\n Variable.__imul__ = _imul\nexcept:\n # for new version 0.4.0 and later version\n for t in [torch.Tensor]:\n raw_view = t.view\n t.view = _view\n raw_mean = t.mean\n t.mean = _mean\n raw__add__ = t.__add__\n t.__add__ = _add\n raw__iadd__ = t.__iadd__\n t.__iadd__ = _iadd\n raw__sub__ = t.__sub__\n t.__sub__ = _sub\n raw__isub__ = t.__isub__\n t.__isub__ = _isub\n 
raw__mul__ = t.__mul__\n t.__mul__=_mul\n raw__imul__ = t.__imul__\n t.__imul__ = _imul\n raw__permute__ = t.permute\n t.permute = _permute\n raw__contiguous__ = t.contiguous\n t.contiguous = _contiguous\n raw__pow__ = t.pow\n t.pow = _pow\n raw__sum__ = t.sum\n t.sum = _sum\n raw__sqrt__ = t.sqrt\n t.sqrt = _sqrt\n raw__unsqueeze__ = t.unsqueeze\n t.unsqueeze = _unsqueeze\n raw__expand_as__ = t.expand_as\n t.expand_as = _expand_as\n\n\ndef trans_net(net,input_var, name='TransferedPytorchModel'):\n print('Starting Transform, This will take a while')\n log.init([input_var])\n log.cnet.net.name = name\n log.cnet.net.input.extend([log.blobs(input_var)])\n log.cnet.net.input_dim.extend(input_var.size())\n # layer = caffe_net.Layer_param(name='data', type='Input', top=['data'])\n # layer.input_param(input_var.data.numpy().shape)\n # log.cnet.add_layer(layer)\n global NET_INITTED\n NET_INITTED = True\n for name,layer in net.named_modules():\n layer_names[layer] = name\n print(\"torch ops name:\", layer_names)\n out = net.forward(input_var)\n print('Transform Completed')\n for key in log.layers:\n print('{} {}'.format(key, log.layers[key]))\n\ndef save_prototxt(save_name):\n # log.cnet.remove_layer_by_type(\"NeedRemove\")\n log.cnet.save_prototxt(save_name)\n\ndef save_caffemodel(save_name):\n log.cnet.save(save_name)"
] | [
[
"numpy.array",
"torch.batch_norm",
"torch.nn.modules.utils._pair"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aryejanoff/Nourishment-Coordination | [
"e9fb6425a5faca96d08b2235a58d339b3f20bda8"
] | [
"Field Data/CoastSat Data Codes Analysis/Barnegat Light/BarnegatLight.py"
] | [
"#==========================================================#\n# Shoreline extraction from satellite images\n#==========================================================#\n\n# Kilian Vos WRL 2018\n\n#%% 1. Initial settings\n\n# load modules\nimport os\nimport numpy as np\nimport pickle\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport matplotlib.pyplot as plt\nfrom coastsat import SDS_download, SDS_preprocess, SDS_shoreline, SDS_tools, SDS_transects\n\n# region of interest (longitude, latitude in WGS84)\npolygon = [[[-74.113056, 39.765471],\n [-74.086892, 39.759295],\n [-74.108082, 39.737243],\n [-74.122002, 39.741299],\n [-74.113056, 39.765471]]]\n# can also be loaded from a .kml polygon\n#kml_polygon = os.path.join(os.getcwd(), 'examples', 'NARRA_polygon.kml')\n#polygon = SDS_tools.polygon_from_kml(kml_polygon)\n \n# date range\ndates = ['2011-01-01', '2020-01-01']\n\n# satellite missions\nsat_list = ['L5', 'L7', 'L8', 'S2']\n# sat_list = ['S2']\n\n# name of the site\nsitename = 'BL'\n\n# filepath where data will be stored\nfilepath_data = os.path.join(os.getcwd(), 'data')\n\n# put all the inputs into a dictionnary\ninputs = {\n 'polygon': polygon,\n 'dates': dates,\n 'sat_list': sat_list,\n 'sitename': sitename,\n 'filepath': filepath_data\n }\n\n# before downloading the images, check how many images are available for your inputs\nSDS_download.check_images_available(inputs);\n\n#%% 2. Retrieve images\n\n# only uncomment this line if you want Landsat Tier 2 images (not suitable for time-series analysis)\n# inputs['include_T2'] = True\n\n# retrieve satellite images from GEE\nmetadata = SDS_download.retrieve_images(inputs)\n\n# if you have already downloaded the images, just load the metadata file\nmetadata = SDS_download.get_metadata(inputs) \n\n#%% 3. 
Batch shoreline detection\n \n# settings for the shoreline extraction\nsettings = { \n # general parameters:\n 'cloud_thresh': 0.5, # threshold on maximum cloud cover\n 'output_epsg': 28356, # epsg code of spatial reference system desired for the output \n # quality control:\n 'check_detection': True, # if True, shows each shoreline detection to the user for validation\n 'save_figure': True, # if True, saves a figure showing the mapped shoreline for each image\n # add the inputs defined previously\n 'inputs': inputs,\n # [ONLY FOR ADVANCED USERS] shoreline detection parameters:\n 'min_beach_area': 4500, # minimum area (in metres^2) for an object to be labelled as a beach\n 'buffer_size': 150, # radius (in metres) of the buffer around sandy pixels considered in the shoreline detection\n 'min_length_sl': 200, # minimum length (in metres) of shoreline perimeter to be valid\n 'cloud_mask_issue': False, # switch this parameter to True if sand pixels are masked (in black) on many images \n 'sand_color': 'default', # 'default', 'dark' (for grey/black sand beaches) or 'bright' (for white sand beaches)\n}\n\n# [OPTIONAL] preprocess images (cloud masking, pansharpening/down-sampling)\nSDS_preprocess.save_jpg(metadata, settings)\n\n# [OPTIONAL] create a reference shoreline (helps to identify outliers and false detections)\nsettings['reference_shoreline'] = SDS_preprocess.get_reference_sl(metadata, settings)\n# set the max distance (in meters) allowed from the reference shoreline for a detected shoreline to be valid\nsettings['max_dist_ref'] = 100 \n\n# extract shorelines from all images (also saves output.pkl and shorelines.kml)\noutput = SDS_shoreline.extract_shorelines(metadata, settings)\n\n# plot the mapped shorelines\nfig = plt.figure()\nplt.axis('equal')\nplt.xlabel('Eastings')\nplt.ylabel('Northings')\nplt.grid(linestyle=':', color='0.5')\nfor i in range(len(output['shorelines'])):\n sl = output['shorelines'][i]\n date = output['dates'][i]\n plt.plot(sl[:,0], sl[:,1], '.', label=date.strftime('%d-%m-%Y'))\nplt.legend()\nmng = plt.get_current_fig_manager() \nmng.window.showMaximized() \nfig.set_size_inches([15.76, 8.52])\n\n#%% 4. 
Shoreline analysis\n\n# if you have already mapped the shorelines, load the output.pkl file\nfilepath = os.path.join(inputs['filepath'], sitename)\nwith open(os.path.join(filepath, sitename + '_output' + '.pkl'), 'rb') as f:\n output = pickle.load(f) \n\n# now we have to define cross-shore transects over which to quantify the shoreline changes\n# each transect is defined by two points, its origin and a second point that defines its orientation\n\n# there are 3 options to create the transects:\n# - option 1: draw the shore-normal transects along the beach\n# - option 2: load the transect coordinates from a .kml file\n# - option 3: create the transects manually by providing the coordinates\n\n# option 1: draw origin of transect first and then a second point to define the orientation\ntransects = SDS_transects.draw_transects(output, settings)\n \n# option 2: load the transects from a .geojson file\n#geojson_file = os.path.join(os.getcwd(), 'examples', 'NARRA_transects.geojson')\n#transects = SDS_tools.transects_from_geojson(geojson_file)\n\n# option 3: create the transects by manually providing the coordinates of two points \n#transects = dict([])\n#transects['Transect 1'] = np.array([[342836, 6269215], [343315, 6269071]])\n#transects['Transect 2'] = np.array([[342482, 6268466], [342958, 6268310]])\n#transects['Transect 3'] = np.array([[342185, 6267650], [342685, 6267641]])\n \n# intersect the transects with the 2D shorelines to obtain time-series of cross-shore distance\n# (also saved a .csv file with the time-series, dates are in UTC time)\nsettings['along_dist'] = 25\ncross_distance = SDS_transects.compute_intersection(output, transects, settings) \n\n# plot the time-series\nfrom matplotlib import gridspec\nfig = plt.figure()\ngs = gridspec.GridSpec(len(cross_distance),1)\ngs.update(left=0.05, right=0.95, bottom=0.05, top=0.95, hspace=0.05)\nfor i,key in enumerate(cross_distance.keys()):\n if np.all(np.isnan(cross_distance[key])):\n continue\n ax = fig.add_subplot(gs[i,0])\n ax.grid(linestyle=':', color='0.5')\n ax.set_ylim([-50,50])\n ax.plot(output['dates'], cross_distance[key]- np.nanmedian(cross_distance[key]), '-^', markersize=6)\n ax.set_ylabel('distance [m]', fontsize=12)\n ax.text(0.5,0.95,'Transect ' + key, bbox=dict(boxstyle=\"square\", ec='k',fc='w'), ha='center',\n va='top', transform=ax.transAxes, fontsize=14)\nmng = plt.get_current_fig_manager() \nmng.window.showMaximized() \nfig.set_size_inches([15.76, 8.52])"
] | [
[
"matplotlib.pyplot.legend",
"numpy.nanmedian",
"numpy.isnan",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.get_current_fig_manager",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rspotc/simphony | [
"3c458f253192a43200ad097428f54fc0e3dafaa1"
] | [
"examples/gm.py"
] | [
"import numpy as np\n\nimport simphony.core as core\nfrom simphony.core import ComponentInstance as inst\nimport simphony.DeviceLibrary.ebeam as dev\nimport simphony.DeviceLibrary.sipann as lib\nimport simphony.simulation as sim\n\nimport matplotlib.pyplot as plt\n\n# -----------------------------------------------------------------------------\n#\n# Some helper functions for converting between wavelength and frequency\n#\nc = 299792458\ndef freq2wl(f):\n return c/f\ndef wl2freq(l):\n return c/l\n\n# -----------------------------------------------------------------------------\n#\n# Define all input component instances\n#\ninputs = [inst(dev.ebeam_gc_te1550) for _ in range(4)]\nwg1 = [inst(dev.ebeam_wg_integral_1550, extras={'length':100e-6}) for _ in range(4)]\ndc1 = [inst(lib.sipann_dc_fifty) for _ in range(2)]\nwg_inner1 = [inst(dev.ebeam_wg_integral_1550, extras={'length':100e-6}) for _ in range(2)]\ncrossover = inst(lib.sipann_dc_crossover1550)\nwg_inner2 = [inst(dev.ebeam_wg_integral_1550, extras={'length':102.125e-6}) for _ in range(2)]\nwg_outer = [inst(dev.ebeam_wg_integral_1550, extras={'length':300e-6}) for _ in range(2)]\ndc2 = [inst(lib.sipann_dc_fifty) for _ in range(2)]\nwg3 = [inst(dev.ebeam_wg_integral_1550, extras={'length':100e-6}) for _ in range(4)]\noutputs = [inst(dev.ebeam_gc_te1550) for _ in range(4)]\n\n# -----------------------------------------------------------------------------\n#\n# Define all circuit connections\n#\nconnections = []\nfor i in range(4):\n connections.append([inputs[i], 0, wg1[i], 0])\n\nconnections.append([wg1[0], 1, dc1[0], 1])\nconnections.append([wg1[1], 1, dc1[0], 0])\nconnections.append([wg1[2], 1, dc1[1], 1])\nconnections.append([wg1[3], 1, dc1[1], 0])\n\nconnections.append([wg_outer[0], 0, dc1[0], 3])\nconnections.append([wg_outer[1], 0, dc1[1], 2])\nconnections.append([wg_inner1[0], 0, dc1[0], 2])\nconnections.append([wg_inner1[1], 0, dc1[1], 3])\n\nconnections.append([wg_inner1[0], 1, crossover, 1])\nconnections.append([wg_inner1[1], 1, crossover, 0])\nconnections.append([crossover, 3, wg_inner2[0], 0])\nconnections.append([crossover, 2, wg_inner2[1], 0])\n\nconnections.append([wg_outer[0], 1, dc2[0], 1])\nconnections.append([wg_outer[1], 1, dc2[1], 0])\nconnections.append([wg_inner2[0], 1, dc2[0], 0])\nconnections.append([wg_inner2[1], 1, dc2[1], 1])\n\nconnections.append([dc2[0], 3, wg3[0], 0])\nconnections.append([dc2[0], 2, wg3[1], 0])\nconnections.append([dc2[1], 3, wg3[2], 0])\nconnections.append([dc2[1], 2, wg3[3], 0])\n\nfor i in range(4):\n connections.append([outputs[i], 0, wg3[i], 1])\n\ndef local():\n plt.figure()\n device = dc1[0]\n f,s = device.get_s_parameters()\n\n set_wl = 1550e-9\n set_freq = wl2freq(set_wl)\n # set_freq = 1.93e+14\n idx = np.argmax(f>set_freq)\n print(idx, freq2wl(f[idx]))\n\n plt.plot(f, np.abs(s[:,3,0])**2)\n plt.plot(f, np.abs(s[:,2,0])**2)\n plt.title(\"DC\")\n plt.legend()\n plt.tight_layout()\n\n plt.figure()\n plt.plot(f, np.rad2deg(np.unwrap(np.angle(s[:,3,0]))))\n plt.plot(f, np.rad2deg(np.unwrap(np.angle(s[:,2,0]))))\n plt.legend()\n plt.tight_layout()\n plt.show()\n# local()\n\n# -----------------------------------------------------------------------------\n#\n# Run the actual simulation (over some optional frequency range)\n#\nnl = core.Netlist()\nnl.load(connections, formatter='ll')\n# simu = sim.Simulation(nl, start_freq=1.925e+14, stop_freq=1.945e+14)\nsimu = sim.Simulation(nl, start_freq=wl2freq(1.5501e-6), stop_freq=wl2freq(1.5499e-6))\n# simu = sim.Simulation(nl)\n\n# Get the 
simulation results\nfreq, s = simu.freq_array, simu.s_parameters()\n\n\n# -----------------------------------------------------------------------------\n#\n# We're interested in investigating behavior at this frequency\n#\nset_wl = 1550e-9\nset_freq = wl2freq(set_wl)\n# set_freq = 1.93e+14\n\n# -----------------------------------------------------------------------------\n#\n# Plot the response of the entire green machine using input port i\n#\n# for i in range(0,4):\ni = 2\n# plt.figure()\n# # for i in range(1, 2):\n# for j in range(4,8):\n# # plt.plot(freq/1e12, np.abs(s[:,j,i])**2, label=\"Port {} to {}\".format(i, j))\n# plt.plot(freq2wl(freq)*1e9, np.abs(s[:,j,i])**2, label=\"Port {}\".format(j), linewidth=\"0.7\")\n# # plt.axvline(set_freq/1e12)\n# # plt.axvline(1550)\n# plt.legend(loc=\"upper right\")\n# plt.xlabel(\"Wavelength (nm)\")\n# plt.ylabel(\"Fractional Optical Power\")\n\nplt.figure()\nidx = np.argmax(freq>set_freq)\nprint(idx, freq2wl(freq[idx]))\n# for i in range(1, 2):\noffsets = [0] * 4\nfor j in range(4,8):\n offsets[j-4] = np.angle(s[idx,j,i])\n\nangles = [None] * 4\nfor j in range(4,8):\n angles[j-4] = np.unwrap(np.angle(s[:,j,i]))\n\nprint(offsets, \"Min:\", min(offsets))\nfor j in range(4):\n angles[j] -= min(offsets)\n print(angles[j][idx])\n\nfor j in range(4,8):\n # plt.plot(freq/1e12, np.rad2deg(np.unwrap(np.angle(s[:,j,i]))), label=\"Port {} to {}\".format(i, j))\n # angles = np.rad2deg(np.unwrap(np.angle(s[:,j,i])))\n # angles = np.unwrap(np.angle(s[:,j,i]))\n # angles -= min(offsets)\n # angles = angles + (angles[idx] % 2*np.pi) - angles[idx]\n plt.plot(freq2wl(freq)*1e9, angles[j-4], linewidth='0.7')\nplt.axvline(1550, color='k', linestyle='--', linewidth='0.5')\nplt.legend([r'$\\phi_4$',r'$\\phi_5$',r'$\\phi_6$',r'$\\phi_7$'], loc='upper right')\nplt.xlabel(\"Wavelength (nm)\")\nplt.ylabel(\"Phase\")\nplt.show()\n\nimport sys; sys.exit()\n\nplt.figure()\nidx = np.argmax(freq>set_freq)\nprint(idx, freq2wl(freq[idx]))\nfor j in range(4,8):\n # print(np.rad2deg(np.angle(s[idx,j,i])))\n angles = np.rad2deg(np.unwrap(np.angle(s[:,j,i])))\n angles = angles + (angles[idx] % 2*np.pi) - angles[idx]\n print(angles[idx], angles)\n plt.plot(freq2wl(freq)*1e9, angles, label=\"Port {} to {}\".format(i, j))\n plt.plot(freq2wl(freq[idx])*1e9, angles[idx], 'rx')\n\nplt.axvline(1550)\nplt.legend()\nplt.xlabel(\"Wavelength (nm)\")\nplt.ylabel(\"Phase\")\n\n\n# plt.axvline(set_freq/1e12)\n\n\nplt.show()\n\n# -----------------------------------------------------------------------------\n#\n# Response at precisely 1550nm\n#\nidx = np.argmax(freq>set_freq)\nprint(idx, freq2wl(freq[idx]))\n\n# Phases of the four outputs at 1550nm\nplt.figure()\ncircle = np.linspace(0, 2*np.pi)\nplt.plot(np.cos(circle), np.sin(circle))\n\n# for i in range(0,4):\ninputs1550 = [0] * 8\nfor output in range(4,8):\n rad = np.angle(s[idx,output,i])\n plt.plot(np.cos(rad), np.sin(rad), 'o')\n inputs1550[output-4] = np.cos(rad) + np.sin(rad) * 1j\nplt.xlim(-1, 1)\nplt.ylim(-1, 1)\nplt.axes().set_aspect('equal')\n\n# for val in inputs1550:\n# print(val, np.rad2deg(np.angle(val)))\n\n# -----------------------------------------------------------------------------\n#\n# Multiple input stuffs:\n#\ndef multi_input(num_ports, inputs, verbose=True):\n inputs = np.array(inputs, dtype=np.complex_)\n if verbose:\n angles = np.rad2deg(np.angle(inputs))\n print(angles - min(angles))\n out = np.zeros([len(freq), num_ports], dtype='complex128')\n for j in range(len(freq)):\n out[j, :] = np.dot(s[j, :, :], inputs.T)\n 
return out\n\ndef plot_outputs(out):\n plt.figure()\n for j in range(8):\n plt.subplot(8, 1, j+1)\n plt.plot(freq/1e12, np.abs(out[:,j])**2, label=\"Port {}\".format(j))\n plt.axvline(set_freq/1e12)\n plt.legend()\n plt.xlabel(\"Frequency (THz)\")\n plt.ylabel(\"Normalized Power\")\n\nout = multi_input(8, inputs1550)\n\nplt.figure()\nfor j in range(8):\n plt.subplot(8, 1, j+1)\n plt.plot(freq/1e12, np.abs(out[:,j])**2, label=\"Port {}\".format(j))\n plt.axvline(set_freq/1e12)\n plt.legend()\n plt.xlabel(\"Frequency (THz)\")\n plt.ylabel(\"Normalized Power\")\n\n# plt.figure()\n# for j in range(8):\n# plt.plot(freq/1e12, np.rad2deg(np.unwrap(np.angle(out[:,j]))), label=\"Port {}\".format(j))\n# # plt.plot(freq/1e12, np.rad2deg(np.angle(s[:,j,i])), label=\"Port {} to {}\".format(i, j))\n# plt.axvline(set_freq/1e12)\n# plt.legend()\n# plt.xlabel(\"Frequency (THz)\")\n# plt.ylabel(\"Phase\")\n\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.dot",
"numpy.linspace",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.tight_layout",
"numpy.sin",
"numpy.argmax",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.axvline",
"numpy.abs",
"numpy.cos",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"numpy.angle"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
edwardyang12/implicit_depth | [
"4dc85bbddfc96af60006d6006be0c74478fe9204"
] | [
"src/models/implicit_net.py"
] | [
"import torch\r\ntorch.autograd.set_detect_anomaly(True)\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\n\r\n\r\n# Positional encoding (section 5.1)\r\nclass Embedder:\r\n def __init__(self, **kwargs):\r\n self.kwargs = kwargs\r\n self.create_embedding_fn()\r\n \r\n def create_embedding_fn(self):\r\n embed_fns = []\r\n d = self.kwargs['input_dims']\r\n out_dim = 0\r\n if self.kwargs['include_input']:\r\n embed_fns.append(lambda x : x)\r\n out_dim += d\r\n \r\n max_freq = self.kwargs['max_freq_log2']\r\n N_freqs = self.kwargs['num_freqs']\r\n \r\n if self.kwargs['log_sampling']:\r\n freq_bands = 2.**torch.linspace(0., max_freq, steps=N_freqs)\r\n else:\r\n freq_bands = torch.linspace(2.**0., 2.**max_freq, steps=N_freqs)\r\n \r\n for freq in freq_bands:\r\n for p_fn in self.kwargs['periodic_fns']:\r\n embed_fns.append(lambda x, p_fn=p_fn, freq=freq : p_fn(x * freq))\r\n out_dim += d\r\n \r\n self.embed_fns = embed_fns\r\n self.out_dim = out_dim\r\n \r\n def embed(self, inputs):\r\n return torch.cat([fn(inputs) for fn in self.embed_fns], -1)\r\n\r\n\r\ndef get_embedder(multires, i=0):\r\n if i == -1:\r\n return nn.Identity(), 3\r\n \r\n embed_kwargs = {\r\n 'include_input' : True,\r\n 'input_dims' : 3,\r\n 'max_freq_log2' : multires-1,\r\n 'num_freqs' : multires,\r\n 'log_sampling' : True,\r\n 'periodic_fns' : [torch.sin, torch.cos],\r\n }\r\n \r\n embedder_obj = Embedder(**embed_kwargs)\r\n embed = lambda x, eo=embedder_obj : eo.embed(x)\r\n return embed, embedder_obj.out_dim\r\n\r\n\r\nclass IMNet(nn.Module):\r\n def __init__(self, inp_dim, out_dim, gf_dim=64, use_sigmoid=False):\r\n super(IMNet, self).__init__()\r\n self.inp_dim = inp_dim\r\n self.gf_dim = gf_dim\r\n self.use_sigmoid = use_sigmoid\r\n self.linear_1 = nn.Linear(self.inp_dim, self.gf_dim*4, bias=True)\r\n self.linear_2 = nn.Linear(self.gf_dim*4, self.gf_dim*2, bias=True)\r\n self.linear_3 = nn.Linear(self.gf_dim*2, self.gf_dim*1, bias=True)\r\n self.linear_4 = nn.Linear(self.gf_dim*1, out_dim, bias=True)\r\n if self.use_sigmoid:\r\n self.sigmoid = nn.Sigmoid()\r\n nn.init.normal_(self.linear_1.weight, mean=0.0, std=0.02)\r\n nn.init.constant_(self.linear_1.bias,0)\r\n nn.init.normal_(self.linear_2.weight, mean=0.0, std=0.02)\r\n nn.init.constant_(self.linear_2.bias,0)\r\n nn.init.normal_(self.linear_3.weight, mean=0.0, std=0.02)\r\n nn.init.constant_(self.linear_3.bias,0)\r\n nn.init.normal_(self.linear_4.weight, mean=1e-5, std=0.02)\r\n nn.init.constant_(self.linear_4.bias,0)\r\n\r\n def forward(self, inp_feat):\r\n l1 = self.linear_1(inp_feat)\r\n l1 = F.leaky_relu(l1, negative_slope=0.02, inplace=True)\r\n\r\n l2 = self.linear_2(l1)\r\n l2 = F.leaky_relu(l2, negative_slope=0.02, inplace=True)\r\n\r\n l3 = self.linear_3(l2)\r\n l3 = F.leaky_relu(l3, negative_slope=0.02, inplace=True)\r\n\r\n l4 = self.linear_4(l3)\r\n \r\n if self.use_sigmoid:\r\n l4 = self.sigmoid(l4)\r\n else:\r\n l4 = torch.max(torch.min(l4, l4*0.01+0.99), l4*0.01)\r\n \r\n return l4\r\n\r\nclass IEF(nn.Module):\r\n def __init__(self, device, inp_dim, out_dim, gf_dim=64, n_iter=3, use_sigmoid=False):\r\n super(IEF, self).__init__()\r\n self.device = device\r\n self.init_offset = torch.Tensor([0.001]).float().to(self.device)\r\n self.inp_dim = inp_dim\r\n self.gf_dim = gf_dim\r\n self.n_iter = n_iter\r\n self.use_sigmoid = use_sigmoid\r\n self.offset_enc = nn.Linear(1, 16, bias=True)\r\n self.linear_1 = nn.Linear(self.inp_dim+16, self.gf_dim*4, bias=True)\r\n self.linear_2 = nn.Linear(self.gf_dim*4, self.gf_dim*2, 
bias=True)\r\n self.linear_3 = nn.Linear(self.gf_dim*2, self.gf_dim*1, bias=True)\r\n self.linear_4 = nn.Linear(self.gf_dim*1, out_dim, bias=True)\r\n if self.use_sigmoid:\r\n self.sigmoid = nn.Sigmoid()\r\n\r\n nn.init.normal_(self.offset_enc.weight, mean=0.0, std=0.02)\r\n nn.init.constant_(self.offset_enc.bias,0)\r\n nn.init.normal_(self.linear_1.weight, mean=0.0, std=0.02)\r\n nn.init.constant_(self.linear_1.bias,0)\r\n nn.init.normal_(self.linear_2.weight, mean=0.0, std=0.02)\r\n nn.init.constant_(self.linear_2.bias,0)\r\n nn.init.normal_(self.linear_3.weight, mean=0.0, std=0.02)\r\n nn.init.constant_(self.linear_3.bias,0)\r\n nn.init.normal_(self.linear_4.weight, mean=1e-5, std=0.02)\r\n nn.init.constant_(self.linear_4.bias,0)\r\n\r\n\r\n def forward(self, inp_feat):\r\n batch_size = inp_feat.shape[0]\r\n # iterative update\r\n pred_offset = self.init_offset.expand(batch_size, -1)\r\n for i in range(self.n_iter):\r\n offset_feat = self.offset_enc(pred_offset)\r\n xc = torch.cat([inp_feat,offset_feat],1)\r\n l1 = self.linear_1(xc)\r\n l1 = F.leaky_relu(l1, negative_slope=0.02, inplace=True)\r\n\r\n l2 = self.linear_2(l1)\r\n l2 = F.leaky_relu(l2, negative_slope=0.02, inplace=True)\r\n\r\n l3 = self.linear_3(l2)\r\n l3 = F.leaky_relu(l3, negative_slope=0.02, inplace=True)\r\n\r\n l4 = self.linear_4(l3)\r\n pred_offset = pred_offset + l4\r\n \r\n if self.use_sigmoid:\r\n pred_offset = self.sigmoid(pred_offset)\r\n else:\r\n pred_offset = torch.max(torch.min(pred_offset, pred_offset*0.01+0.99), pred_offset*0.01)\r\n return pred_offset\r\n"
] | [
[
"torch.linspace",
"torch.autograd.set_detect_anomaly",
"torch.cat",
"torch.nn.init.constant_",
"torch.Tensor",
"torch.min",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"torch.nn.Identity",
"torch.nn.init.normal_",
"torch.nn.functional.leaky_relu"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tacaswell/sunpy | [
"1e06d75408d1a621749a5d4e743ae44a31886100",
"1e06d75408d1a621749a5d4e743ae44a31886100"
] | [
"sunpy/timeseries/sources/norh.py",
"sunpy/conftest.py"
] | [
"\"\"\"\nThis module provies a Nobeyama Radioheliograph `~sunpy.timeseries.TimeSeries`\nsource.\n\"\"\"\nfrom collections import OrderedDict\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas\n\nimport astropy.units as u\nfrom astropy.time import TimeDelta\n\nimport sunpy.io\nfrom sunpy import config\nfrom sunpy.time import parse_time\nfrom sunpy.timeseries.timeseriesbase import GenericTimeSeries\nfrom sunpy.util.metadata import MetaDict\nfrom sunpy.visualization import peek_show\n\nTIME_FORMAT = config.get(\"general\", \"time_format\")\n\n__all__ = ['NoRHTimeSeries']\n\n\nclass NoRHTimeSeries(GenericTimeSeries):\n \"\"\"\n Nobeyama Radioheliograph Correlation lightcurve TimeSeries.\n\n Nobeyama Radioheliograph (NoRH) is a radio telescope dedicated to observing the Sun.\n It consists of 84 parabolic antennas with a 80 cm diameter,\n sitting on lines of 490 m long in the east/west and of 220 m long in the north/south.\n It observes the full solar disk at 17 GHz and 34 GHz with a temporal resolution\n down to 0.1 second resolution (typically 1 second).\n\n Its first observation was in April, 1992 and daily 8-hour observations are available starting June, 1992.\n\n Examples\n --------\n >>> import sunpy.data.sample # doctest: +REMOTE_DATA\n >>> import sunpy.timeseries\n >>> norh = sunpy.timeseries.TimeSeries(sunpy.data.sample.NORH_TIMESERIES, source='NoRH') # doctest: +REMOTE_DATA\n >>> norh.peek() # doctest: +SKIP\n\n References\n ----------\n * `Nobeyama Radioheliograph Homepage <https://solar.nro.nao.ac.jp/norh/>`_\n * `Analysis Manual <https://solar.nro.nao.ac.jp/norh/doc/manuale/index.html>`_\n * `Nobeyama Correlation Plots <https://solar.nro.nao.ac.jp/norh/html/cor_plot/>`_\n \"\"\"\n # Class attribute used to specify the source class of the TimeSeries.\n _source = 'norh'\n\n def __init__(self, data, header, units, **kwargs):\n super().__init__(data, header, units, **kwargs)\n\n @peek_show\n def peek(self, **kwargs):\n \"\"\"\n Plot the NoRH lightcurve TimeSeries.\n\n .. 
plot::\n\n import sunpy.data.sample\n import sunpy.timeseries\n norh = sunpy.timeseries.TimeSeries(sunpy.data.sample.NORH_TIMESERIES, source='NoRH')\n norh.peek()\n\n Parameters\n ----------\n **kwargs : `dict`\n Additional plot keyword arguments that are handed to `~matplotlib.axes.Axes.plot`\n functions.\n \"\"\"\n # Check we have a timeseries valid for plotting\n self._validate_data_for_plotting()\n\n fig, ax = plt.subplots()\n data_lab = str(self.meta.get('OBS-FREQ').values()).replace('[', '').replace(\n ']', '').replace('\\'', '')\n ax.plot(self.to_dataframe().index, self.to_dataframe(), label=data_lab, **kwargs)\n ax.set_yscale(\"log\")\n ax.set_ylim(1e-4, 1)\n ax.set_title('Nobeyama Radioheliograph')\n ax.set_xlabel('Start time: ' + self.to_dataframe().index[0].strftime(TIME_FORMAT))\n ax.set_ylabel('Correlation')\n ax.legend()\n\n return fig\n\n @classmethod\n def _parse_file(cls, filepath):\n \"\"\"\n This method parses NoRH FITS files.\n\n Parameters\n ----------\n filepath : `str`\n The path to the file you want to parse.\n \"\"\"\n hdus = sunpy.io.read_file(filepath)\n return cls._parse_hdus(hdus)\n\n @classmethod\n def _parse_hdus(cls, hdulist):\n \"\"\"\n This method parses a NoRH `astropy.io.fits.HDUList`.\n\n Parameters\n ----------\n hdulist : `astropy.io.fits.HDUList`\n A HDU list.\n \"\"\"\n header = MetaDict(OrderedDict(hdulist[0].header))\n # For these NoRH files, the time series data is recorded in the primary\n # HDU\n data = hdulist[0].data\n\n # No explicit time array in FITS file, so construct the time array from\n # the FITS header\n obs_start_time = parse_time(header['DATE-OBS'] + 'T' + header['CRVAL1'])\n length = len(data)\n cadence = np.float(header['CDELT1'])\n sec_array = np.linspace(0, length - 1, int(length / cadence))\n\n norh_time = obs_start_time + TimeDelta(sec_array*u.second)\n norh_time.precision = 9\n norh_time = norh_time.isot.astype('datetime64')\n\n # Add the units data\n units = OrderedDict([('Correlation Coefficient', u.dimensionless_unscaled)])\n # Todo: check units used.\n return pandas.DataFrame(\n data, index=norh_time, columns=('Correlation Coefficient', )), header, units\n\n @classmethod\n def is_datasource_for(cls, **kwargs):\n \"\"\"\n Determines if header corresponds to a Nobeyama Radioheliograph\n Correlation `~sunpy.timeseries.TimeSeries`.\n \"\"\"\n if 'source' in kwargs.keys():\n if kwargs.get('source', ''):\n return kwargs.get('source', '').lower().startswith(cls._source)\n if 'meta' in kwargs.keys():\n return kwargs['meta'].get('ORIGIN', '').startswith('NOBEYAMA RADIO OBS')\n",
"import os\nimport tempfile\nimport importlib\n\nimport pytest\n\nimport astropy\nimport astropy.config.paths\n\n# Force MPL to use non-gui backends for testing.\ntry:\n import matplotlib\nexcept ImportError:\n pass\nelse:\n matplotlib.use('Agg')\n\n# Don't actually import pytest_remotedata because that can do things to the\n# entrypoints code in pytest.\nremotedata_spec = importlib.util.find_spec(\"pytest_remotedata\")\nHAVE_REMOTEDATA = remotedata_spec is not None\n\n# Do not collect the sample data file because this would download the sample data.\ncollect_ignore = [\"data/sample.py\"]\n\n\[email protected](scope='session', autouse=True)\ndef tmp_config_dir(request):\n \"\"\"\n Globally set the default config for all tests.\n \"\"\"\n tmpdir = tempfile.TemporaryDirectory()\n\n os.environ[\"SUNPY_CONFIGDIR\"] = str(tmpdir.name)\n astropy.config.paths.set_temp_config._temp_path = str(tmpdir.name)\n astropy.config.paths.set_temp_cache._temp_path = str(tmpdir.name)\n\n yield\n\n del os.environ[\"SUNPY_CONFIGDIR\"]\n tmpdir.cleanup()\n astropy.config.paths.set_temp_config._temp_path = None\n astropy.config.paths.set_temp_cache._temp_path = None\n\n\[email protected]()\ndef sunpy_cache(mocker, tmp_path):\n \"\"\"\n Provide a way to add local files to the cache. This can be useful when mocking\n remote requests.\n \"\"\"\n from types import MethodType\n\n from sunpy.data.data_manager.cache import Cache\n from sunpy.data.data_manager.downloader import ParfiveDownloader\n from sunpy.data.data_manager.storage import InMemStorage\n cache = Cache(\n ParfiveDownloader(),\n InMemStorage(),\n tmp_path,\n None\n )\n\n def add(self, url, path):\n self._storage.store({\n 'url': url,\n 'file_path': path,\n 'file_hash': 'none', # hash doesn't matter\n })\n cache.add = MethodType(add, cache)\n\n def func(mocked):\n mocker.patch(mocked, cache)\n return cache\n yield func\n\n\[email protected]()\ndef undo_config_dir_patch():\n \"\"\"\n Provide a way for certain tests to not have the config dir.\n \"\"\"\n oridir = os.environ[\"SUNPY_CONFIGDIR\"]\n del os.environ[\"SUNPY_CONFIGDIR\"]\n yield\n os.environ[\"SUNPY_CONFIGDIR\"] = oridir\n\n\[email protected](scope='session', autouse=True)\ndef hide_parfive_progress(request):\n \"\"\"\n Globally set the HIDE_PARFIVE_PROGESS to hide the parfive progress bar in tests.\n Used by the parfive helper class only.\n \"\"\"\n os.environ[\"HIDE_PARFIVE_PROGESS\"] = \"True\"\n yield\n del os.environ[\"HIDE_PARFIVE_PROGESS\"]\n\n\[email protected](scope='session', autouse=True)\ndef tmp_dl_dir(request):\n \"\"\"\n Globally set the default download directory for the test run to a tmp dir.\n \"\"\"\n with tempfile.TemporaryDirectory() as tmpdir:\n os.environ[\"SUNPY_DOWNLOADDIR\"] = tmpdir\n yield tmpdir\n del os.environ[\"SUNPY_DOWNLOADDIR\"]\n\n\[email protected]()\ndef undo_download_dir_patch():\n \"\"\"\n Provide a way for certain tests to not have tmp download dir.\n \"\"\"\n oridir = os.environ[\"SUNPY_DOWNLOADDIR\"]\n del os.environ[\"SUNPY_DOWNLOADDIR\"]\n yield\n os.environ[\"SUNPY_DOWNLOADDIR\"] = oridir\n\n\ndef pytest_runtest_setup(item):\n \"\"\"\n pytest hook to skip all tests that have the mark 'remotedata' if the\n pytest_remotedata plugin is not installed.\n \"\"\"\n if isinstance(item, pytest.Function):\n if 'remote_data' in item.keywords and not HAVE_REMOTEDATA:\n pytest.skip(\"skipping remotedata tests as pytest-remotedata is not installed\")\n"
] | [
[
"matplotlib.pyplot.subplots",
"numpy.float",
"pandas.DataFrame"
],
[
"matplotlib.use"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Wieschie/autograder | [
"c6fd87027152ef6cad455bf54679211a123384a4"
] | [
"autograder/win32_limit.py"
] | [
"import sys\nimport time\n\nimport numpy\nimport os\nimport subprocess\nimport win32api\n\n__job = None\n\n\ndef win32_limit(max_memory: int = None, max_processes: int = None):\n \"\"\"\n Sets per-process limits on Windows systems.\n\n WARNING: Currently creates a single job, so affects all spawned processes.\n \"\"\"\n # check if script has already been added to the job\n # TODO check if necessary\n global __job\n # if IsProcessInJob(GetCurrentProcess(), __job):\n if __job:\n return\n\n __job = win32api.CreateJobObject(None, \"\")\n win32api.AssignProcessToJobObject(__job, win32api.GetCurrentProcess())\n\n # Get current limit info\n limits = win32api.QueryInformationJobObject(\n None, win32api.JobObjectExtendedLimitInformation\n )\n\n # modify limits\n limit_flags = (\n 0\n | (win32api.JOB_OBJECT_LIMIT_ACTIVE_PROCESS if max_processes else 0)\n | (win32api.JOB_OBJECT_LIMIT_PROCESS_MEMORY if max_memory else 0)\n )\n limits[\"BasicLimitInformation\"][\"LimitFlags\"] = limit_flags\n limits[\"BasicLimitInformation\"][\"ActiveProcessLimit\"] = (\n max_processes + 1 if max_processes else 0\n )\n limits[\"ProcessMemoryLimit\"] = max_memory if max_memory else 0\n\n # set the limits\n win32api.SetInformationJobObject(\n __job, win32api.JobObjectExtendedLimitInformation, limits\n )\n\n\n########################################################################################\n# functions for testing below this line\n\n\ndef parent():\n print(\"parent started\", os.getpid())\n arr1 = numpy.arange(1024 * 10)\n win32_limit(max_memory=1024 * 10)\n arr2 = numpy.arange(1024 * 10)\n for i in range(3):\n subprocess.Popen(\"python win32_limit.py /child\")\n\n input(\"press any key to do stuff to children\")\n\n job_processes = win32api.QueryInformationJobObject(\n None, win32api.JobObjectBasicProcessIdList\n )\n for pid in job_processes:\n if pid == os.getpid(): # Don't kill ourselves\n continue\n print(\"Killed\", pid)\n\n\ndef child():\n print(\"child running\", os.getpid())\n time.sleep(300)\n\n\nif __name__ == \"__main__\":\n if \"/child\" in sys.argv:\n child()\n else:\n parent()\n"
] | [
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
openclimatefix/predict_pv_yield | [
"83f27bd392190f1771221e92bfebb879bf562f5d"
] | [
"predict_pv_yield/models/conv3d/model_sat_nwp.py"
] | [
"import logging\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom predict_pv_yield.models.base_model import BaseModel\nfrom nowcasting_dataloader.batch import BatchML\n\nlogging.basicConfig()\n_LOG = logging.getLogger(\"predict_pv_yield\")\n\n\nclass Model(BaseModel):\n\n name = \"conv3d_sat_nwp\"\n\n def __init__(\n self,\n include_pv_or_gsp_yield_history: bool = True,\n include_nwp: bool = True,\n forecast_minutes: int = 30,\n history_minutes: int = 60,\n number_of_conv3d_layers: int = 4,\n conv3d_channels: int = 32,\n image_size_pixels: int = 64,\n nwp_image_size_pixels: int = 64,\n number_sat_channels: int = 12,\n number_nwp_channels: int = 10,\n fc1_output_features: int = 128,\n fc2_output_features: int = 128,\n fc3_output_features: int = 64,\n output_variable: str = \"pv_yield\",\n embedding_dem: int = 16,\n include_pv_yield_history: int = True,\n include_future_satellite: int = True,\n ):\n \"\"\"\n 3d conv model, that takes in different data streams\n\n architecture is roughly\n 1. satellite image time series goes into many 3d convolution layers.\n 2. nwp time series goes into many 3d convolution layers.\n 3. Final convolutional layer goes to full connected layer. This is joined by other data inputs like\n - pv yield\n - time variables\n Then there ~4 fully connected layers which end up forecasting the pv yield / gsp into the future\n\n include_pv_or_gsp_yield_history: include pv yield data\n include_nwp: include nwp data\n forecast_len: the amount of minutes that should be forecasted\n history_len: the amount of historical minutes that are used\n number_of_conv3d_layers, number of convolution 3d layers that are use\n conv3d_channels, the amount of convolution 3d channels\n image_size_pixels: the input satellite image size\n nwp_image_size_pixels: the input nwp image size\n number_sat_channels: number of nwp channels\n fc1_output_features: number of fully connected outputs nodes out of the the first fully connected layer\n fc2_output_features: number of fully connected outputs nodes out of the the second fully connected layer\n fc3_output_features: number of fully connected outputs nodes out of the the third fully connected layer\n output_variable: the output variable to be predicted\n number_nwp_channels: The number of nwp channels there are\n include_future_satellite: option to include future satellite images, or not\n \"\"\"\n\n self.include_pv_or_gsp_yield_history = include_pv_or_gsp_yield_history\n self.include_nwp = include_nwp\n self.number_of_conv3d_layers = number_of_conv3d_layers\n self.number_of_nwp_features = 128\n self.fc1_output_features = fc1_output_features\n self.fc2_output_features = fc2_output_features\n self.fc3_output_features = fc3_output_features\n self.forecast_minutes = forecast_minutes\n self.history_minutes = history_minutes\n self.output_variable = output_variable\n self.number_nwp_channels = number_nwp_channels\n self.embedding_dem = embedding_dem\n self.include_pv_yield_history = include_pv_yield_history\n self.include_future_satellite = include_future_satellite\n\n super().__init__()\n\n conv3d_channels = conv3d_channels\n\n if include_future_satellite:\n cnn_output_size_time = self.forecast_len_5 + self.history_len_5 + 1\n else:\n cnn_output_size_time = self.history_len_5 + 1\n self.cnn_output_size = (\n conv3d_channels\n * ((image_size_pixels - 2 * self.number_of_conv3d_layers) ** 2)\n * cnn_output_size_time\n )\n\n self.nwp_cnn_output_size = (\n conv3d_channels\n * ((nwp_image_size_pixels - 2 * 
self.number_of_conv3d_layers) ** 2)\n * (self.forecast_len_60 + self.history_len_60 + 1)\n )\n\n # conv0\n self.sat_conv0 = nn.Conv3d(\n in_channels=number_sat_channels,\n out_channels=conv3d_channels,\n kernel_size=(3, 3, 3),\n padding=(1, 0, 0),\n )\n for i in range(0, self.number_of_conv3d_layers - 1):\n layer = nn.Conv3d(\n in_channels=conv3d_channels,\n out_channels=conv3d_channels,\n kernel_size=(3, 3, 3),\n padding=(1, 0, 0),\n )\n setattr(self, f\"sat_conv{i + 1}\", layer)\n\n self.fc1 = nn.Linear(\n in_features=self.cnn_output_size, out_features=self.fc1_output_features\n )\n self.fc2 = nn.Linear(\n in_features=self.fc1_output_features, out_features=self.fc2_output_features\n )\n\n # nwp\n if include_nwp:\n self.nwp_conv0 = nn.Conv3d(\n in_channels=number_nwp_channels,\n out_channels=conv3d_channels,\n kernel_size=(3, 3, 3),\n padding=(1, 0, 0),\n )\n for i in range(0, self.number_of_conv3d_layers - 1):\n layer = nn.Conv3d(\n in_channels=conv3d_channels,\n out_channels=conv3d_channels,\n kernel_size=(3, 3, 3),\n padding=(1, 0, 0),\n )\n setattr(self, f\"nwp_conv{i + 1}\", layer)\n\n self.nwp_fc1 = nn.Linear(\n in_features=self.nwp_cnn_output_size, out_features=self.fc1_output_features\n )\n self.nwp_fc2 = nn.Linear(\n in_features=self.fc1_output_features, out_features=self.number_of_nwp_features\n )\n\n if self.embedding_dem:\n self.pv_system_id_embedding = nn.Embedding(\n num_embeddings=940, embedding_dim=self.embedding_dem\n )\n\n if self.include_pv_yield_history:\n self.pv_fc1 = nn.Linear(\n in_features=self.number_of_pv_samples_per_batch * (self.history_len_5 + 1),\n out_features=128,\n )\n\n fc3_in_features = self.fc2_output_features\n if include_pv_or_gsp_yield_history:\n fc3_in_features += self.number_of_samples_per_batch * (self.history_len_30 + 1)\n if include_nwp:\n fc3_in_features += 128\n if self.embedding_dem:\n fc3_in_features += self.embedding_dem\n if self.include_pv_yield_history:\n fc3_in_features += 128\n\n self.fc3 = nn.Linear(in_features=fc3_in_features, out_features=self.fc3_output_features)\n self.fc4 = nn.Linear(in_features=self.fc3_output_features, out_features=self.forecast_len)\n # self.fc5 = nn.Linear(in_features=32, out_features=8)\n # self.fc6 = nn.Linear(in_features=8, out_features=1)\n\n def forward(self, x):\n\n if type(x) == dict:\n x = BatchML(**x)\n\n # ******************* Satellite imagery *************************\n # Shape: batch_size, channel, seq_length, height, width\n sat_data = x.satellite.data.float()\n batch_size, n_chans, seq_len, height, width = sat_data.shape\n\n if not self.include_future_satellite:\n sat_data = sat_data[:, :, : self.history_len_5 + 1]\n\n # :) Pass data through the network :)\n out = F.relu(self.sat_conv0(sat_data))\n for i in range(0, self.number_of_conv3d_layers - 1):\n layer = getattr(self, f\"sat_conv{i + 1}\")\n out = F.relu(layer(out))\n\n out = out.reshape(batch_size, self.cnn_output_size)\n\n # Fully connected layers\n out = F.relu(self.fc1(out))\n out = F.relu(self.fc2(out))\n # which has shape (batch_size, 128)\n\n # add pv yield\n if self.include_pv_or_gsp_yield_history:\n if self.output_variable == \"gsp_yield\":\n pv_yield_history = (\n x.gsp.gsp_yield[:, : self.history_len_30 + 1].nan_to_num(nan=0.0).float()\n )\n else:\n pv_yield_history = (\n x.pv.pv_yield[:, : self.history_len_30 + 1].nan_to_num(nan=0.0).float()\n )\n\n pv_yield_history = pv_yield_history.reshape(\n pv_yield_history.shape[0], pv_yield_history.shape[1] * pv_yield_history.shape[2]\n )\n # join up\n out = torch.cat((out, 
pv_yield_history), dim=1)\n\n # add the pv yield history. This can be used if trying to predict gsp\n if self.include_pv_yield_history:\n pv_yield_history = (\n x.pv.pv_yield[:, : self.history_len_5 + 1].nan_to_num(nan=0.0).float()\n )\n\n pv_yield_history = pv_yield_history.reshape(\n pv_yield_history.shape[0], pv_yield_history.shape[1] * pv_yield_history.shape[2]\n )\n pv_yield_history = F.relu(self.pv_fc1(pv_yield_history))\n\n out = torch.cat((out, pv_yield_history), dim=1)\n\n # *********************** NWP Data ************************************\n if self.include_nwp:\n\n # shape: batch_size, n_chans, seq_len, height, width\n nwp_data = x.nwp.data.float()\n\n out_nwp = F.relu(self.nwp_conv0(nwp_data))\n for i in range(0, self.number_of_conv3d_layers - 1):\n layer = getattr(self, f\"nwp_conv{i + 1}\")\n out_nwp = F.relu(layer(out_nwp))\n\n # fully connected layers\n out_nwp = out_nwp.reshape(batch_size, self.nwp_cnn_output_size)\n out_nwp = F.relu(self.nwp_fc1(out_nwp))\n out_nwp = F.relu(self.nwp_fc2(out_nwp))\n\n # join with other FC layer\n out = torch.cat((out, out_nwp), dim=1)\n\n # ********************** Embedding of PV system ID ********************\n if self.embedding_dem:\n if self.output_variable == \"pv_yield\":\n id = x.pv.pv_system_row_number[0 : self.batch_size, 0]\n else:\n id = x.gsp.gsp_id[0 : self.batch_size, 0]\n\n id = id.type(torch.IntTensor)\n id = id.to(out.device)\n id_embedding = self.pv_system_id_embedding(id)\n out = torch.cat((out, id_embedding), dim=1)\n\n # Fully connected layers.\n out = F.relu(self.fc3(out))\n out = self.fc4(out)\n\n out = out.reshape(batch_size, self.forecast_len)\n\n return out\n"
] | [
[
"torch.nn.Linear",
"torch.nn.Conv3d",
"torch.nn.Embedding",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
daenjannis/red-pitaya-notes | [
"745c829f1e678ef6b8fbb86ec29da8609b1c56b5"
] | [
"projects/scanner/client/scanner.py"
] | [
"#!/usr/bin/env python\n\n# Control program for the Red Pitaya Scanning system\n# Copyright (C) 2015 Pavel Demin\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\nimport sys\nimport struct\n\nimport numpy as np\n\nimport matplotlib\nmatplotlib.use('Qt5Agg')\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nfrom matplotlib.figure import Figure\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\n\nfrom PyQt5.uic import loadUiType\nfrom PyQt5.QtCore import QRegExp, QTimer, Qt\nfrom PyQt5.QtGui import QRegExpValidator\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QMenu, QVBoxLayout, QSizePolicy, QMessageBox, QWidget\nfrom PyQt5.QtNetwork import QAbstractSocket, QTcpSocket\n\nUi_Scanner, QMainWindow = loadUiType('scanner.ui')\n\nsys.path.append(r'C:\\Users\\DJannis\\Documents\\RedPitaya\\red-pitaya-notes\\projects\\scanner\\client')\nimport selfpatterncreator as spc\n\nclass Scanner(QMainWindow, Ui_Scanner):\n def __init__(self):\n super(Scanner, self).__init__()\n self.setupUi(self)\n # IP address validator\n rx = QRegExp('^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$')\n self.addrValue.setValidator(QRegExpValidator(rx, self.addrValue))\n # state variable\n self.idle = True\n # number of samples to show on the plot\n self.xsize = self.xsizeValue.value()\n self.ysize = self.xsizeValue.value()\n self.size = self.xsize * self.ysize\n self.x = np.arange(self.xsize) #X array for plotting\n self.y = np.arange(self.ysize) #Y array for plotting\n self.freq = 125.0\n \n figure = Figure()\n figure.set_facecolor('none')\n self.axes = figure.add_subplot(111)\n self.canvas = FigureCanvas(figure)\n self.plotLayout.addWidget(self.canvas)\n self.change_scan_size()\n self.plotLayout.addWidget(self.toolbar)\n\n \n # create TCP socket\n self.socket = QTcpSocket(self)\n self.socket.connected.connect(self.connected)\n self.socket.readyRead.connect(self.read_data)\n self.socket.error.connect(self.display_error)\n # connect signals from buttons and boxes\n self.connectButton.clicked.connect(self.start)\n self.scanButton.clicked.connect(self.scan)\n self.periodValue.valueChanged.connect(self.set_period)\n self.trgtimeValue.valueChanged.connect(self.set_trgtime)\n self.trginvCheck.stateChanged.connect(self.set_trginv)\n self.shdelayValue.valueChanged.connect(self.set_shdelay)\n self.shtimeValue.valueChanged.connect(self.set_shtime)\n self.shinvCheck.stateChanged.connect(self.set_shinv)\n self.acqdelayValue.valueChanged.connect(self.set_acqdelay)\n self.samplesValue.valueChanged.connect(self.set_samples)\n self.pulsesValue.valueChanged.connect(self.set_pulses)\n self.xsizeValue.valueChanged.connect(self.set_xsize)\n self.ysizeValue.valueChanged.connect(self.set_ysize)\n \n # create timers\n self.startTimer = QTimer(self)\n 
self.startTimer.timeout.connect(self.timeout)\n self.meshTimer = QTimer(self)\n self.meshTimer.timeout.connect(self.update_mesh)\n # set default values\n self.periodValue.setValue(200.0)\n\n def start(self):\n if self.idle:\n self.connectButton.setEnabled(False)\n self.socket.connectToHost(self.addrValue.text(), 1001)\n self.startTimer.start(5000)\n else:\n self.stop()\n\n def stop(self):\n self.idle = True\n self.socket.abort()\n self.offset = 0\n self.connectButton.setText('Connect')\n self.connectButton.setEnabled(True)\n self.scanButton.setEnabled(True)\n\n def timeout(self):\n self.display_error('timeout')\n\n def connected(self):\n self.startTimer.stop()\n self.idle = False\n self.set_period(self.periodValue.value())\n self.set_trgtime(self.trgtimeValue.value())\n self.set_trginv(self.trginvCheck.checkState())\n self.set_shdelay(self.shdelayValue.value())\n self.set_shtime(self.shtimeValue.value())\n self.set_shinv(self.shinvCheck.checkState())\n self.set_acqdelay(self.acqdelayValue.value())\n self.set_samples(self.samplesValue.value())\n self.set_pulses(self.pulsesValue.value())\n # start pulse generators\n self.socket.write(struct.pack('<I', 11<<28))\n self.connectButton.setText('Disconnect')\n self.connectButton.setEnabled(True)\n self.scanButton.setEnabled(True)\n\n def read_data(self):\n size = self.socket.bytesAvailable()\n if self.offset + size < 8 * self.size:\n self.buffer[self.offset:self.offset + size] = self.socket.read(size)\n self.offset += size\n# plt.figure()\n# plt.plot(np.frombuffer(self.buffer, np.int32)[0::2])\n# plt.show()\n else:\n self.meshTimer.stop()\n self.buffer[self.offset:8 * self.size] = self.socket.read(8 * self.size - self.offset)\n self.offset = 0\n self.update_mesh()\n plt.figure()\n plt.plot(self.data[0::2])\n plt.show()\n self.scanButton.setEnabled(True)\n\n def display_error(self, socketError):\n self.startTimer.stop()\n if socketError == 'timeout':\n QMessageBox.information(self, 'Scanner', 'Error: connection timeout.')\n else:\n QMessageBox.information(self, 'Scanner', 'Error: %s.' 
% self.socket.errorString())\n self.stop()\n\n def set_period(self, value):\n # set maximum delays and times to half period\n maximum = int(value * 5.0 + 0.5) / 10.0\n self.trgtimeValue.setMaximum(maximum)\n self.shdelayValue.setMaximum(maximum)\n self.shtimeValue.setMaximum(maximum)\n self.acqdelayValue.setMaximum(maximum)\n # set maximum number of samples per pulse\n maximum = int(value * 500.0 + 0.5) / 10.0\n if maximum > 256.0: maximum = 256.0\n self.samplesValue.setMaximum(maximum)\n shdelay = value * 0.25\n samples = value * 0.5\n if self.idle: return\n self.socket.write(struct.pack('<I', 0<<28 | int(value * self.freq)))\n\n def set_trgtime(self, value):\n if self.idle: return\n self.socket.write(struct.pack('<I', 1<<28 | int(value * self.freq)))\n\n def set_trginv(self, checked):\n if self.idle: return\n self.socket.write(struct.pack('<I', 2<<28 | int(checked == Qt.Checked)))\n\n def set_shdelay(self, value):\n if self.idle: return\n self.socket.write(struct.pack('<I', 3<<28 | int(value * self.freq)))\n\n def set_shtime(self, value):\n if self.idle: return\n self.socket.write(struct.pack('<I', 4<<28 | int(value * self.freq)))\n\n def set_shinv(self, checked):\n if self.idle: return\n self.socket.write(struct.pack('<I', 5<<28 | int(checked == Qt.Checked)))\n\n def set_acqdelay(self, value):\n if self.idle: return\n self.socket.write(struct.pack('<I', 6<<28 | int(value * self.freq)))\n\n def set_samples(self, value):\n if self.idle: return\n self.socket.write(struct.pack('<I', 7<<28 | int(value)))\n\n def set_pulses(self, value):\n if self.idle: return\n self.socket.write(struct.pack('<I', 8<<28 | int(value)))\n \n def set_xsize(self, value):\n self.xsize = value\n self.size = self.xsize * self.ysize\n self.y = np.arange(self.xsize) \n self.change_scan_size()\n \n \n def set_ysize(self, value):\n self.ysize = value\n self.size = self.xsize * self.ysize\n self.y = np.arange(self.ysize)\n self.change_scan_size()\n\n def change_scan_size(self):\n self.x = np.arange(self.xsize) #X array for plotting\n self.y = np.arange(self.ysize) #Y array for plotting\n \n # buffer and offset for the incoming samples\n self.buffer = bytearray(8 * self.xsize * self.ysize)\n self.offset = 0\n self.data = np.frombuffer(self.buffer, np.int32)\n # create figure\n self.axes.axis((0.0, self.ysize, 0.0, self.xsize))\n x, y = np.meshgrid(np.linspace(0.0, self.ysize, self.ysize+1), np.linspace(0.0, self.xsize, self.xsize+1))\n z = x / self.xsize + y * 0.0\n self.mesh = self.axes.pcolormesh(x, y, z, cmap = cm.gray,vmin = 0, vmax = 1)\n # create navigation toolbar\n self.toolbar = NavigationToolbar(self.canvas, self.plotWidget, False)\n # remove subplots action\n actions = self.toolbar.actions()\n if int(matplotlib.__version__[0]) < 2:\n self.toolbar.removeAction(actions[7])\n else:\n self.toolbar.removeAction(actions[6])\n self.canvas.draw()\n\n \n\n\n# def set_coordinates(self):\n# if self.idle: return\n# self.socket.write(struct.pack('<I', 9<<28))\n# for i in range(self.xsize):\n# for j in range(self.ysize):\n# value = (i + 0 << 18) | (j << 4)\n# self.socket.write(struct.pack('<I', 10<<28 | int(value)))\n\n\n def set_coordinates(self):\n if self.idle: return\n self.socket.write(struct.pack('<I', 9<<28))\n for i in range(self.xco.size):\n value = (self.xco_prop[i] + 0 << 18) | (self.yco_prop[i] << 4)\n self.socket.write(struct.pack('<I', 10<<28 | int(value)))\n\n\n def scan(self):\n if self.idle: return\n print('start scanning')\n self.scanButton.setEnabled(False)\n scan_name = self.comboBoxScan.currentText()\n 
xco, yco = spc.LoadScanPattern(scan_name, self.xsize, self.ysize)\n #Change the coordinate such that we scan the full fov\n self.propx = int(np.ceil(512/(self.xsize)))\n self.propy = int(np.ceil(512/(self.ysize)))\n self.xco = xco\n self.yco = yco\n self.xco_prop = self.propx*self.xco\n self.yco_prop = self.propy*self.yco\n self.data[:] = np.zeros(2 * self.xsize * self.ysize, np.int32)\n self.update_mesh()\n self.set_coordinates()\n self.socket.write(struct.pack('<I', 12<<28))\n self.meshTimer.start(500)\n\n def update_mesh(self):\n result = self.data[0::2]/(self.samplesValue.value() * self.pulsesValue.value() * 8192.0)\n result = result - np.min(result)\n result = result.reshape(self.xsize, self.ysize)\n result = result[self.x[self.xco], self.y[self.yco]]\n self.mesh.set_array(result.reshape(self.xsize * self.ysize))\n self.mesh.set_clim(vmin = result.min(), vmax = result.max())\n self.canvas.draw()\n \n\napp = QApplication(sys.argv)\nwindow = Scanner()\nwindow.show()\nsys.exit(app.exec_())\n"
] | [
[
"numpy.linspace",
"matplotlib.figure.Figure",
"numpy.min",
"matplotlib.use",
"numpy.arange",
"matplotlib.pyplot.plot",
"numpy.frombuffer",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"numpy.ceil",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mfem/PyMFEM | [
"b7b7c3d3de1082eac1015e3a313cf513db06fd7b",
"fa654447ac6819c5aa0341397b91a299f4ce5492",
"b7b7c3d3de1082eac1015e3a313cf513db06fd7b"
] | [
"mfem/_par/mesh.py",
"examples/ex28p.py",
"mfem/_ser/vector.py"
] | [
"# This file was automatically generated by SWIG (http://www.swig.org).\n# Version 4.0.2\n#\n# Do not make changes to this file unless you know what you are doing--modify\n# the SWIG interface file instead.\n\nfrom sys import version_info as _swig_python_version_info\nif _swig_python_version_info < (2, 7, 0):\n raise RuntimeError(\"Python 2.7 or later required\")\n\n# Import the low-level C/C++ module\nif __package__ or \".\" in __name__:\n from . import _mesh\nelse:\n import _mesh\n\ntry:\n import builtins as __builtin__\nexcept ImportError:\n import __builtin__\n\n_swig_new_instance_method = _mesh.SWIG_PyInstanceMethod_New\n_swig_new_static_method = _mesh.SWIG_PyStaticMethod_New\n\ndef _swig_repr(self):\n try:\n strthis = \"proxy of \" + self.this.__repr__()\n except __builtin__.Exception:\n strthis = \"\"\n return \"<%s.%s; %s >\" % (self.__class__.__module__, self.__class__.__name__, strthis,)\n\n\ndef _swig_setattr_nondynamic_instance_variable(set):\n def set_instance_attr(self, name, value):\n if name == \"thisown\":\n self.this.own(value)\n elif name == \"this\":\n set(self, name, value)\n elif hasattr(self, name) and isinstance(getattr(type(self), name), property):\n set(self, name, value)\n else:\n raise AttributeError(\"You cannot add instance attributes to %s\" % self)\n return set_instance_attr\n\n\ndef _swig_setattr_nondynamic_class_variable(set):\n def set_class_attr(cls, name, value):\n if hasattr(cls, name) and not isinstance(getattr(cls, name), property):\n set(cls, name, value)\n else:\n raise AttributeError(\"You cannot add class attributes to %s\" % cls)\n return set_class_attr\n\n\ndef _swig_add_metaclass(metaclass):\n \"\"\"Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass\"\"\"\n def wrapper(cls):\n return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())\n return wrapper\n\n\nclass _SwigNonDynamicMeta(type):\n \"\"\"Meta class to enforce nondynamic attributes (no new attributes) for a class\"\"\"\n __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)\n\n\nimport weakref\n\nimport mfem._par.matrix\nimport mfem._par.vector\nimport mfem._par.array\nimport mfem._par.mem_manager\nimport mfem._par.operators\nimport mfem._par.sort_pairs\nimport mfem._par.ncmesh\nimport mfem._par.vtk\nimport mfem._par.element\nimport mfem._par.globals\nimport mfem._par.densemat\nimport mfem._par.geom\nimport mfem._par.intrules\nimport mfem._par.table\nimport mfem._par.hash\nimport mfem._par.vertex\nimport mfem._par.gridfunc\nimport mfem._par.coefficient\nimport mfem._par.sparsemat\nimport mfem._par.eltrans\nimport mfem._par.fe\nimport mfem._par.fespace\nimport mfem._par.fe_coll\nimport mfem._par.lininteg\nimport mfem._par.handle\nimport mfem._par.hypre\nimport mfem._par.restriction\nimport mfem._par.bilininteg\nimport mfem._par.linearform\nimport mfem._par.nonlininteg\nFaceType_Interior = _mesh.FaceType_Interior\n\nFaceType_Boundary = _mesh.FaceType_Boundary\n\nclass Mesh(object):\n r\"\"\"Proxy of C++ mfem::Mesh class.\"\"\"\n\n thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc=\"The membership flag\")\n __repr__ = _swig_repr\n NONE = _mesh.Mesh_NONE\n \n REFINE = _mesh.Mesh_REFINE\n \n DEREFINE = _mesh.Mesh_DEREFINE\n \n REBALANCE = _mesh.Mesh_REBALANCE\n \n attributes = property(_mesh.Mesh_attributes_get, doc=r\"\"\"attributes : mfem::Array<(int)>\"\"\")\n bdr_attributes = property(_mesh.Mesh_bdr_attributes_get, doc=r\"\"\"bdr_attributes : mfem::Array<(int)>\"\"\")\n NURBSext 
= property(_mesh.Mesh_NURBSext_get, _mesh.Mesh_NURBSext_set, doc=r\"\"\"NURBSext : p.mfem::NURBSExtension\"\"\")\n ncmesh = property(_mesh.Mesh_ncmesh_get, _mesh.Mesh_ncmesh_set, doc=r\"\"\"ncmesh : p.mfem::NCMesh\"\"\")\n geom_factors = property(_mesh.Mesh_geom_factors_get, _mesh.Mesh_geom_factors_set, doc=r\"\"\"geom_factors : mfem::Array<(p.mfem::GeometricFactors)>\"\"\")\n face_geom_factors = property(_mesh.Mesh_face_geom_factors_get, _mesh.Mesh_face_geom_factors_set, doc=r\"\"\"face_geom_factors : mfem::Array<(p.mfem::FaceGeometricFactors)>\"\"\")\n remove_unused_vertices = property(_mesh.Mesh_remove_unused_vertices_get, _mesh.Mesh_remove_unused_vertices_set, doc=r\"\"\"remove_unused_vertices : bool\"\"\")\n\n @staticmethod\n def LoadFromFile(filename, generate_edges=0, refine=1, fix_orientation=True):\n r\"\"\"LoadFromFile(char const * filename, int generate_edges=0, int refine=1, bool fix_orientation=True) -> Mesh\"\"\"\n return _mesh.Mesh_LoadFromFile(filename, generate_edges, refine, fix_orientation)\n LoadFromFile = _swig_new_static_method(_mesh.Mesh_LoadFromFile)\n\n @staticmethod\n def MakeCartesian1D(n, sx=1.0):\n r\"\"\"MakeCartesian1D(int n, double sx=1.0) -> Mesh\"\"\"\n return _mesh.Mesh_MakeCartesian1D(n, sx)\n MakeCartesian1D = _swig_new_static_method(_mesh.Mesh_MakeCartesian1D)\n\n @staticmethod\n def MakeCartesian2D(nx, ny, type, generate_edges=False, sx=1.0, sy=1.0, sfc_ordering=True):\n r\"\"\"MakeCartesian2D(int nx, int ny, mfem::Element::Type type, bool generate_edges=False, double sx=1.0, double sy=1.0, bool sfc_ordering=True) -> Mesh\"\"\"\n return _mesh.Mesh_MakeCartesian2D(nx, ny, type, generate_edges, sx, sy, sfc_ordering)\n MakeCartesian2D = _swig_new_static_method(_mesh.Mesh_MakeCartesian2D)\n\n @staticmethod\n def MakeCartesian3D(nx, ny, nz, type, sx=1.0, sy=1.0, sz=1.0, sfc_ordering=True):\n r\"\"\"MakeCartesian3D(int nx, int ny, int nz, mfem::Element::Type type, double sx=1.0, double sy=1.0, double sz=1.0, bool sfc_ordering=True) -> Mesh\"\"\"\n return _mesh.Mesh_MakeCartesian3D(nx, ny, nz, type, sx, sy, sz, sfc_ordering)\n MakeCartesian3D = _swig_new_static_method(_mesh.Mesh_MakeCartesian3D)\n\n @staticmethod\n def MakeRefined(*args):\n r\"\"\"\n MakeRefined(Mesh orig_mesh, int ref_factor, int ref_type) -> Mesh\n MakeRefined(Mesh orig_mesh, intArray ref_factors, int ref_type) -> Mesh\n \"\"\"\n return _mesh.Mesh_MakeRefined(*args)\n MakeRefined = _swig_new_static_method(_mesh.Mesh_MakeRefined)\n\n @staticmethod\n def MakeSimplicial(orig_mesh):\n r\"\"\"MakeSimplicial(Mesh orig_mesh) -> Mesh\"\"\"\n return _mesh.Mesh_MakeSimplicial(orig_mesh)\n MakeSimplicial = _swig_new_static_method(_mesh.Mesh_MakeSimplicial)\n\n @staticmethod\n def MakePeriodic(orig_mesh, v2v):\n r\"\"\"MakePeriodic(Mesh orig_mesh, std::vector< int > const & v2v) -> Mesh\"\"\"\n return _mesh.Mesh_MakePeriodic(orig_mesh, v2v)\n MakePeriodic = _swig_new_static_method(_mesh.Mesh_MakePeriodic)\n\n def CreatePeriodicVertexMapping(self, translations, tol=1e-8):\n r\"\"\"CreatePeriodicVertexMapping(Mesh self, std::vector< mfem::Vector > const & translations, double tol=1e-8) -> std::vector< int >\"\"\"\n return _mesh.Mesh_CreatePeriodicVertexMapping(self, translations, tol)\n CreatePeriodicVertexMapping = _swig_new_instance_method(_mesh.Mesh_CreatePeriodicVertexMapping)\n\n def NewElement(self, geom):\n r\"\"\"NewElement(Mesh self, int geom) -> Element\"\"\"\n return _mesh.Mesh_NewElement(self, geom)\n NewElement = _swig_new_instance_method(_mesh.Mesh_NewElement)\n\n def AddVertex(self, 
*args):\n r\"\"\"\n AddVertex(Mesh self, double x, double y=0.0, double z=0.0) -> int\n AddVertex(Mesh self, double const * coords) -> int\n \"\"\"\n return _mesh.Mesh_AddVertex(self, *args)\n AddVertex = _swig_new_instance_method(_mesh.Mesh_AddVertex)\n\n def AddVertexParents(self, i, p1, p2):\n r\"\"\"AddVertexParents(Mesh self, int i, int p1, int p2)\"\"\"\n return _mesh.Mesh_AddVertexParents(self, i, p1, p2)\n AddVertexParents = _swig_new_instance_method(_mesh.Mesh_AddVertexParents)\n\n def AddSegment(self, *args):\n r\"\"\"\n AddSegment(Mesh self, int v1, int v2, int attr=1) -> int\n AddSegment(Mesh self, int const * vi, int attr=1) -> int\n \"\"\"\n return _mesh.Mesh_AddSegment(self, *args)\n AddSegment = _swig_new_instance_method(_mesh.Mesh_AddSegment)\n\n def AddTriangle(self, *args):\n r\"\"\"\n AddTriangle(Mesh self, int v1, int v2, int v3, int attr=1) -> int\n AddTriangle(Mesh self, int const * vi, int attr=1) -> int\n \"\"\"\n return _mesh.Mesh_AddTriangle(self, *args)\n AddTriangle = _swig_new_instance_method(_mesh.Mesh_AddTriangle)\n\n def AddTri(self, vi, attr=1):\n r\"\"\"AddTri(Mesh self, int const * vi, int attr=1) -> int\"\"\"\n return _mesh.Mesh_AddTri(self, vi, attr)\n AddTri = _swig_new_instance_method(_mesh.Mesh_AddTri)\n\n def AddQuad(self, *args):\n r\"\"\"\n AddQuad(Mesh self, int v1, int v2, int v3, int v4, int attr=1) -> int\n AddQuad(Mesh self, int const * vi, int attr=1) -> int\n \"\"\"\n return _mesh.Mesh_AddQuad(self, *args)\n AddQuad = _swig_new_instance_method(_mesh.Mesh_AddQuad)\n\n def AddTet(self, *args):\n r\"\"\"\n AddTet(Mesh self, int v1, int v2, int v3, int v4, int attr=1) -> int\n AddTet(Mesh self, int const * vi, int attr=1) -> int\n \"\"\"\n return _mesh.Mesh_AddTet(self, *args)\n AddTet = _swig_new_instance_method(_mesh.Mesh_AddTet)\n\n def AddWedge(self, *args):\n r\"\"\"\n AddWedge(Mesh self, int v1, int v2, int v3, int v4, int v5, int v6, int attr=1) -> int\n AddWedge(Mesh self, int const * vi, int attr=1) -> int\n \"\"\"\n return _mesh.Mesh_AddWedge(self, *args)\n AddWedge = _swig_new_instance_method(_mesh.Mesh_AddWedge)\n\n def AddHex(self, *args):\n r\"\"\"\n AddHex(Mesh self, int v1, int v2, int v3, int v4, int v5, int v6, int v7, int v8, int attr=1) -> int\n AddHex(Mesh self, int const * vi, int attr=1) -> int\n \"\"\"\n return _mesh.Mesh_AddHex(self, *args)\n AddHex = _swig_new_instance_method(_mesh.Mesh_AddHex)\n\n def AddHexAsTets(self, vi, attr=1):\n r\"\"\"AddHexAsTets(Mesh self, int const * vi, int attr=1)\"\"\"\n return _mesh.Mesh_AddHexAsTets(self, vi, attr)\n AddHexAsTets = _swig_new_instance_method(_mesh.Mesh_AddHexAsTets)\n\n def AddHexAsWedges(self, vi, attr=1):\n r\"\"\"AddHexAsWedges(Mesh self, int const * vi, int attr=1)\"\"\"\n return _mesh.Mesh_AddHexAsWedges(self, vi, attr)\n AddHexAsWedges = _swig_new_instance_method(_mesh.Mesh_AddHexAsWedges)\n\n def AddElement(self, elem):\n r\"\"\"AddElement(Mesh self, Element elem) -> int\"\"\"\n return _mesh.Mesh_AddElement(self, elem)\n AddElement = _swig_new_instance_method(_mesh.Mesh_AddElement)\n\n def AddBdrElement(self, elem):\n r\"\"\"AddBdrElement(Mesh self, Element elem) -> int\"\"\"\n return _mesh.Mesh_AddBdrElement(self, elem)\n AddBdrElement = _swig_new_instance_method(_mesh.Mesh_AddBdrElement)\n\n def AddBdrSegment(self, *args):\n r\"\"\"\n AddBdrSegment(Mesh self, int v1, int v2, int attr=1) -> int\n AddBdrSegment(Mesh self, int const * vi, int attr=1) -> int\n \"\"\"\n return _mesh.Mesh_AddBdrSegment(self, *args)\n AddBdrSegment = 
_swig_new_instance_method(_mesh.Mesh_AddBdrSegment)\n\n def AddBdrTriangle(self, *args):\n r\"\"\"\n AddBdrTriangle(Mesh self, int v1, int v2, int v3, int attr=1) -> int\n AddBdrTriangle(Mesh self, int const * vi, int attr=1) -> int\n \"\"\"\n return _mesh.Mesh_AddBdrTriangle(self, *args)\n AddBdrTriangle = _swig_new_instance_method(_mesh.Mesh_AddBdrTriangle)\n\n def AddBdrQuad(self, *args):\n r\"\"\"\n AddBdrQuad(Mesh self, int v1, int v2, int v3, int v4, int attr=1) -> int\n AddBdrQuad(Mesh self, int const * vi, int attr=1) -> int\n \"\"\"\n return _mesh.Mesh_AddBdrQuad(self, *args)\n AddBdrQuad = _swig_new_instance_method(_mesh.Mesh_AddBdrQuad)\n\n def AddBdrQuadAsTriangles(self, vi, attr=1):\n r\"\"\"AddBdrQuadAsTriangles(Mesh self, int const * vi, int attr=1)\"\"\"\n return _mesh.Mesh_AddBdrQuadAsTriangles(self, vi, attr)\n AddBdrQuadAsTriangles = _swig_new_instance_method(_mesh.Mesh_AddBdrQuadAsTriangles)\n\n def AddBdrPoint(self, v, attr=1):\n r\"\"\"AddBdrPoint(Mesh self, int v, int attr=1) -> int\"\"\"\n return _mesh.Mesh_AddBdrPoint(self, v, attr)\n AddBdrPoint = _swig_new_instance_method(_mesh.Mesh_AddBdrPoint)\n\n def GenerateBoundaryElements(self):\n r\"\"\"GenerateBoundaryElements(Mesh self)\"\"\"\n return _mesh.Mesh_GenerateBoundaryElements(self)\n GenerateBoundaryElements = _swig_new_instance_method(_mesh.Mesh_GenerateBoundaryElements)\n\n def FinalizeTriMesh(self, generate_edges=0, refine=0, fix_orientation=True):\n r\"\"\"FinalizeTriMesh(Mesh self, int generate_edges=0, int refine=0, bool fix_orientation=True)\"\"\"\n return _mesh.Mesh_FinalizeTriMesh(self, generate_edges, refine, fix_orientation)\n FinalizeTriMesh = _swig_new_instance_method(_mesh.Mesh_FinalizeTriMesh)\n\n def FinalizeQuadMesh(self, generate_edges=0, refine=0, fix_orientation=True):\n r\"\"\"FinalizeQuadMesh(Mesh self, int generate_edges=0, int refine=0, bool fix_orientation=True)\"\"\"\n return _mesh.Mesh_FinalizeQuadMesh(self, generate_edges, refine, fix_orientation)\n FinalizeQuadMesh = _swig_new_instance_method(_mesh.Mesh_FinalizeQuadMesh)\n\n def FinalizeTetMesh(self, generate_edges=0, refine=0, fix_orientation=True):\n r\"\"\"FinalizeTetMesh(Mesh self, int generate_edges=0, int refine=0, bool fix_orientation=True)\"\"\"\n return _mesh.Mesh_FinalizeTetMesh(self, generate_edges, refine, fix_orientation)\n FinalizeTetMesh = _swig_new_instance_method(_mesh.Mesh_FinalizeTetMesh)\n\n def FinalizeWedgeMesh(self, generate_edges=0, refine=0, fix_orientation=True):\n r\"\"\"FinalizeWedgeMesh(Mesh self, int generate_edges=0, int refine=0, bool fix_orientation=True)\"\"\"\n return _mesh.Mesh_FinalizeWedgeMesh(self, generate_edges, refine, fix_orientation)\n FinalizeWedgeMesh = _swig_new_instance_method(_mesh.Mesh_FinalizeWedgeMesh)\n\n def FinalizeHexMesh(self, generate_edges=0, refine=0, fix_orientation=True):\n r\"\"\"FinalizeHexMesh(Mesh self, int generate_edges=0, int refine=0, bool fix_orientation=True)\"\"\"\n return _mesh.Mesh_FinalizeHexMesh(self, generate_edges, refine, fix_orientation)\n FinalizeHexMesh = _swig_new_instance_method(_mesh.Mesh_FinalizeHexMesh)\n\n def FinalizeMesh(self, refine=0, fix_orientation=True):\n r\"\"\"FinalizeMesh(Mesh self, int refine=0, bool fix_orientation=True)\"\"\"\n return _mesh.Mesh_FinalizeMesh(self, refine, fix_orientation)\n FinalizeMesh = _swig_new_instance_method(_mesh.Mesh_FinalizeMesh)\n\n def FinalizeTopology(self, generate_bdr=True):\n r\"\"\"FinalizeTopology(Mesh self, bool generate_bdr=True)\"\"\"\n return _mesh.Mesh_FinalizeTopology(self, generate_bdr)\n 
FinalizeTopology = _swig_new_instance_method(_mesh.Mesh_FinalizeTopology)\n\n def Finalize(self, refine=False, fix_orientation=False):\n r\"\"\"Finalize(Mesh self, bool refine=False, bool fix_orientation=False)\"\"\"\n return _mesh.Mesh_Finalize(self, refine, fix_orientation)\n Finalize = _swig_new_instance_method(_mesh.Mesh_Finalize)\n\n def SetAttributes(self):\n r\"\"\"SetAttributes(Mesh self)\"\"\"\n return _mesh.Mesh_SetAttributes(self)\n SetAttributes = _swig_new_instance_method(_mesh.Mesh_SetAttributes)\n\n def GetGeckoElementOrdering(self, ordering, iterations=4, window=4, period=2, seed=0, verbose=False, time_limit=0):\n r\"\"\"GetGeckoElementOrdering(Mesh self, intArray ordering, int iterations=4, int window=4, int period=2, int seed=0, bool verbose=False, double time_limit=0) -> double\"\"\"\n return _mesh.Mesh_GetGeckoElementOrdering(self, ordering, iterations, window, period, seed, verbose, time_limit)\n GetGeckoElementOrdering = _swig_new_instance_method(_mesh.Mesh_GetGeckoElementOrdering)\n\n def GetHilbertElementOrdering(self, ordering):\n r\"\"\"GetHilbertElementOrdering(Mesh self, intArray ordering)\"\"\"\n return _mesh.Mesh_GetHilbertElementOrdering(self, ordering)\n GetHilbertElementOrdering = _swig_new_instance_method(_mesh.Mesh_GetHilbertElementOrdering)\n\n def ReorderElements(self, ordering, reorder_vertices=True):\n r\"\"\"ReorderElements(Mesh self, intArray ordering, bool reorder_vertices=True)\"\"\"\n return _mesh.Mesh_ReorderElements(self, ordering, reorder_vertices)\n ReorderElements = _swig_new_instance_method(_mesh.Mesh_ReorderElements)\n\n def Load(self, input, generate_edges=0, refine=1, fix_orientation=True):\n r\"\"\"Load(Mesh self, std::istream & input, int generate_edges=0, int refine=1, bool fix_orientation=True)\"\"\"\n return _mesh.Mesh_Load(self, input, generate_edges, refine, fix_orientation)\n Load = _swig_new_instance_method(_mesh.Mesh_Load)\n\n def Clear(self):\n r\"\"\"Clear(Mesh self)\"\"\"\n return _mesh.Mesh_Clear(self)\n Clear = _swig_new_instance_method(_mesh.Mesh_Clear)\n\n def MeshGenerator(self):\n r\"\"\"MeshGenerator(Mesh self) -> int\"\"\"\n return _mesh.Mesh_MeshGenerator(self)\n MeshGenerator = _swig_new_instance_method(_mesh.Mesh_MeshGenerator)\n\n def GetNV(self):\n r\"\"\"GetNV(Mesh self) -> int\"\"\"\n return _mesh.Mesh_GetNV(self)\n GetNV = _swig_new_instance_method(_mesh.Mesh_GetNV)\n\n def GetNE(self):\n r\"\"\"GetNE(Mesh self) -> int\"\"\"\n return _mesh.Mesh_GetNE(self)\n GetNE = _swig_new_instance_method(_mesh.Mesh_GetNE)\n\n def GetNBE(self):\n r\"\"\"GetNBE(Mesh self) -> int\"\"\"\n return _mesh.Mesh_GetNBE(self)\n GetNBE = _swig_new_instance_method(_mesh.Mesh_GetNBE)\n\n def GetNEdges(self):\n r\"\"\"GetNEdges(Mesh self) -> int\"\"\"\n return _mesh.Mesh_GetNEdges(self)\n GetNEdges = _swig_new_instance_method(_mesh.Mesh_GetNEdges)\n\n def GetNFaces(self):\n r\"\"\"GetNFaces(Mesh self) -> int\"\"\"\n return _mesh.Mesh_GetNFaces(self)\n GetNFaces = _swig_new_instance_method(_mesh.Mesh_GetNFaces)\n\n def GetNumFaces(self):\n r\"\"\"GetNumFaces(Mesh self) -> int\"\"\"\n return _mesh.Mesh_GetNumFaces(self)\n GetNumFaces = _swig_new_instance_method(_mesh.Mesh_GetNumFaces)\n\n def GetNFbyType(self, type):\n r\"\"\"GetNFbyType(Mesh self, mfem::FaceType type) -> int\"\"\"\n return _mesh.Mesh_GetNFbyType(self, type)\n GetNFbyType = _swig_new_instance_method(_mesh.Mesh_GetNFbyType)\n\n def ReduceInt(self, value):\n r\"\"\"ReduceInt(Mesh self, int value) -> long\"\"\"\n return _mesh.Mesh_ReduceInt(self, value)\n ReduceInt = 
_swig_new_instance_method(_mesh.Mesh_ReduceInt)\n\n def GetGlobalNE(self):\n r\"\"\"GetGlobalNE(Mesh self) -> long\"\"\"\n return _mesh.Mesh_GetGlobalNE(self)\n GetGlobalNE = _swig_new_instance_method(_mesh.Mesh_GetGlobalNE)\n\n def GetGeometricFactors(self, *args, **kwargs):\n r\"\"\"GetGeometricFactors(Mesh self, IntegrationRule ir, int const flags, mfem::MemoryType d_mt=MemoryType::DEFAULT) -> GeometricFactors\"\"\"\n return _mesh.Mesh_GetGeometricFactors(self, *args, **kwargs)\n GetGeometricFactors = _swig_new_instance_method(_mesh.Mesh_GetGeometricFactors)\n\n def GetFaceGeometricFactors(self, ir, flags, type):\n r\"\"\"GetFaceGeometricFactors(Mesh self, IntegrationRule ir, int const flags, mfem::FaceType type) -> FaceGeometricFactors\"\"\"\n return _mesh.Mesh_GetFaceGeometricFactors(self, ir, flags, type)\n GetFaceGeometricFactors = _swig_new_instance_method(_mesh.Mesh_GetFaceGeometricFactors)\n\n def DeleteGeometricFactors(self):\n r\"\"\"DeleteGeometricFactors(Mesh self)\"\"\"\n return _mesh.Mesh_DeleteGeometricFactors(self)\n DeleteGeometricFactors = _swig_new_instance_method(_mesh.Mesh_DeleteGeometricFactors)\n\n def EulerNumber(self):\n r\"\"\"EulerNumber(Mesh self) -> int\"\"\"\n return _mesh.Mesh_EulerNumber(self)\n EulerNumber = _swig_new_instance_method(_mesh.Mesh_EulerNumber)\n\n def EulerNumber2D(self):\n r\"\"\"EulerNumber2D(Mesh self) -> int\"\"\"\n return _mesh.Mesh_EulerNumber2D(self)\n EulerNumber2D = _swig_new_instance_method(_mesh.Mesh_EulerNumber2D)\n\n def Dimension(self):\n r\"\"\"Dimension(Mesh self) -> int\"\"\"\n return _mesh.Mesh_Dimension(self)\n Dimension = _swig_new_instance_method(_mesh.Mesh_Dimension)\n\n def SpaceDimension(self):\n r\"\"\"SpaceDimension(Mesh self) -> int\"\"\"\n return _mesh.Mesh_SpaceDimension(self)\n SpaceDimension = _swig_new_instance_method(_mesh.Mesh_SpaceDimension)\n\n def GetVertex(self, *args):\n r\"\"\"\n GetVertex(Mesh self, int i) -> double const\n GetVertex(Mesh self, int i) -> double *\n \"\"\"\n return _mesh.Mesh_GetVertex(self, *args)\n GetVertex = _swig_new_instance_method(_mesh.Mesh_GetVertex)\n\n def GetElementData(self, geom, elem_vtx, attr):\n r\"\"\"GetElementData(Mesh self, int geom, intArray elem_vtx, intArray attr)\"\"\"\n return _mesh.Mesh_GetElementData(self, geom, elem_vtx, attr)\n GetElementData = _swig_new_instance_method(_mesh.Mesh_GetElementData)\n\n def GetBdrElementData(self, geom, bdr_elem_vtx, bdr_attr):\n r\"\"\"GetBdrElementData(Mesh self, int geom, intArray bdr_elem_vtx, intArray bdr_attr)\"\"\"\n return _mesh.Mesh_GetBdrElementData(self, geom, bdr_elem_vtx, bdr_attr)\n GetBdrElementData = _swig_new_instance_method(_mesh.Mesh_GetBdrElementData)\n\n def ChangeVertexDataOwnership(self, vertices, len_vertices, zerocopy=False):\n r\"\"\"ChangeVertexDataOwnership(Mesh self, double * vertices, int len_vertices, bool zerocopy=False)\"\"\"\n return _mesh.Mesh_ChangeVertexDataOwnership(self, vertices, len_vertices, zerocopy)\n ChangeVertexDataOwnership = _swig_new_instance_method(_mesh.Mesh_ChangeVertexDataOwnership)\n\n def GetElementsArray(self):\n r\"\"\"GetElementsArray(Mesh self) -> mfem::Element const *const *\"\"\"\n return _mesh.Mesh_GetElementsArray(self)\n GetElementsArray = _swig_new_instance_method(_mesh.Mesh_GetElementsArray)\n\n def GetElement(self, *args):\n r\"\"\"\n GetElement(Mesh self, int i) -> Element\n GetElement(Mesh self, int i) -> Element\n \"\"\"\n return _mesh.Mesh_GetElement(self, *args)\n GetElement = _swig_new_instance_method(_mesh.Mesh_GetElement)\n\n def GetBdrElement(self, 
*args):\n r\"\"\"\n GetBdrElement(Mesh self, int i) -> Element\n GetBdrElement(Mesh self, int i) -> Element\n \"\"\"\n return _mesh.Mesh_GetBdrElement(self, *args)\n GetBdrElement = _swig_new_instance_method(_mesh.Mesh_GetBdrElement)\n\n def GetFace(self, i):\n r\"\"\"GetFace(Mesh self, int i) -> Element\"\"\"\n return _mesh.Mesh_GetFace(self, i)\n GetFace = _swig_new_instance_method(_mesh.Mesh_GetFace)\n\n def GetFaceGeometry(self, i):\n r\"\"\"GetFaceGeometry(Mesh self, int i) -> mfem::Geometry::Type\"\"\"\n return _mesh.Mesh_GetFaceGeometry(self, i)\n GetFaceGeometry = _swig_new_instance_method(_mesh.Mesh_GetFaceGeometry)\n\n def GetElementGeometry(self, i):\n r\"\"\"GetElementGeometry(Mesh self, int i) -> mfem::Geometry::Type\"\"\"\n return _mesh.Mesh_GetElementGeometry(self, i)\n GetElementGeometry = _swig_new_instance_method(_mesh.Mesh_GetElementGeometry)\n\n def GetBdrElementGeometry(self, i):\n r\"\"\"GetBdrElementGeometry(Mesh self, int i) -> mfem::Geometry::Type\"\"\"\n return _mesh.Mesh_GetBdrElementGeometry(self, i)\n GetBdrElementGeometry = _swig_new_instance_method(_mesh.Mesh_GetBdrElementGeometry)\n\n def GetFaceBaseGeometry(self, i):\n r\"\"\"GetFaceBaseGeometry(Mesh self, int i) -> mfem::Geometry::Type\"\"\"\n return _mesh.Mesh_GetFaceBaseGeometry(self, i)\n GetFaceBaseGeometry = _swig_new_instance_method(_mesh.Mesh_GetFaceBaseGeometry)\n\n def GetElementBaseGeometry(self, i):\n r\"\"\"GetElementBaseGeometry(Mesh self, int i) -> mfem::Geometry::Type\"\"\"\n return _mesh.Mesh_GetElementBaseGeometry(self, i)\n GetElementBaseGeometry = _swig_new_instance_method(_mesh.Mesh_GetElementBaseGeometry)\n\n def GetBdrElementBaseGeometry(self, i):\n r\"\"\"GetBdrElementBaseGeometry(Mesh self, int i) -> mfem::Geometry::Type\"\"\"\n return _mesh.Mesh_GetBdrElementBaseGeometry(self, i)\n GetBdrElementBaseGeometry = _swig_new_instance_method(_mesh.Mesh_GetBdrElementBaseGeometry)\n\n def HasGeometry(self, geom):\n r\"\"\"HasGeometry(Mesh self, mfem::Geometry::Type geom) -> bool\"\"\"\n return _mesh.Mesh_HasGeometry(self, geom)\n HasGeometry = _swig_new_instance_method(_mesh.Mesh_HasGeometry)\n\n def GetNumGeometries(self, dim):\n r\"\"\"GetNumGeometries(Mesh self, int dim) -> int\"\"\"\n return _mesh.Mesh_GetNumGeometries(self, dim)\n GetNumGeometries = _swig_new_instance_method(_mesh.Mesh_GetNumGeometries)\n\n def GetGeometries(self, dim, el_geoms):\n r\"\"\"GetGeometries(Mesh self, int dim, GeometryTypeArray el_geoms)\"\"\"\n return _mesh.Mesh_GetGeometries(self, dim, el_geoms)\n GetGeometries = _swig_new_instance_method(_mesh.Mesh_GetGeometries)\n\n def GetElementVertices(self, i):\n from .array import intArray\n ivert = intArray()\n _mesh.Mesh_GetElementVertices(self, i, ivert)\n return ivert.ToList()\n\n\n\n def GetBdrElementVertices(self, i):\n from .array import intArray\n ivert = intArray()\n _mesh.Mesh_GetBdrElementVertices(self, i, ivert)\n return ivert.ToList()\n\n\n\n def GetElementEdges(self, i):\n from .array import intArray\n ia = intArray()\n ib = intArray() \n _mesh.Mesh_GetElementEdges(self, i, ia, ib)\n return ia.ToList(), ib.ToList() \n\n\n\n def GetBdrElementEdges(self, i):\n from .array import intArray\n ia = intArray()\n ib = intArray() \n _mesh.Mesh_GetBdrElementEdges(self, i, ia, ib)\n return ia.ToList(), ib.ToList()\n\n\n\n def GetFaceEdges(self, i):\n from .array import intArray\n ia = intArray()\n ib = intArray() \n _mesh.Mesh_GetFaceEdges(self, i, ia, ib)\n return ia.ToList(), ib.ToList()\n\n\n\n def GetFaceVertices(self, i):\n from .array import intArray\n ia 
= intArray()\n _mesh.Mesh_GetFaceVertices(self, i, ia)\n return ia.ToList()\n\n\n\n def GetEdgeVertices(self, i):\n from .array import intArray\n ia = intArray()\n _mesh.Mesh_GetEdgeVertices(self, i, ia)\n return ia.ToList()\n\n\n\n def GetFaceEdgeTable(self):\n r\"\"\"GetFaceEdgeTable(Mesh self) -> Table\"\"\"\n return _mesh.Mesh_GetFaceEdgeTable(self)\n GetFaceEdgeTable = _swig_new_instance_method(_mesh.Mesh_GetFaceEdgeTable)\n\n def GetEdgeVertexTable(self):\n r\"\"\"GetEdgeVertexTable(Mesh self) -> Table\"\"\"\n return _mesh.Mesh_GetEdgeVertexTable(self)\n GetEdgeVertexTable = _swig_new_instance_method(_mesh.Mesh_GetEdgeVertexTable)\n\n def GetElementFaces(self, i):\n from .array import intArray\n ia = intArray()\n ib = intArray() \n _mesh.Mesh_GetElementFaces(self, i, ia, ib)\n return ia.ToList(), ib.ToList()\n\n\n\n def GetBdrElementEdgeIndex(self, i):\n r\"\"\"GetBdrElementEdgeIndex(Mesh self, int i) -> int\"\"\"\n return _mesh.Mesh_GetBdrElementEdgeIndex(self, i)\n GetBdrElementEdgeIndex = _swig_new_instance_method(_mesh.Mesh_GetBdrElementEdgeIndex)\n\n def GetBdrElementAdjacentElement(self, bdr_el):\n from mfem.par import intp\n el = intp()\n info = intp() \n _mesh.Mesh_GetBdrElementAdjacentElement(self, bdr_el, el, info)\n return el.value(), info.value()\n\n\n\n def GetElementType(self, i):\n r\"\"\"GetElementType(Mesh self, int i) -> mfem::Element::Type\"\"\"\n return _mesh.Mesh_GetElementType(self, i)\n GetElementType = _swig_new_instance_method(_mesh.Mesh_GetElementType)\n\n def GetBdrElementType(self, i):\n r\"\"\"GetBdrElementType(Mesh self, int i) -> mfem::Element::Type\"\"\"\n return _mesh.Mesh_GetBdrElementType(self, i)\n GetBdrElementType = _swig_new_instance_method(_mesh.Mesh_GetBdrElementType)\n\n def GetPointMatrix(self, i, pointmat):\n r\"\"\"GetPointMatrix(Mesh self, int i, DenseMatrix pointmat)\"\"\"\n return _mesh.Mesh_GetPointMatrix(self, i, pointmat)\n GetPointMatrix = _swig_new_instance_method(_mesh.Mesh_GetPointMatrix)\n\n def GetBdrPointMatrix(self, i, pointmat):\n r\"\"\"GetBdrPointMatrix(Mesh self, int i, DenseMatrix pointmat)\"\"\"\n return _mesh.Mesh_GetBdrPointMatrix(self, i, pointmat)\n GetBdrPointMatrix = _swig_new_instance_method(_mesh.Mesh_GetBdrPointMatrix)\n\n @staticmethod\n def GetTransformationFEforElementType(arg1):\n r\"\"\"GetTransformationFEforElementType(mfem::Element::Type arg1) -> FiniteElement\"\"\"\n return _mesh.Mesh_GetTransformationFEforElementType(arg1)\n GetTransformationFEforElementType = _swig_new_static_method(_mesh.Mesh_GetTransformationFEforElementType)\n\n def GetElementTransformation(self, i):\n from mfem.par import IsoparametricTransformation\n Tr = IsoparametricTransformation()\n _mesh.Mesh_GetElementTransformation(self, i, Tr)\n return Tr\n\n\n\n def GetBdrElementTransformation(self, i):\n from mfem.par import IsoparametricTransformation\n Tr = IsoparametricTransformation()\n _mesh.Mesh_GetBdrElementTransformation(self, i, Tr)\n return Tr\n\n\n\n def GetLocalFaceTransformation(self, face_type, elem_type, Transf, info):\n r\"\"\"GetLocalFaceTransformation(Mesh self, int face_type, int elem_type, IsoparametricTransformation Transf, int info)\"\"\"\n return _mesh.Mesh_GetLocalFaceTransformation(self, face_type, elem_type, Transf, info)\n GetLocalFaceTransformation = _swig_new_instance_method(_mesh.Mesh_GetLocalFaceTransformation)\n\n def GetFaceTransformation(self, i):\n from mfem.par import IsoparametricTransformation\n Tr = IsoparametricTransformation()\n _mesh.Mesh_GetFaceTransformation(self, i, Tr)\n return Tr\n\n\n\n 
def GetEdgeTransformation(self, i):\n from mfem.par import IsoparametricTransformation\n Tr = IsoparametricTransformation()\n _mesh.Mesh_GetEdgeTransformation(self, i, Tr)\n return Tr\n\n\n\n def GetFaceElementTransformations(self, FaceNo, mask=31):\n r\"\"\"GetFaceElementTransformations(Mesh self, int FaceNo, int mask=31) -> FaceElementTransformations\"\"\"\n return _mesh.Mesh_GetFaceElementTransformations(self, FaceNo, mask)\n GetFaceElementTransformations = _swig_new_instance_method(_mesh.Mesh_GetFaceElementTransformations)\n\n def GetInteriorFaceTransformations(self, FaceNo):\n r\"\"\"GetInteriorFaceTransformations(Mesh self, int FaceNo) -> FaceElementTransformations\"\"\"\n return _mesh.Mesh_GetInteriorFaceTransformations(self, FaceNo)\n GetInteriorFaceTransformations = _swig_new_instance_method(_mesh.Mesh_GetInteriorFaceTransformations)\n\n def GetBdrFaceTransformations(self, BdrElemNo):\n r\"\"\"GetBdrFaceTransformations(Mesh self, int BdrElemNo) -> FaceElementTransformations\"\"\"\n return _mesh.Mesh_GetBdrFaceTransformations(self, BdrElemNo)\n GetBdrFaceTransformations = _swig_new_instance_method(_mesh.Mesh_GetBdrFaceTransformations)\n\n def GetBdrFace(self, BdrElemNo):\n r\"\"\"GetBdrFace(Mesh self, int BdrElemNo) -> int\"\"\"\n return _mesh.Mesh_GetBdrFace(self, BdrElemNo)\n GetBdrFace = _swig_new_instance_method(_mesh.Mesh_GetBdrFace)\n\n def FaceIsInterior(self, FaceNo):\n r\"\"\"FaceIsInterior(Mesh self, int FaceNo) -> bool\"\"\"\n return _mesh.Mesh_FaceIsInterior(self, FaceNo)\n FaceIsInterior = _swig_new_instance_method(_mesh.Mesh_FaceIsInterior)\n\n def GetFaceElements(self, Face):\n from mfem.par import intp \n Elem1 = intp()\n Elem2 = intp() \n val = _mesh.Mesh_GetFaceElements(self, Face, Elem1, Elem2)\n return Elem1.value(), Elem2.value()\n\n\n\n def GetFaceInfos(self, i):\n from mfem.ser import intp\n Elem1 = intp()\n Elem2 = intp() \n\n _mesh.Mesh_GetFaceInfos(self, i, Elem1, Elem2)\n return Elem1.value(), Elem2.value()\n\n\n\n def GetFaceGeometryType(self, Face):\n r\"\"\"GetFaceGeometryType(Mesh self, int Face) -> mfem::Geometry::Type\"\"\"\n return _mesh.Mesh_GetFaceGeometryType(self, Face)\n GetFaceGeometryType = _swig_new_instance_method(_mesh.Mesh_GetFaceGeometryType)\n\n def GetFaceElementType(self, Face):\n r\"\"\"GetFaceElementType(Mesh self, int Face) -> mfem::Element::Type\"\"\"\n return _mesh.Mesh_GetFaceElementType(self, Face)\n GetFaceElementType = _swig_new_instance_method(_mesh.Mesh_GetFaceElementType)\n\n def CheckElementOrientation(self, fix_it=True):\n r\"\"\"CheckElementOrientation(Mesh self, bool fix_it=True) -> int\"\"\"\n return _mesh.Mesh_CheckElementOrientation(self, fix_it)\n CheckElementOrientation = _swig_new_instance_method(_mesh.Mesh_CheckElementOrientation)\n\n def CheckBdrElementOrientation(self, fix_it=True):\n r\"\"\"CheckBdrElementOrientation(Mesh self, bool fix_it=True) -> int\"\"\"\n return _mesh.Mesh_CheckBdrElementOrientation(self, fix_it)\n CheckBdrElementOrientation = _swig_new_instance_method(_mesh.Mesh_CheckBdrElementOrientation)\n\n def GetAttribute(self, i):\n r\"\"\"GetAttribute(Mesh self, int i) -> int\"\"\"\n return _mesh.Mesh_GetAttribute(self, i)\n GetAttribute = _swig_new_instance_method(_mesh.Mesh_GetAttribute)\n\n def SetAttribute(self, i, attr):\n r\"\"\"SetAttribute(Mesh self, int i, int attr)\"\"\"\n return _mesh.Mesh_SetAttribute(self, i, attr)\n SetAttribute = _swig_new_instance_method(_mesh.Mesh_SetAttribute)\n\n def GetBdrAttribute(self, i):\n r\"\"\"GetBdrAttribute(Mesh self, int i) -> int\"\"\"\n return 
_mesh.Mesh_GetBdrAttribute(self, i)\n GetBdrAttribute = _swig_new_instance_method(_mesh.Mesh_GetBdrAttribute)\n\n def SetBdrAttribute(self, i, attr):\n r\"\"\"SetBdrAttribute(Mesh self, int i, int attr)\"\"\"\n return _mesh.Mesh_SetBdrAttribute(self, i, attr)\n SetBdrAttribute = _swig_new_instance_method(_mesh.Mesh_SetBdrAttribute)\n\n def ElementToElementTable(self):\n r\"\"\"ElementToElementTable(Mesh self) -> Table\"\"\"\n return _mesh.Mesh_ElementToElementTable(self)\n ElementToElementTable = _swig_new_instance_method(_mesh.Mesh_ElementToElementTable)\n\n def ElementToFaceTable(self):\n r\"\"\"ElementToFaceTable(Mesh self) -> Table\"\"\"\n return _mesh.Mesh_ElementToFaceTable(self)\n ElementToFaceTable = _swig_new_instance_method(_mesh.Mesh_ElementToFaceTable)\n\n def ElementToEdgeTable(self):\n r\"\"\"ElementToEdgeTable(Mesh self) -> Table\"\"\"\n return _mesh.Mesh_ElementToEdgeTable(self)\n ElementToEdgeTable = _swig_new_instance_method(_mesh.Mesh_ElementToEdgeTable)\n\n def GetVertexToElementTable(self):\n r\"\"\"GetVertexToElementTable(Mesh self) -> Table\"\"\"\n return _mesh.Mesh_GetVertexToElementTable(self)\n GetVertexToElementTable = _swig_new_instance_method(_mesh.Mesh_GetVertexToElementTable)\n\n def GetFaceToElementTable(self):\n r\"\"\"GetFaceToElementTable(Mesh self) -> Table\"\"\"\n return _mesh.Mesh_GetFaceToElementTable(self)\n GetFaceToElementTable = _swig_new_instance_method(_mesh.Mesh_GetFaceToElementTable)\n\n def ReorientTetMesh(self):\n r\"\"\"ReorientTetMesh(Mesh self)\"\"\"\n return _mesh.Mesh_ReorientTetMesh(self)\n ReorientTetMesh = _swig_new_instance_method(_mesh.Mesh_ReorientTetMesh)\n\n def CartesianPartitioning(self, nxyz, return_list=False):\n import mfem.par as mfem\n import warnings\n try:\n nxyz = list(nxyz)\n d = mfem.intArray(nxyz)\n dd = d.GetData()\n except BaseException:\n dd = nxyz\n warnings.warn(\"CartesianPartitioning argument should be iterable\",\n \t\t DeprecationWarning,)\n\n r = _mesh.Mesh_CartesianPartitioning(self, dd)\n\n if not return_list:\n return r\n else:\t \n result = mfem.intArray()\n result.MakeRef(r, self.GetNE())\n result.MakeDataOwner()\n return result.ToList()\n\n\n\n def GeneratePartitioning(self, nparts, part_method=1):\n r\"\"\"GeneratePartitioning(Mesh self, int nparts, int part_method=1) -> int *\"\"\"\n return _mesh.Mesh_GeneratePartitioning(self, nparts, part_method)\n GeneratePartitioning = _swig_new_instance_method(_mesh.Mesh_GeneratePartitioning)\n\n def CheckPartitioning(self, partitioning_):\n r\"\"\"CheckPartitioning(Mesh self, int * partitioning_)\"\"\"\n return _mesh.Mesh_CheckPartitioning(self, partitioning_)\n CheckPartitioning = _swig_new_instance_method(_mesh.Mesh_CheckPartitioning)\n\n def CheckDisplacements(self, displacements, tmax):\n r\"\"\"CheckDisplacements(Mesh self, Vector displacements, double & tmax)\"\"\"\n return _mesh.Mesh_CheckDisplacements(self, displacements, tmax)\n CheckDisplacements = _swig_new_instance_method(_mesh.Mesh_CheckDisplacements)\n\n def MoveVertices(self, displacements):\n r\"\"\"MoveVertices(Mesh self, Vector displacements)\"\"\"\n return _mesh.Mesh_MoveVertices(self, displacements)\n MoveVertices = _swig_new_instance_method(_mesh.Mesh_MoveVertices)\n\n def GetVertices(self, vert_coord):\n r\"\"\"GetVertices(Mesh self, Vector vert_coord)\"\"\"\n return _mesh.Mesh_GetVertices(self, vert_coord)\n GetVertices = _swig_new_instance_method(_mesh.Mesh_GetVertices)\n\n def SetVertices(self, vert_coord):\n r\"\"\"SetVertices(Mesh self, Vector vert_coord)\"\"\"\n return 
_mesh.Mesh_SetVertices(self, vert_coord)\n SetVertices = _swig_new_instance_method(_mesh.Mesh_SetVertices)\n\n def GetNode(self, i, coord):\n r\"\"\"GetNode(Mesh self, int i, double * coord)\"\"\"\n return _mesh.Mesh_GetNode(self, i, coord)\n GetNode = _swig_new_instance_method(_mesh.Mesh_GetNode)\n\n def SetNode(self, i, coord):\n r\"\"\"SetNode(Mesh self, int i, double const * coord)\"\"\"\n return _mesh.Mesh_SetNode(self, i, coord)\n SetNode = _swig_new_instance_method(_mesh.Mesh_SetNode)\n\n def MoveNodes(self, displacements):\n r\"\"\"MoveNodes(Mesh self, Vector displacements)\"\"\"\n return _mesh.Mesh_MoveNodes(self, displacements)\n MoveNodes = _swig_new_instance_method(_mesh.Mesh_MoveNodes)\n\n def SetNodes(self, node_coord):\n r\"\"\"SetNodes(Mesh self, Vector node_coord)\"\"\"\n return _mesh.Mesh_SetNodes(self, node_coord)\n SetNodes = _swig_new_instance_method(_mesh.Mesh_SetNodes)\n\n def OwnsNodes(self):\n r\"\"\"OwnsNodes(Mesh self) -> bool\"\"\"\n return _mesh.Mesh_OwnsNodes(self)\n OwnsNodes = _swig_new_instance_method(_mesh.Mesh_OwnsNodes)\n\n def SetNodesOwner(self, nodes_owner):\n r\"\"\"SetNodesOwner(Mesh self, bool nodes_owner)\"\"\"\n return _mesh.Mesh_SetNodesOwner(self, nodes_owner)\n SetNodesOwner = _swig_new_instance_method(_mesh.Mesh_SetNodesOwner)\n\n def NewNodes(self, nodes, make_owner=False):\n r\"\"\"NewNodes(Mesh self, GridFunction nodes, bool make_owner=False)\"\"\"\n return _mesh.Mesh_NewNodes(self, nodes, make_owner)\n NewNodes = _swig_new_instance_method(_mesh.Mesh_NewNodes)\n\n def SwapNodes(self, nodes, own_nodes_):\n r\"\"\"SwapNodes(Mesh self, mfem::GridFunction *& nodes, int & own_nodes_)\"\"\"\n return _mesh.Mesh_SwapNodes(self, nodes, own_nodes_)\n SwapNodes = _swig_new_instance_method(_mesh.Mesh_SwapNodes)\n\n def GetNodes(self, *args):\n r\"\"\"\n GetNodes(Mesh self, Vector node_coord)\n GetNodes(Mesh self) -> GridFunction\n GetNodes(Mesh self) -> GridFunction\n GetNodes(Mesh self, GridFunction nodes)\n \"\"\"\n return _mesh.Mesh_GetNodes(self, *args)\n GetNodes = _swig_new_instance_method(_mesh.Mesh_GetNodes)\n\n def SetNodalFESpace(self, nfes):\n r\"\"\"SetNodalFESpace(Mesh self, FiniteElementSpace nfes)\"\"\"\n return _mesh.Mesh_SetNodalFESpace(self, nfes)\n SetNodalFESpace = _swig_new_instance_method(_mesh.Mesh_SetNodalFESpace)\n\n def SetNodalGridFunction(self, nodes, make_owner=False):\n r\"\"\"SetNodalGridFunction(Mesh self, GridFunction nodes, bool make_owner=False)\"\"\"\n return _mesh.Mesh_SetNodalGridFunction(self, nodes, make_owner)\n SetNodalGridFunction = _swig_new_instance_method(_mesh.Mesh_SetNodalGridFunction)\n\n def GetNodalFESpace(self):\n r\"\"\"GetNodalFESpace(Mesh self) -> FiniteElementSpace\"\"\"\n return _mesh.Mesh_GetNodalFESpace(self)\n GetNodalFESpace = _swig_new_instance_method(_mesh.Mesh_GetNodalFESpace)\n\n def EnsureNodes(self):\n r\"\"\"EnsureNodes(Mesh self)\"\"\"\n return _mesh.Mesh_EnsureNodes(self)\n EnsureNodes = _swig_new_instance_method(_mesh.Mesh_EnsureNodes)\n\n def SetCurvature(self, order, discont=False, space_dim=-1, ordering=1):\n r\"\"\"SetCurvature(Mesh self, int order, bool discont=False, int space_dim=-1, int ordering=1)\"\"\"\n return _mesh.Mesh_SetCurvature(self, order, discont, space_dim, ordering)\n SetCurvature = _swig_new_instance_method(_mesh.Mesh_SetCurvature)\n\n def UniformRefinement(self, ref_algo=0):\n r\"\"\"UniformRefinement(Mesh self, int ref_algo=0)\"\"\"\n return _mesh.Mesh_UniformRefinement(self, ref_algo)\n UniformRefinement = 
_swig_new_instance_method(_mesh.Mesh_UniformRefinement)\n\n def GeneralRefinement(self, *args):\n r\"\"\"\n GeneralRefinement(Mesh self, RefinementArray refinements, int nonconforming=-1, int nc_limit=0)\n GeneralRefinement(Mesh self, intArray el_to_refine, int nonconforming=-1, int nc_limit=0)\n \"\"\"\n return _mesh.Mesh_GeneralRefinement(self, *args)\n GeneralRefinement = _swig_new_instance_method(_mesh.Mesh_GeneralRefinement)\n\n def RandomRefinement(self, prob, aniso=False, nonconforming=-1, nc_limit=0):\n r\"\"\"RandomRefinement(Mesh self, double prob, bool aniso=False, int nonconforming=-1, int nc_limit=0)\"\"\"\n return _mesh.Mesh_RandomRefinement(self, prob, aniso, nonconforming, nc_limit)\n RandomRefinement = _swig_new_instance_method(_mesh.Mesh_RandomRefinement)\n\n def RefineAtVertex(self, vert, eps=0.0, nonconforming=-1):\n r\"\"\"RefineAtVertex(Mesh self, Vertex vert, double eps=0.0, int nonconforming=-1)\"\"\"\n return _mesh.Mesh_RefineAtVertex(self, vert, eps, nonconforming)\n RefineAtVertex = _swig_new_instance_method(_mesh.Mesh_RefineAtVertex)\n\n def RefineByError(self, *args):\n r\"\"\"\n RefineByError(Mesh self, doubleArray elem_error, double threshold, int nonconforming=-1, int nc_limit=0) -> bool\n RefineByError(Mesh self, Vector elem_error, double threshold, int nonconforming=-1, int nc_limit=0) -> bool\n \"\"\"\n return _mesh.Mesh_RefineByError(self, *args)\n RefineByError = _swig_new_instance_method(_mesh.Mesh_RefineByError)\n\n def DerefineByError(self, *args):\n r\"\"\"\n DerefineByError(Mesh self, doubleArray elem_error, double threshold, int nc_limit=0, int op=1) -> bool\n DerefineByError(Mesh self, Vector elem_error, double threshold, int nc_limit=0, int op=1) -> bool\n \"\"\"\n return _mesh.Mesh_DerefineByError(self, *args)\n DerefineByError = _swig_new_instance_method(_mesh.Mesh_DerefineByError)\n\n def KnotInsert(self, *args):\n r\"\"\"\n KnotInsert(Mesh self, mfem::Array< mfem::KnotVector * > & kv)\n KnotInsert(Mesh self, mfem::Array< mfem::Vector * > & kv)\n \"\"\"\n return _mesh.Mesh_KnotInsert(self, *args)\n KnotInsert = _swig_new_instance_method(_mesh.Mesh_KnotInsert)\n\n def DegreeElevate(self, rel_degree, degree=16):\n r\"\"\"DegreeElevate(Mesh self, int rel_degree, int degree=16)\"\"\"\n return _mesh.Mesh_DegreeElevate(self, rel_degree, degree)\n DegreeElevate = _swig_new_instance_method(_mesh.Mesh_DegreeElevate)\n\n def EnsureNCMesh(self, simplices_nonconforming=False):\n r\"\"\"EnsureNCMesh(Mesh self, bool simplices_nonconforming=False)\"\"\"\n return _mesh.Mesh_EnsureNCMesh(self, simplices_nonconforming)\n EnsureNCMesh = _swig_new_instance_method(_mesh.Mesh_EnsureNCMesh)\n\n def Conforming(self):\n r\"\"\"Conforming(Mesh self) -> bool\"\"\"\n return _mesh.Mesh_Conforming(self)\n Conforming = _swig_new_instance_method(_mesh.Mesh_Conforming)\n\n def Nonconforming(self):\n r\"\"\"Nonconforming(Mesh self) -> bool\"\"\"\n return _mesh.Mesh_Nonconforming(self)\n Nonconforming = _swig_new_instance_method(_mesh.Mesh_Nonconforming)\n\n def GetRefinementTransforms(self):\n r\"\"\"GetRefinementTransforms(Mesh self) -> CoarseFineTransformations\"\"\"\n return _mesh.Mesh_GetRefinementTransforms(self)\n GetRefinementTransforms = _swig_new_instance_method(_mesh.Mesh_GetRefinementTransforms)\n\n def GetLastOperation(self):\n r\"\"\"GetLastOperation(Mesh self) -> mfem::Mesh::Operation\"\"\"\n return _mesh.Mesh_GetLastOperation(self)\n GetLastOperation = _swig_new_instance_method(_mesh.Mesh_GetLastOperation)\n\n def GetSequence(self):\n r\"\"\"GetSequence(Mesh 
self) -> long\"\"\"\n return _mesh.Mesh_GetSequence(self)\n GetSequence = _swig_new_instance_method(_mesh.Mesh_GetSequence)\n\n def Save(self, fname, precision=16):\n r\"\"\"Save(Mesh self, char const * fname, int precision=16)\"\"\"\n return _mesh.Mesh_Save(self, fname, precision)\n Save = _swig_new_instance_method(_mesh.Mesh_Save)\n\n def PrintVTU(self, *args):\n r\"\"\"\n PrintVTU(Mesh self, std::ostream & out, int ref=1, mfem::VTKFormat format=ASCII, bool high_order_output=False, int compression_level=0, bool bdr_elements=False)\n PrintVTU(Mesh self, std::string fname, mfem::VTKFormat format=ASCII, bool high_order_output=False, int compression_level=0, bool bdr=False)\n \"\"\"\n return _mesh.Mesh_PrintVTU(self, *args)\n PrintVTU = _swig_new_instance_method(_mesh.Mesh_PrintVTU)\n\n def PrintBdrVTU(self, *args, **kwargs):\n r\"\"\"PrintBdrVTU(Mesh self, std::string fname, mfem::VTKFormat format=ASCII, bool high_order_output=False, int compression_level=0)\"\"\"\n return _mesh.Mesh_PrintBdrVTU(self, *args, **kwargs)\n PrintBdrVTU = _swig_new_instance_method(_mesh.Mesh_PrintBdrVTU)\n\n def GetElementColoring(self, colors, el0=0):\n r\"\"\"GetElementColoring(Mesh self, intArray colors, int el0=0)\"\"\"\n return _mesh.Mesh_GetElementColoring(self, colors, el0)\n GetElementColoring = _swig_new_instance_method(_mesh.Mesh_GetElementColoring)\n\n def PrintWithPartitioning(self, partitioning, out, elem_attr=0):\n r\"\"\"PrintWithPartitioning(Mesh self, int * partitioning, std::ostream & out, int elem_attr=0)\"\"\"\n return _mesh.Mesh_PrintWithPartitioning(self, partitioning, out, elem_attr)\n PrintWithPartitioning = _swig_new_instance_method(_mesh.Mesh_PrintWithPartitioning)\n\n def PrintElementsWithPartitioning(self, partitioning, out, interior_faces=0):\n r\"\"\"PrintElementsWithPartitioning(Mesh self, int * partitioning, std::ostream & out, int interior_faces=0)\"\"\"\n return _mesh.Mesh_PrintElementsWithPartitioning(self, partitioning, out, interior_faces)\n PrintElementsWithPartitioning = _swig_new_instance_method(_mesh.Mesh_PrintElementsWithPartitioning)\n\n def PrintSurfaces(self, Aface_face, out):\n r\"\"\"PrintSurfaces(Mesh self, Table Aface_face, std::ostream & out)\"\"\"\n return _mesh.Mesh_PrintSurfaces(self, Aface_face, out)\n PrintSurfaces = _swig_new_instance_method(_mesh.Mesh_PrintSurfaces)\n\n def ScaleSubdomains(self, sf):\n r\"\"\"ScaleSubdomains(Mesh self, double sf)\"\"\"\n return _mesh.Mesh_ScaleSubdomains(self, sf)\n ScaleSubdomains = _swig_new_instance_method(_mesh.Mesh_ScaleSubdomains)\n\n def ScaleElements(self, sf):\n r\"\"\"ScaleElements(Mesh self, double sf)\"\"\"\n return _mesh.Mesh_ScaleElements(self, sf)\n ScaleElements = _swig_new_instance_method(_mesh.Mesh_ScaleElements)\n\n def Transform(self, *args):\n r\"\"\"\n Transform(Mesh self, void (*)(mfem::Vector const &,mfem::Vector &) f)\n Transform(Mesh self, VectorCoefficient deformation)\n \"\"\"\n return _mesh.Mesh_Transform(self, *args)\n Transform = _swig_new_instance_method(_mesh.Mesh_Transform)\n\n def RemoveUnusedVertices(self):\n r\"\"\"RemoveUnusedVertices(Mesh self)\"\"\"\n return _mesh.Mesh_RemoveUnusedVertices(self)\n RemoveUnusedVertices = _swig_new_instance_method(_mesh.Mesh_RemoveUnusedVertices)\n\n def RemoveInternalBoundaries(self):\n r\"\"\"RemoveInternalBoundaries(Mesh self)\"\"\"\n return _mesh.Mesh_RemoveInternalBoundaries(self)\n RemoveInternalBoundaries = _swig_new_instance_method(_mesh.Mesh_RemoveInternalBoundaries)\n\n def GetElementSize(self, *args):\n r\"\"\"\n GetElementSize(Mesh self, 
int i, int type=0) -> double\n GetElementSize(Mesh self, int i, Vector dir) -> double\n \"\"\"\n return _mesh.Mesh_GetElementSize(self, *args)\n GetElementSize = _swig_new_instance_method(_mesh.Mesh_GetElementSize)\n\n def GetElementVolume(self, i):\n r\"\"\"GetElementVolume(Mesh self, int i) -> double\"\"\"\n return _mesh.Mesh_GetElementVolume(self, i)\n GetElementVolume = _swig_new_instance_method(_mesh.Mesh_GetElementVolume)\n\n def GetElementCenter(self, i, center):\n r\"\"\"GetElementCenter(Mesh self, int i, Vector center)\"\"\"\n return _mesh.Mesh_GetElementCenter(self, i, center)\n GetElementCenter = _swig_new_instance_method(_mesh.Mesh_GetElementCenter)\n\n def GetBoundingBox(self, ref = 2):\n from .vector import Vector\n min = Vector()\n max = Vector() \n _mesh.Mesh_GetBoundingBox(self, min, max, ref) \n return min.GetDataArray().copy(), max.GetDataArray().copy()\n\n\n\n def GetCharacteristics(self, h_min, h_max, kappa_min, kappa_max, Vh=None, Vk=None):\n r\"\"\"GetCharacteristics(Mesh self, double & h_min, double & h_max, double & kappa_min, double & kappa_max, Vector Vh=None, Vector Vk=None)\"\"\"\n return _mesh.Mesh_GetCharacteristics(self, h_min, h_max, kappa_min, kappa_max, Vh, Vk)\n GetCharacteristics = _swig_new_instance_method(_mesh.Mesh_GetCharacteristics)\n\n @staticmethod\n def PrintElementsByGeometry(dim, num_elems_by_geom, out):\n r\"\"\"PrintElementsByGeometry(int dim, intArray num_elems_by_geom, std::ostream & out)\"\"\"\n return _mesh.Mesh_PrintElementsByGeometry(dim, num_elems_by_geom, out)\n PrintElementsByGeometry = _swig_new_static_method(_mesh.Mesh_PrintElementsByGeometry)\n\n def PrintCharacteristics(self, *args, **kwargs):\n r\"\"\"PrintCharacteristics(Mesh self, Vector Vh=None, Vector Vk=None, std::ostream & out=out)\"\"\"\n return _mesh.Mesh_PrintCharacteristics(self, *args, **kwargs)\n PrintCharacteristics = _swig_new_instance_method(_mesh.Mesh_PrintCharacteristics)\n\n def FindPoints(self, pp, warn=True, inv_trans=None): \n r\"\"\"count, element_id, integration_points = FindPoints(points, warn=True, int_trans=None)\"\"\"\n import numpy as np\n import mfem.par as mfem\n\n pp = np.array(pp, copy=False, dtype=float).transpose() \n M = mfem.DenseMatrix(pp.shape[0], pp.shape[1])\n M.Assign(pp)\n elem_ids = mfem.intArray()\n int_points = mfem.IntegrationPointArray()\n count = _mesh.Mesh_FindPoints(self, M, elem_ids, int_points, warn, inv_trans) \n elem_ids = elem_ids.ToList()\n return count, elem_ids, int_points\n\n\n\n def Swap(self, other, non_geometry):\n r\"\"\"Swap(Mesh self, Mesh other, bool non_geometry)\"\"\"\n return _mesh.Mesh_Swap(self, other, non_geometry)\n Swap = _swig_new_instance_method(_mesh.Mesh_Swap)\n __swig_destroy__ = _mesh.delete_Mesh\n\n def __init__(self, *args):\n r\"\"\"\n __init__(Mesh self) -> Mesh\n __init__(Mesh self, Mesh mesh, bool copy_nodes=True) -> Mesh\n __init__(Mesh self, Mesh mesh) -> Mesh\n __init__(Mesh self, double * vertices, int num_vertices, int * element_indices, mfem::Geometry::Type element_type, int * element_attributes, int num_elements, int * boundary_indices, mfem::Geometry::Type boundary_type, int * boundary_attributes, int num_boundary_elements, int dimension, int space_dimension=-1) -> Mesh\n __init__(Mesh self, int Dim_, int NVert, int NElem, int NBdrElem=0, int spaceDim_=-1) -> Mesh\n __init__(Mesh self, int nx, int ny, int nz, mfem::Element::Type type, bool generate_edges=False, double sx=1.0, double sy=1.0, double sz=1.0, bool sfc_ordering=True) -> Mesh\n __init__(Mesh self, int nx, int ny, 
mfem::Element::Type type, bool generate_edges=False, double sx=1.0, double sy=1.0, bool sfc_ordering=True) -> Mesh\n __init__(Mesh self, int n, double sx=1.0) -> Mesh\n __init__(Mesh self, char const * filename, int generate_edges=0, int refine=1, bool fix_orientation=True) -> Mesh\n __init__(Mesh self, std::istream & input, int generate_edges=0, int refine=1, bool fix_orientation=True) -> Mesh\n __init__(Mesh self, mfem::Mesh *[] mesh_array, int num_pieces) -> Mesh\n __init__(Mesh self, Mesh orig_mesh, int ref_factor, int ref_type) -> Mesh\n __init__(Mesh self, int nx, int ny, int nz, char const * type, bool generate_edges=False, double sx=1.0, double sy=1.0, double sz=1.0, bool sfc_ordering=True) -> Mesh\n __init__(Mesh self, int nx, int ny, char const * type, bool generate_edges=False, double sx=1.0, double sy=1.0, bool sfc_ordering=True) -> Mesh\n \"\"\"\n _mesh.Mesh_swiginit(self, _mesh.new_Mesh(*args))\n\n def PrintToFile(self, mesh_file, precision):\n r\"\"\"PrintToFile(Mesh self, char const * mesh_file, int const precision)\"\"\"\n return _mesh.Mesh_PrintToFile(self, mesh_file, precision)\n PrintToFile = _swig_new_instance_method(_mesh.Mesh_PrintToFile)\n\n def WriteToStream(self, StringIO):\n r\"\"\"WriteToStream(Mesh self, PyObject * StringIO) -> PyObject *\"\"\"\n return _mesh.Mesh_WriteToStream(self, StringIO)\n WriteToStream = _swig_new_instance_method(_mesh.Mesh_WriteToStream)\n\n def GetAttributeArray(self):\n r\"\"\"GetAttributeArray(Mesh self) -> PyObject *\"\"\"\n return _mesh.Mesh_GetAttributeArray(self)\n GetAttributeArray = _swig_new_instance_method(_mesh.Mesh_GetAttributeArray)\n\n def GetVertexArray(self, *args):\n r\"\"\"\n GetVertexArray(Mesh self, int i) -> PyObject\n GetVertexArray(Mesh self) -> PyObject *\n \"\"\"\n return _mesh.Mesh_GetVertexArray(self, *args)\n GetVertexArray = _swig_new_instance_method(_mesh.Mesh_GetVertexArray)\n\n def GetBdrElementFace(self, *args):\n r\"\"\"\n GetBdrElementFace(Mesh self, int i, int * f, int * o)\n GetBdrElementFace(Mesh self, int i) -> PyObject *\n \"\"\"\n return _mesh.Mesh_GetBdrElementFace(self, *args)\n GetBdrElementFace = _swig_new_instance_method(_mesh.Mesh_GetBdrElementFace)\n\n def GetBdrAttributeArray(self):\n r\"\"\"GetBdrAttributeArray(Mesh self) -> PyObject *\"\"\"\n return _mesh.Mesh_GetBdrAttributeArray(self)\n GetBdrAttributeArray = _swig_new_instance_method(_mesh.Mesh_GetBdrAttributeArray)\n\n def GetBdrArray(self, idx):\n r\"\"\"GetBdrArray(Mesh self, int idx) -> PyObject *\"\"\"\n return _mesh.Mesh_GetBdrArray(self, idx)\n GetBdrArray = _swig_new_instance_method(_mesh.Mesh_GetBdrArray)\n\n def GetDomainArray(self, idx):\n r\"\"\"GetDomainArray(Mesh self, int idx) -> PyObject *\"\"\"\n return _mesh.Mesh_GetDomainArray(self, idx)\n GetDomainArray = _swig_new_instance_method(_mesh.Mesh_GetDomainArray)\n\n def GetElementCenterArray(self, idx):\n r\"\"\"GetElementCenterArray(Mesh self, int idx) -> PyObject *\"\"\"\n return _mesh.Mesh_GetElementCenterArray(self, idx)\n GetElementCenterArray = _swig_new_instance_method(_mesh.Mesh_GetElementCenterArray)\n\n def GetScaledJacobian(self, i, sd=2):\n r\"\"\"GetScaledJacobian(Mesh self, int i, int sd=2) -> double\"\"\"\n return _mesh.Mesh_GetScaledJacobian(self, i, sd)\n GetScaledJacobian = _swig_new_instance_method(_mesh.Mesh_GetScaledJacobian)\n\n def PrintInfo(self, *args):\n r\"\"\"\n PrintInfo(Mesh self, std::ostream & out=out)\n PrintInfo(Mesh self, char const * file, int precision=16)\n \"\"\"\n return _mesh.Mesh_PrintInfo(self, *args)\n PrintInfo = 
_swig_new_instance_method(_mesh.Mesh_PrintInfo)\n\n def PrintInfoGZ(self, file, precision=16):\n r\"\"\"PrintInfoGZ(Mesh self, char const * file, int precision=16)\"\"\"\n return _mesh.Mesh_PrintInfoGZ(self, file, precision)\n PrintInfoGZ = _swig_new_instance_method(_mesh.Mesh_PrintInfoGZ)\n\n def Print(self, *args):\n r\"\"\"\n Print(Mesh self, std::ostream & out=out)\n Print(Mesh self, char const * file, int precision=16)\n \"\"\"\n return _mesh.Mesh_Print(self, *args)\n Print = _swig_new_instance_method(_mesh.Mesh_Print)\n\n def PrintGZ(self, file, precision=16):\n r\"\"\"PrintGZ(Mesh self, char const * file, int precision=16)\"\"\"\n return _mesh.Mesh_PrintGZ(self, file, precision)\n PrintGZ = _swig_new_instance_method(_mesh.Mesh_PrintGZ)\n\n def PrintXG(self, *args):\n r\"\"\"\n PrintXG(Mesh self, std::ostream & out=out)\n PrintXG(Mesh self, char const * file, int precision=16)\n \"\"\"\n return _mesh.Mesh_PrintXG(self, *args)\n PrintXG = _swig_new_instance_method(_mesh.Mesh_PrintXG)\n\n def PrintXGGZ(self, file, precision=16):\n r\"\"\"PrintXGGZ(Mesh self, char const * file, int precision=16)\"\"\"\n return _mesh.Mesh_PrintXGGZ(self, file, precision)\n PrintXGGZ = _swig_new_instance_method(_mesh.Mesh_PrintXGGZ)\n\n def PrintVTK(self, *args):\n r\"\"\"\n PrintVTK(Mesh self, std::ostream & out)\n PrintVTK(Mesh self, std::ostream & out, int ref, int field_data=0)\n PrintVTK(Mesh self, char const * file, int precision=16)\n \"\"\"\n return _mesh.Mesh_PrintVTK(self, *args)\n PrintVTK = _swig_new_instance_method(_mesh.Mesh_PrintVTK)\n\n def PrintVTKGZ(self, file, precision=16):\n r\"\"\"PrintVTKGZ(Mesh self, char const * file, int precision=16)\"\"\"\n return _mesh.Mesh_PrintVTKGZ(self, file, precision)\n PrintVTKGZ = _swig_new_instance_method(_mesh.Mesh_PrintVTKGZ)\n\n# Register Mesh in _mesh:\n_mesh.Mesh_swigregister(Mesh)\ncvar = _mesh.cvar\n\ndef Mesh_LoadFromFile(filename, generate_edges=0, refine=1, fix_orientation=True):\n r\"\"\"Mesh_LoadFromFile(char const * filename, int generate_edges=0, int refine=1, bool fix_orientation=True) -> Mesh\"\"\"\n return _mesh.Mesh_LoadFromFile(filename, generate_edges, refine, fix_orientation)\nMesh_LoadFromFile = _mesh.Mesh_LoadFromFile\n\ndef Mesh_MakeCartesian1D(n, sx=1.0):\n r\"\"\"Mesh_MakeCartesian1D(int n, double sx=1.0) -> Mesh\"\"\"\n return _mesh.Mesh_MakeCartesian1D(n, sx)\nMesh_MakeCartesian1D = _mesh.Mesh_MakeCartesian1D\n\ndef Mesh_MakeCartesian2D(nx, ny, type, generate_edges=False, sx=1.0, sy=1.0, sfc_ordering=True):\n r\"\"\"Mesh_MakeCartesian2D(int nx, int ny, mfem::Element::Type type, bool generate_edges=False, double sx=1.0, double sy=1.0, bool sfc_ordering=True) -> Mesh\"\"\"\n return _mesh.Mesh_MakeCartesian2D(nx, ny, type, generate_edges, sx, sy, sfc_ordering)\nMesh_MakeCartesian2D = _mesh.Mesh_MakeCartesian2D\n\ndef Mesh_MakeCartesian3D(nx, ny, nz, type, sx=1.0, sy=1.0, sz=1.0, sfc_ordering=True):\n r\"\"\"Mesh_MakeCartesian3D(int nx, int ny, int nz, mfem::Element::Type type, double sx=1.0, double sy=1.0, double sz=1.0, bool sfc_ordering=True) -> Mesh\"\"\"\n return _mesh.Mesh_MakeCartesian3D(nx, ny, nz, type, sx, sy, sz, sfc_ordering)\nMesh_MakeCartesian3D = _mesh.Mesh_MakeCartesian3D\n\ndef Mesh_MakeRefined(*args):\n r\"\"\"\n Mesh_MakeRefined(Mesh orig_mesh, int ref_factor, int ref_type) -> Mesh\n Mesh_MakeRefined(Mesh orig_mesh, intArray ref_factors, int ref_type) -> Mesh\n \"\"\"\n return _mesh.Mesh_MakeRefined(*args)\nMesh_MakeRefined = _mesh.Mesh_MakeRefined\n\ndef Mesh_MakeSimplicial(orig_mesh):\n 
r\"\"\"Mesh_MakeSimplicial(Mesh orig_mesh) -> Mesh\"\"\"\n return _mesh.Mesh_MakeSimplicial(orig_mesh)\nMesh_MakeSimplicial = _mesh.Mesh_MakeSimplicial\n\ndef Mesh_MakePeriodic(orig_mesh, v2v):\n r\"\"\"Mesh_MakePeriodic(Mesh orig_mesh, std::vector< int > const & v2v) -> Mesh\"\"\"\n return _mesh.Mesh_MakePeriodic(orig_mesh, v2v)\nMesh_MakePeriodic = _mesh.Mesh_MakePeriodic\n\ndef Mesh_GetTransformationFEforElementType(arg1):\n r\"\"\"Mesh_GetTransformationFEforElementType(mfem::Element::Type arg1) -> FiniteElement\"\"\"\n return _mesh.Mesh_GetTransformationFEforElementType(arg1)\nMesh_GetTransformationFEforElementType = _mesh.Mesh_GetTransformationFEforElementType\n\ndef Mesh_PrintElementsByGeometry(dim, num_elems_by_geom, out):\n r\"\"\"Mesh_PrintElementsByGeometry(int dim, intArray num_elems_by_geom, std::ostream & out)\"\"\"\n return _mesh.Mesh_PrintElementsByGeometry(dim, num_elems_by_geom, out)\nMesh_PrintElementsByGeometry = _mesh.Mesh_PrintElementsByGeometry\n\n\ndef __lshift__(*args):\n r\"\"\"\n __lshift__(std::ostream & os, SparseMatrix mat) -> std::ostream\n __lshift__(std::ostream & out, GridFunction sol) -> std::ostream\n __lshift__(std::ostream & out, QuadratureFunction qf) -> std::ostream\n __lshift__(std::ostream & out, Mesh mesh) -> std::ostream &\n \"\"\"\n return _mesh.__lshift__(*args)\n__lshift__ = _mesh.__lshift__\nclass GeometricFactors(object):\n r\"\"\"Proxy of C++ mfem::GeometricFactors class.\"\"\"\n\n thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc=\"The membership flag\")\n __repr__ = _swig_repr\n mesh = property(_mesh.GeometricFactors_mesh_get, _mesh.GeometricFactors_mesh_set, doc=r\"\"\"mesh : p.q(const).mfem::Mesh\"\"\")\n IntRule = property(_mesh.GeometricFactors_IntRule_get, _mesh.GeometricFactors_IntRule_set, doc=r\"\"\"IntRule : p.q(const).mfem::IntegrationRule\"\"\")\n computed_factors = property(_mesh.GeometricFactors_computed_factors_get, _mesh.GeometricFactors_computed_factors_set, doc=r\"\"\"computed_factors : int\"\"\")\n COORDINATES = _mesh.GeometricFactors_COORDINATES\n \n JACOBIANS = _mesh.GeometricFactors_JACOBIANS\n \n DETERMINANTS = _mesh.GeometricFactors_DETERMINANTS\n \n\n def __init__(self, *args):\n r\"\"\"\n __init__(GeometricFactors self, Mesh mesh, IntegrationRule ir, int flags, mfem::MemoryType d_mt=MemoryType::DEFAULT) -> GeometricFactors\n __init__(GeometricFactors self, GridFunction nodes, IntegrationRule ir, int flags, mfem::MemoryType d_mt=MemoryType::DEFAULT) -> GeometricFactors\n \"\"\"\n _mesh.GeometricFactors_swiginit(self, _mesh.new_GeometricFactors(*args))\n X = property(_mesh.GeometricFactors_X_get, _mesh.GeometricFactors_X_set, doc=r\"\"\"X : mfem::Vector\"\"\")\n J = property(_mesh.GeometricFactors_J_get, _mesh.GeometricFactors_J_set, doc=r\"\"\"J : mfem::Vector\"\"\")\n detJ = property(_mesh.GeometricFactors_detJ_get, _mesh.GeometricFactors_detJ_set, doc=r\"\"\"detJ : mfem::Vector\"\"\")\n __swig_destroy__ = _mesh.delete_GeometricFactors\n\n# Register GeometricFactors in _mesh:\n_mesh.GeometricFactors_swigregister(GeometricFactors)\n\nclass FaceGeometricFactors(object):\n r\"\"\"Proxy of C++ mfem::FaceGeometricFactors class.\"\"\"\n\n thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc=\"The membership flag\")\n __repr__ = _swig_repr\n mesh = property(_mesh.FaceGeometricFactors_mesh_get, _mesh.FaceGeometricFactors_mesh_set, doc=r\"\"\"mesh : p.q(const).mfem::Mesh\"\"\")\n IntRule = property(_mesh.FaceGeometricFactors_IntRule_get, _mesh.FaceGeometricFactors_IntRule_set, 
doc=r\"\"\"IntRule : p.q(const).mfem::IntegrationRule\"\"\")\n computed_factors = property(_mesh.FaceGeometricFactors_computed_factors_get, _mesh.FaceGeometricFactors_computed_factors_set, doc=r\"\"\"computed_factors : int\"\"\")\n type = property(_mesh.FaceGeometricFactors_type_get, _mesh.FaceGeometricFactors_type_set, doc=r\"\"\"type : mfem::FaceType\"\"\")\n COORDINATES = _mesh.FaceGeometricFactors_COORDINATES\n \n JACOBIANS = _mesh.FaceGeometricFactors_JACOBIANS\n \n DETERMINANTS = _mesh.FaceGeometricFactors_DETERMINANTS\n \n NORMALS = _mesh.FaceGeometricFactors_NORMALS\n \n\n def __init__(self, mesh, ir, flags, type):\n r\"\"\"__init__(FaceGeometricFactors self, Mesh mesh, IntegrationRule ir, int flags, mfem::FaceType type) -> FaceGeometricFactors\"\"\"\n _mesh.FaceGeometricFactors_swiginit(self, _mesh.new_FaceGeometricFactors(mesh, ir, flags, type))\n X = property(_mesh.FaceGeometricFactors_X_get, _mesh.FaceGeometricFactors_X_set, doc=r\"\"\"X : mfem::Vector\"\"\")\n J = property(_mesh.FaceGeometricFactors_J_get, _mesh.FaceGeometricFactors_J_set, doc=r\"\"\"J : mfem::Vector\"\"\")\n detJ = property(_mesh.FaceGeometricFactors_detJ_get, _mesh.FaceGeometricFactors_detJ_set, doc=r\"\"\"detJ : mfem::Vector\"\"\")\n normal = property(_mesh.FaceGeometricFactors_normal_get, _mesh.FaceGeometricFactors_normal_set, doc=r\"\"\"normal : mfem::Vector\"\"\")\n __swig_destroy__ = _mesh.delete_FaceGeometricFactors\n\n# Register FaceGeometricFactors in _mesh:\n_mesh.FaceGeometricFactors_swigregister(FaceGeometricFactors)\n\nclass NodeExtrudeCoefficient(mfem._par.coefficient.VectorCoefficient):\n r\"\"\"Proxy of C++ mfem::NodeExtrudeCoefficient class.\"\"\"\n\n thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc=\"The membership flag\")\n\n def __init__(self, *args, **kwargs):\n raise AttributeError(\"No constructor defined - class is abstract\")\n __repr__ = _swig_repr\n\n def SetLayer(self, l):\n r\"\"\"SetLayer(NodeExtrudeCoefficient self, int const l)\"\"\"\n return _mesh.NodeExtrudeCoefficient_SetLayer(self, l)\n SetLayer = _swig_new_instance_method(_mesh.NodeExtrudeCoefficient_SetLayer)\n\n def Eval(self, *args):\n r\"\"\"\n Eval(NodeExtrudeCoefficient self, Vector V, ElementTransformation T, IntegrationPoint ip)\n Eval(NodeExtrudeCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationRule ir)\n Eval(NodeExtrudeCoefficient self, Vector V, ElementTransformation T, IntegrationPoint ip)\n \"\"\"\n return _mesh.NodeExtrudeCoefficient_Eval(self, *args)\n Eval = _swig_new_instance_method(_mesh.NodeExtrudeCoefficient_Eval)\n __swig_destroy__ = _mesh.delete_NodeExtrudeCoefficient\n\n# Register NodeExtrudeCoefficient in _mesh:\n_mesh.NodeExtrudeCoefficient_swigregister(NodeExtrudeCoefficient)\n\n\ndef Extrude1D(mesh, ny, sy, closed=False):\n r\"\"\"Extrude1D(Mesh mesh, int const ny, double const sy, bool const closed=False) -> Mesh\"\"\"\n return _mesh.Extrude1D(mesh, ny, sy, closed)\nExtrude1D = _mesh.Extrude1D\n\ndef Extrude2D(mesh, nz, sz):\n r\"\"\"Extrude2D(Mesh mesh, int const nz, double const sz) -> Mesh\"\"\"\n return _mesh.Extrude2D(mesh, nz, sz)\nExtrude2D = _mesh.Extrude2D\n\ndef ShiftRight(a, b, c):\n r\"\"\"ShiftRight(int & a, int & b, int & c)\"\"\"\n return _mesh.ShiftRight(a, b, c)\nShiftRight = _mesh.ShiftRight\n\n\n",
"'''\n MFEM example 28p\n See c++ version in the MFEM library for more detail \n'''\nimport os\nimport mfem.par as mfem\nfrom mfem.par import intArray\nfrom os.path import expanduser, join, dirname\nimport numpy as np\nfrom numpy import sin, cos, exp, sqrt, pi, abs, array, floor, log\n\nfrom mpi4py import MPI\nnum_procs = MPI.COMM_WORLD.size\nmyid = MPI.COMM_WORLD.rank\nsmyid = '.'+'{:0>6d}'.format(myid)\n\n\ndef build_trapezoid_mesh(offset):\n assert offset < 0.9, \"offset is too large!\"\n\n dimension = 2\n nvt = 4 # vertices\n nbe = 4 # num boundary elements\n mesh = mfem.Mesh(dimension, nvt, 1, nbe)\n\n mesh.AddVertex(0.0, 0.0)\n mesh.AddVertex(1.0, 0.0)\n mesh.AddVertex(offset, 1.0)\n mesh.AddVertex(1.0, 1.0)\n\n # element\n mesh.AddQuad([0, 1, 3, 2], 1)\n\n # boundary\n mesh.AddBdrSegment([0, 1], 1)\n mesh.AddBdrSegment([1, 3], 2)\n mesh.AddBdrSegment([2, 3], 3)\n mesh.AddBdrSegment([0, 2], 4)\n\n mesh.FinalizeQuadMesh(1, 0, True)\n\n return mesh\n\n\ndef print0(*args):\n if myid == 0:\n print(*args)\n\n\ndef run(order=1,\n offset=0.3,\n reorder_space=True,\n penalty=0,\n visit=False,\n visualization=True):\n\n # 2. Build a trapezoidal mesh with a single quadrilateral element, where\n # 'offset' determines how far off it is from a rectangle.\n mesh = build_trapezoid_mesh(offset)\n dim = mesh.Dimension()\n\n # 3. Refine the mesh to increase the resolution. In this example we do\n # 'ref_levels' of uniform refinement. We choose 'ref_levels' to be the\n # largest number that gives a final mesh with no more than 1,000\n # elements.\n ref_levels = int(floor(log(1000./mesh.GetNE())/log(2.)/dim))\n for l in range(ref_levels):\n mesh.UniformRefinement()\n\n pmesh = mfem.ParMesh(MPI.COMM_WORLD, mesh)\n pmesh.UniformRefinement()\n\n # 4. Define a finite element space on the mesh. Here we use vector finite\n # elements, i.e. dim copies of a scalar finite element space. The vector\n # dimension is specified by the last argument of the FiniteElementSpace\n # constructor.\n use_nodal_space = pmesh.NURBSext\n if use_nodal_space:\n fec = None\n fespace = pmesh.GetNodes().FESpace()\n else:\n fec = mfem.H1_FECollection(order, dim)\n if reorder_space:\n fespace = mfem.ParFiniteElementSpace(\n pmesh, fec, dim, mfem.Ordering.byNODES)\n else:\n fespace = mfem.ParFiniteElementSpace(\n pmesh, fec, dim, mfem.Ordering.byVDIM)\n\n s = fespace.GlobalTrueVSize()\n print0(\"Number of finite element unknowns: \" + str(s))\n print0(\"Assembling matrix and r.h.s... \")\n\n # 5. Determine the list of true (i.e. parallel conforming) essential\n # boundary dofs. In this example, there are no essential boundary\n # conditions in the usual sense, but we leave the machinery here for\n # users to modify if they wish.\n ess_tdof_list = mfem.intArray()\n ess_bdr = mfem.intArray(pmesh.bdr_attributes.Max())\n ess_bdr.Assign(0)\n fespace.GetEssentialTrueDofs(ess_bdr, ess_tdof_list)\n\n # 6. Set up the linear form b(.) which corresponds to the right-hand side of\n # the FEM linear system. In this case, b_i equals the boundary integral\n # of f*phi_i where f represents a \"push\" force on the right side of the\n # trapezoid.\n f = mfem.VectorArrayCoefficient(dim)\n for i in range(dim-1):\n f.Set(i, mfem.ConstantCoefficient(0.0))\n push_force = mfem.Vector(pmesh.bdr_attributes.Max())\n push_force.Assign(0.0)\n push_force[1] = -5.0e-2\n f.Set(0, mfem.PWConstCoefficient(push_force))\n\n b = mfem.ParLinearForm(fespace)\n b.AddBoundaryIntegrator(mfem.VectorBoundaryLFIntegrator(f))\n b.Assemble()\n\n # 7. 
Define the solution vector x as a finite element grid function\n # corresponding to fespace.\n x = mfem.ParGridFunction(fespace)\n x.Assign(0.0)\n\n # 8. Set up the bilinear form a(.,.) on the finite element space\n # corresponding to the linear elasticity integrator with piece-wise\n # constants coefficient lambda and mu. We use constant coefficients,\n # but see ex2 for how to set up piecewise constant coefficients based\n # on attribute.\n llambda = mfem.Vector(mesh.attributes.Max())\n llambda.Assign(1.0)\n lambda_func = mfem.PWConstCoefficient(llambda)\n mu = mfem.Vector(mesh.attributes.Max())\n mu.Assign(1.0)\n mu_func = mfem.PWConstCoefficient(mu)\n\n a = mfem.ParBilinearForm(fespace)\n a.AddDomainIntegrator(mfem.ElasticityIntegrator(lambda_func, mu_func))\n\n # 9. Assemble the bilinear form and the corresponding linear system,\n # applying any necessary transformations such as: eliminating boundary\n # conditions, applying conforming constraints for non-conforming AMR,\n # static condensation, etc.\n a.Assemble()\n\n A = mfem.HypreParMatrix()\n B = mfem.Vector()\n X = mfem.Vector()\n a.FormLinearSystem(ess_tdof_list, x, b, A, X, B)\n print0(\"done.\")\n print0(\"Size of linear system: \" + str(A.GetGlobalNumRows()))\n\n # 10. Set up constraint matrix to constrain normal displacement (but\n # allow tangential displacement) on specified boundaries.\n constraint_atts = mfem.intArray([1, 4])\n constraint_rowstarts = mfem.intArray()\n local_constraints = mfem.ParBuildNormalConstraints(fespace,\n constraint_atts,\n constraint_rowstarts)\n\n # 11. Define and apply an iterative solver for the constrained system\n # in saddle-point form with a Gauss-Seidel smoother for the\n # displacement block.\n if penalty == 0.0:\n solver = mfem.EliminationCGSolver(A, local_constraints,\n constraint_rowstarts, dim,\n reorder_space)\n else:\n solver = mfem.PenaltyPCGSolver(A, local_constraints, penalty,\n dim, reorder_space)\n\n solver.SetRelTol(1e-8)\n solver.SetMaxIter(500)\n solver.SetPrintLevel(1)\n solver.Mult(B, X)\n\n # 12. Recover the solution as a finite element grid function. Move the\n # mesh to reflect the displacement of the elastic body being\n # simulated, for purposes of output.\n a.RecoverFEMSolution(X, b, x)\n\n if not use_nodal_space:\n pmesh.SetNodalFESpace(fespace)\n\n nodes = pmesh.GetNodes()\n nodes += x\n\n # 13. Save the refined mesh and the solution in VisIt format.\n if visit:\n visit_dc = mfem.VisItDataCollection(MPI.COMM_WORLD, \"ex28p\", pmesh)\n visit_dc.SetLevelsOfDetail(4)\n visit_dc.RegisterField(\"displacement\", x)\n visit_dc.Save()\n\n # 14. Save the displaced mesh and the inverted solution (which gives the\n # backward displacements to the original grid). This output can be\n # viewed later using GLVis: \"glvis -m displaced.mesh -g sol.gf\".\n x *= -1 # sign convention for GLVis displacements\n pmesh.Print(\"mesh\"+smyid, 8)\n x.Save(\"sol\"+smyid, 8)\n\n # 15. Send the above data by socket to a GLVis server. 
Use the \"n\" and \"b\"\n # keys in GLVis to visualize the displacements.\n if visualization:\n sol_sock = mfem.socketstream('localhost', 19916)\n sol_sock.precision(8)\n sol_sock << \"parallel \" << num_procs << \" \" << myid << \"\\n\"\n sol_sock << \"solution\\n\" << mesh << x\n sol_sock.flush\n\n\nif __name__ == \"__main__\":\n from mfem.common.arg_parser import ArgParser\n\n parser = ArgParser(\n description='Ex28 (Constraints and sliding boundary conditions')\n parser.add_argument('-o', '--order',\n action='store', default=1, type=int,\n help=\"Finite element order (polynomial degree)\")\n parser.add_argument('-nodes', '--by-nodes',\n action='store_true',\n help=\"Use byNODES ordering of vector space\")\n parser.add_argument('-vdim', '--by-vdim',\n action='store_true',\n help=\"Use byVDIM ordering of vector space\")\n parser.add_argument('-offset', '--offset',\n action='store', default=0.3, type=float,\n help=\"How much to offset the trapezoid.\")\n parser.add_argument('-vis', '--visualization',\n action='store_true',\n help='Enable GLVis visualization')\n parser.add_argument('-visit', '--visit-datafiles',\n action='store_true',\n help=\"Save data files for VisIt (visit.llnl.gov) visualization.\")\n parser.add_argument('-p', '--penalty',\n action='store', default=0.0, type=float,\n help=\"Penalty parameter; 0 means use elemination solver\")\n\n args = parser.parse_args()\n\n reorder_space = False\n if args.by_nodes:\n reorder_space = True\n if args.by_vdim:\n reorder_space = False\n args.by_nodes = reorder_space\n args.by_vdim = not reorder_space\n\n if myid == 0:\n parser.print_options(args)\n\n run(order=args.order,\n reorder_space=reorder_space,\n offset=args.offset,\n penalty=args.penalty,\n visit=args.visit_datafiles,\n visualization=args.visualization,)\n",
"# This file was automatically generated by SWIG (http://www.swig.org).\n# Version 4.0.2\n#\n# Do not make changes to this file unless you know what you are doing--modify\n# the SWIG interface file instead.\n\nfrom sys import version_info as _swig_python_version_info\nif _swig_python_version_info < (2, 7, 0):\n raise RuntimeError(\"Python 2.7 or later required\")\n\n# Import the low-level C/C++ module\nif __package__ or \".\" in __name__:\n from . import _vector\nelse:\n import _vector\n\ntry:\n import builtins as __builtin__\nexcept ImportError:\n import __builtin__\n\n_swig_new_instance_method = _vector.SWIG_PyInstanceMethod_New\n_swig_new_static_method = _vector.SWIG_PyStaticMethod_New\n\ndef _swig_repr(self):\n try:\n strthis = \"proxy of \" + self.this.__repr__()\n except __builtin__.Exception:\n strthis = \"\"\n return \"<%s.%s; %s >\" % (self.__class__.__module__, self.__class__.__name__, strthis,)\n\n\ndef _swig_setattr_nondynamic_instance_variable(set):\n def set_instance_attr(self, name, value):\n if name == \"thisown\":\n self.this.own(value)\n elif name == \"this\":\n set(self, name, value)\n elif hasattr(self, name) and isinstance(getattr(type(self), name), property):\n set(self, name, value)\n else:\n raise AttributeError(\"You cannot add instance attributes to %s\" % self)\n return set_instance_attr\n\n\ndef _swig_setattr_nondynamic_class_variable(set):\n def set_class_attr(cls, name, value):\n if hasattr(cls, name) and not isinstance(getattr(cls, name), property):\n set(cls, name, value)\n else:\n raise AttributeError(\"You cannot add class attributes to %s\" % cls)\n return set_class_attr\n\n\ndef _swig_add_metaclass(metaclass):\n \"\"\"Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass\"\"\"\n def wrapper(cls):\n return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())\n return wrapper\n\n\nclass _SwigNonDynamicMeta(type):\n \"\"\"Meta class to enforce nondynamic attributes (no new attributes) for a class\"\"\"\n __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)\n\n\nimport mfem._ser.array\nimport mfem._ser.mem_manager\n\ndef add_vector(*args):\n r\"\"\"\n add_vector(Vector v1, Vector v2, Vector v)\n add_vector(Vector v1, double alpha, Vector v2, Vector v)\n add_vector(double const a, Vector x, Vector y, Vector z)\n add_vector(double const a, Vector x, double const b, Vector y, Vector z)\n \"\"\"\n return _vector.add_vector(*args)\nadd_vector = _vector.add_vector\n\ndef subtract_vector(*args):\n r\"\"\"\n subtract_vector(Vector v1, Vector v2, Vector v)\n subtract_vector(double const a, Vector x, Vector y, Vector z)\n \"\"\"\n return _vector.subtract_vector(*args)\nsubtract_vector = _vector.subtract_vector\n\ndef CheckFinite(v, n):\n r\"\"\"CheckFinite(double const * v, int const n) -> int\"\"\"\n return _vector.CheckFinite(v, n)\nCheckFinite = _vector.CheckFinite\n\ndef infinity():\n r\"\"\"infinity() -> double\"\"\"\n return _vector.infinity()\ninfinity = _vector.infinity\nclass Vector(object):\n r\"\"\"Proxy of C++ mfem::Vector class.\"\"\"\n\n thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc=\"The membership flag\")\n __repr__ = _swig_repr\n\n def UseDevice(self, *args):\n r\"\"\"\n UseDevice(Vector self, bool use_dev)\n UseDevice(Vector self) -> bool\n \"\"\"\n return _vector.Vector_UseDevice(self, *args)\n UseDevice = _swig_new_instance_method(_vector.Vector_UseDevice)\n\n def Load(self, *args):\n r\"\"\"\n Load(Vector self, std::istream ** _in, int np, 
int * dim)\n Load(Vector self, std::istream & _in, int Size)\n Load(Vector self, std::istream & _in)\n \"\"\"\n return _vector.Vector_Load(self, *args)\n Load = _swig_new_instance_method(_vector.Vector_Load)\n\n def SetSize(self, *args):\n r\"\"\"\n SetSize(Vector self, int s)\n SetSize(Vector self, int s, mfem::MemoryType mt)\n SetSize(Vector self, int s, Vector v)\n \"\"\"\n return _vector.Vector_SetSize(self, *args)\n SetSize = _swig_new_instance_method(_vector.Vector_SetSize)\n\n def SetData(self, d):\n r\"\"\"SetData(Vector self, double * d)\"\"\"\n return _vector.Vector_SetData(self, d)\n SetData = _swig_new_instance_method(_vector.Vector_SetData)\n\n def SetDataAndSize(self, d, s):\n r\"\"\"SetDataAndSize(Vector self, double * d, int s)\"\"\"\n return _vector.Vector_SetDataAndSize(self, d, s)\n SetDataAndSize = _swig_new_instance_method(_vector.Vector_SetDataAndSize)\n\n def NewDataAndSize(self, d, s):\n r\"\"\"NewDataAndSize(Vector self, double * d, int s)\"\"\"\n return _vector.Vector_NewDataAndSize(self, d, s)\n NewDataAndSize = _swig_new_instance_method(_vector.Vector_NewDataAndSize)\n\n def NewMemoryAndSize(self, mem, s, own_mem):\n r\"\"\"NewMemoryAndSize(Vector self, mfem::Memory< double > const & mem, int s, bool own_mem)\"\"\"\n return _vector.Vector_NewMemoryAndSize(self, mem, s, own_mem)\n NewMemoryAndSize = _swig_new_instance_method(_vector.Vector_NewMemoryAndSize)\n\n def MakeRef(self, *args):\n r\"\"\"\n MakeRef(Vector self, Vector base, int offset, int size)\n MakeRef(Vector self, Vector base, int offset)\n \"\"\"\n return _vector.Vector_MakeRef(self, *args)\n MakeRef = _swig_new_instance_method(_vector.Vector_MakeRef)\n\n def MakeDataOwner(self):\n r\"\"\"MakeDataOwner(Vector self)\"\"\"\n return _vector.Vector_MakeDataOwner(self)\n MakeDataOwner = _swig_new_instance_method(_vector.Vector_MakeDataOwner)\n\n def Destroy(self):\n r\"\"\"Destroy(Vector self)\"\"\"\n return _vector.Vector_Destroy(self)\n Destroy = _swig_new_instance_method(_vector.Vector_Destroy)\n\n def DeleteDevice(self, copy_to_host=True):\n r\"\"\"DeleteDevice(Vector self, bool copy_to_host=True)\"\"\"\n return _vector.Vector_DeleteDevice(self, copy_to_host)\n DeleteDevice = _swig_new_instance_method(_vector.Vector_DeleteDevice)\n\n def Size(self):\n r\"\"\"Size(Vector self) -> int\"\"\"\n return _vector.Vector_Size(self)\n Size = _swig_new_instance_method(_vector.Vector_Size)\n\n def Capacity(self):\n r\"\"\"Capacity(Vector self) -> int\"\"\"\n return _vector.Vector_Capacity(self)\n Capacity = _swig_new_instance_method(_vector.Vector_Capacity)\n\n def GetData(self):\n r\"\"\"GetData(Vector self) -> double *\"\"\"\n return _vector.Vector_GetData(self)\n GetData = _swig_new_instance_method(_vector.Vector_GetData)\n\n def begin(self, *args):\n r\"\"\"\n begin(Vector self) -> double\n begin(Vector self) -> double const *\n \"\"\"\n return _vector.Vector_begin(self, *args)\n begin = _swig_new_instance_method(_vector.Vector_begin)\n\n def end(self, *args):\n r\"\"\"\n end(Vector self) -> double\n end(Vector self) -> double const *\n \"\"\"\n return _vector.Vector_end(self, *args)\n end = _swig_new_instance_method(_vector.Vector_end)\n\n def GetMemory(self, *args):\n r\"\"\"\n GetMemory(Vector self) -> mfem::Memory< double >\n GetMemory(Vector self) -> mfem::Memory< double > const &\n \"\"\"\n return _vector.Vector_GetMemory(self, *args)\n GetMemory = _swig_new_instance_method(_vector.Vector_GetMemory)\n\n def SyncMemory(self, v):\n r\"\"\"SyncMemory(Vector self, Vector v)\"\"\"\n return 
_vector.Vector_SyncMemory(self, v)\n SyncMemory = _swig_new_instance_method(_vector.Vector_SyncMemory)\n\n def SyncAliasMemory(self, v):\n r\"\"\"SyncAliasMemory(Vector self, Vector v)\"\"\"\n return _vector.Vector_SyncAliasMemory(self, v)\n SyncAliasMemory = _swig_new_instance_method(_vector.Vector_SyncAliasMemory)\n\n def OwnsData(self):\n r\"\"\"OwnsData(Vector self) -> bool\"\"\"\n return _vector.Vector_OwnsData(self)\n OwnsData = _swig_new_instance_method(_vector.Vector_OwnsData)\n\n def StealData(self, *args):\n r\"\"\"\n StealData(Vector self, double ** p)\n StealData(Vector self) -> double *\n \"\"\"\n return _vector.Vector_StealData(self, *args)\n StealData = _swig_new_instance_method(_vector.Vector_StealData)\n\n def Elem(self, *args):\n r\"\"\"\n Elem(Vector self, int i) -> double\n Elem(Vector self, int i) -> double const &\n \"\"\"\n return _vector.Vector_Elem(self, *args)\n Elem = _swig_new_instance_method(_vector.Vector_Elem)\n\n def __call__(self, *args):\n r\"\"\"\n __call__(Vector self, int i) -> double\n __call__(Vector self, int i) -> double const &\n \"\"\"\n return _vector.Vector___call__(self, *args)\n __call__ = _swig_new_instance_method(_vector.Vector___call__)\n\n def __mul__(self, *args):\n r\"\"\"\n __mul__(Vector self, double const * arg2) -> double\n __mul__(Vector self, Vector v) -> double\n \"\"\"\n return _vector.Vector___mul__(self, *args)\n __mul__ = _swig_new_instance_method(_vector.Vector___mul__)\n\n def __imul__(self, v):\n ret = _vector.Vector___imul__(self, v)\n #ret.thisown = self.thisown\n ret.thisown = 0 \n return self\n\n\n\n def __itruediv__(self, v):\n ret = _vector.Vector___itruediv__(self, v)\n #ret.thisown = self.thisown\n ret.thisown = 0 \n return self\n\n\n\n def __isub__(self, v):\n ret = _vector.Vector___isub__(self, v)\n #ret.thisown = self.thisown\n ret.thisown = 0 \n return self\n\n\n\n def __iadd__(self, v):\n ret = _vector.Vector___iadd__(self, v)\n #ret.thisown = self.thisown\n ret.thisown = 0 \n return self\n\n\n\n def Add(self, a, Va):\n r\"\"\"Add(Vector self, double const a, Vector Va) -> Vector\"\"\"\n return _vector.Vector_Add(self, a, Va)\n Add = _swig_new_instance_method(_vector.Vector_Add)\n\n def Set(self, a, x):\n r\"\"\"Set(Vector self, double const a, Vector x) -> Vector\"\"\"\n return _vector.Vector_Set(self, a, x)\n Set = _swig_new_instance_method(_vector.Vector_Set)\n\n def SetVector(self, v, offset):\n r\"\"\"SetVector(Vector self, Vector v, int offset)\"\"\"\n return _vector.Vector_SetVector(self, v, offset)\n SetVector = _swig_new_instance_method(_vector.Vector_SetVector)\n\n def Neg(self):\n r\"\"\"Neg(Vector self)\"\"\"\n return _vector.Vector_Neg(self)\n Neg = _swig_new_instance_method(_vector.Vector_Neg)\n\n def Swap(self, other):\n r\"\"\"Swap(Vector self, Vector other)\"\"\"\n return _vector.Vector_Swap(self, other)\n Swap = _swig_new_instance_method(_vector.Vector_Swap)\n\n def median(self, lo, hi):\n r\"\"\"median(Vector self, Vector lo, Vector hi)\"\"\"\n return _vector.Vector_median(self, lo, hi)\n median = _swig_new_instance_method(_vector.Vector_median)\n\n def GetSubVector(self, *args):\n r\"\"\"\n GetSubVector(Vector self, intArray dofs, Vector elemvect)\n GetSubVector(Vector self, intArray dofs, double * elem_data)\n \"\"\"\n return _vector.Vector_GetSubVector(self, *args)\n GetSubVector = _swig_new_instance_method(_vector.Vector_GetSubVector)\n\n def SetSubVector(self, *args):\n r\"\"\"\n SetSubVector(Vector self, intArray dofs, double const value)\n SetSubVector(Vector self, intArray dofs, 
Vector elemvect)\n SetSubVector(Vector self, intArray dofs, double * elem_data)\n \"\"\"\n return _vector.Vector_SetSubVector(self, *args)\n SetSubVector = _swig_new_instance_method(_vector.Vector_SetSubVector)\n\n def AddElementVector(self, *args):\n r\"\"\"\n AddElementVector(Vector self, intArray dofs, Vector elemvect)\n AddElementVector(Vector self, intArray dofs, double * elem_data)\n AddElementVector(Vector self, intArray dofs, double const a, Vector elemvect)\n \"\"\"\n return _vector.Vector_AddElementVector(self, *args)\n AddElementVector = _swig_new_instance_method(_vector.Vector_AddElementVector)\n\n def SetSubVectorComplement(self, dofs, val):\n r\"\"\"SetSubVectorComplement(Vector self, intArray dofs, double const val)\"\"\"\n return _vector.Vector_SetSubVectorComplement(self, dofs, val)\n SetSubVectorComplement = _swig_new_instance_method(_vector.Vector_SetSubVectorComplement)\n\n def PrintHash(self, out):\n r\"\"\"PrintHash(Vector self, std::ostream & out)\"\"\"\n return _vector.Vector_PrintHash(self, out)\n PrintHash = _swig_new_instance_method(_vector.Vector_PrintHash)\n\n def Randomize(self, seed=0):\n r\"\"\"Randomize(Vector self, int seed=0)\"\"\"\n return _vector.Vector_Randomize(self, seed)\n Randomize = _swig_new_instance_method(_vector.Vector_Randomize)\n\n def Norml2(self):\n r\"\"\"Norml2(Vector self) -> double\"\"\"\n return _vector.Vector_Norml2(self)\n Norml2 = _swig_new_instance_method(_vector.Vector_Norml2)\n\n def Normlinf(self):\n r\"\"\"Normlinf(Vector self) -> double\"\"\"\n return _vector.Vector_Normlinf(self)\n Normlinf = _swig_new_instance_method(_vector.Vector_Normlinf)\n\n def Norml1(self):\n r\"\"\"Norml1(Vector self) -> double\"\"\"\n return _vector.Vector_Norml1(self)\n Norml1 = _swig_new_instance_method(_vector.Vector_Norml1)\n\n def Normlp(self, p):\n r\"\"\"Normlp(Vector self, double p) -> double\"\"\"\n return _vector.Vector_Normlp(self, p)\n Normlp = _swig_new_instance_method(_vector.Vector_Normlp)\n\n def Max(self):\n r\"\"\"Max(Vector self) -> double\"\"\"\n return _vector.Vector_Max(self)\n Max = _swig_new_instance_method(_vector.Vector_Max)\n\n def Min(self):\n r\"\"\"Min(Vector self) -> double\"\"\"\n return _vector.Vector_Min(self)\n Min = _swig_new_instance_method(_vector.Vector_Min)\n\n def Sum(self):\n r\"\"\"Sum(Vector self) -> double\"\"\"\n return _vector.Vector_Sum(self)\n Sum = _swig_new_instance_method(_vector.Vector_Sum)\n\n def DistanceSquaredTo(self, p):\n r\"\"\"DistanceSquaredTo(Vector self, double const * p) -> double\"\"\"\n return _vector.Vector_DistanceSquaredTo(self, p)\n DistanceSquaredTo = _swig_new_instance_method(_vector.Vector_DistanceSquaredTo)\n\n def DistanceTo(self, p):\n r\"\"\"DistanceTo(Vector self, double const * p) -> double\"\"\"\n return _vector.Vector_DistanceTo(self, p)\n DistanceTo = _swig_new_instance_method(_vector.Vector_DistanceTo)\n\n def CheckFinite(self):\n r\"\"\"CheckFinite(Vector self) -> int\"\"\"\n return _vector.Vector_CheckFinite(self)\n CheckFinite = _swig_new_instance_method(_vector.Vector_CheckFinite)\n __swig_destroy__ = _vector.delete_Vector\n\n def Read(self, on_dev=True):\n r\"\"\"Read(Vector self, bool on_dev=True) -> double const *\"\"\"\n return _vector.Vector_Read(self, on_dev)\n Read = _swig_new_instance_method(_vector.Vector_Read)\n\n def HostRead(self):\n r\"\"\"HostRead(Vector self) -> double const *\"\"\"\n return _vector.Vector_HostRead(self)\n HostRead = _swig_new_instance_method(_vector.Vector_HostRead)\n\n def Write(self, on_dev=True):\n r\"\"\"Write(Vector self, 
bool on_dev=True) -> double *\"\"\"\n return _vector.Vector_Write(self, on_dev)\n Write = _swig_new_instance_method(_vector.Vector_Write)\n\n def HostWrite(self):\n r\"\"\"HostWrite(Vector self) -> double *\"\"\"\n return _vector.Vector_HostWrite(self)\n HostWrite = _swig_new_instance_method(_vector.Vector_HostWrite)\n\n def ReadWrite(self, on_dev=True):\n r\"\"\"ReadWrite(Vector self, bool on_dev=True) -> double *\"\"\"\n return _vector.Vector_ReadWrite(self, on_dev)\n ReadWrite = _swig_new_instance_method(_vector.Vector_ReadWrite)\n\n def HostReadWrite(self):\n r\"\"\"HostReadWrite(Vector self) -> double *\"\"\"\n return _vector.Vector_HostReadWrite(self)\n HostReadWrite = _swig_new_instance_method(_vector.Vector_HostReadWrite)\n\n def __init__(self, *args):\n r\"\"\"\n __init__(Vector self) -> Vector\n __init__(Vector self, Vector arg2) -> Vector\n __init__(Vector self, int s) -> Vector\n __init__(Vector self, double * data_, int size_) -> Vector\n __init__(Vector self, int size_, mfem::MemoryType mt) -> Vector\n __init__(Vector self, int size_, mfem::MemoryType h_mt, mfem::MemoryType d_mt) -> Vector\n __init__(Vector self, Vector v, int offset, int size) -> Vector\n \"\"\"\n\n from numpy import ndarray, ascontiguousarray\n keep_link = False\n own_data = False\n if len(args) == 1:\n if isinstance(args[0], list): \n args = (args[0], len(args[0]))\n own_data = True\n elif isinstance(args[0], ndarray):\n if args[0].dtype != 'float64':\n raise ValueError('Must be float64 array ' + str(args[0].dtype) +\n \t\t\t ' is given') \n else:\n args = (ascontiguousarray(args[0]), args[0].shape[0])\n # in this case, args[0] need to be maintained\n # in this object.\n keep_link = True\n\n\n _vector.Vector_swiginit(self, _vector.new_Vector(*args))\n\n if keep_link:\n self._link_to_data = args[0]\n if own_data:\n self.MakeDataOwner()\n\n\n\n\n def Assign(self, *args):\n r\"\"\"\n Assign(Vector self, double const v)\n Assign(Vector self, Vector v)\n Assign(Vector self, PyObject * param)\n \"\"\"\n\n from numpy import ndarray, ascontiguousarray, array\n keep_link = False\n if len(args) == 1:\n if isinstance(args[0], ndarray):\n if args[0].dtype != 'float64':\n raise ValueError('Must be float64 array ' + str(args[0].dtype) +\n \t\t ' is given')\n elif args[0].ndim != 1:\n raise ValueError('Ndim must be one') \n elif args[0].shape[0] != _vector.Vector_Size(self):\n raise ValueError('Length does not match')\n else:\n args = (ascontiguousarray(args[0]),)\n elif isinstance(args[0], tuple):\n args = (array(args[0], dtype = float),) \n elif isinstance(args[0], list):\t \n args = (array(args[0], dtype = float),) \n else:\n pass\n\n\n val = _vector.Vector_Assign(self, *args)\n\n return self\n\n\n return val\n\n\n def __setitem__(self, i, v):\n r\"\"\"__setitem__(Vector self, int i, double const v)\"\"\"\n return _vector.Vector___setitem__(self, i, v)\n __setitem__ = _swig_new_instance_method(_vector.Vector___setitem__)\n\n def __getitem__(self, param):\n r\"\"\"__getitem__(Vector self, PyObject * param) -> PyObject *\"\"\"\n return _vector.Vector___getitem__(self, param)\n __getitem__ = _swig_new_instance_method(_vector.Vector___getitem__)\n\n def GetDataArray(self):\n r\"\"\"GetDataArray(Vector self) -> PyObject *\"\"\"\n return _vector.Vector_GetDataArray(self)\n GetDataArray = _swig_new_instance_method(_vector.Vector_GetDataArray)\n\n def WriteToStream(self, StringIO, width=8):\n r\"\"\"WriteToStream(Vector self, PyObject * StringIO, int width=8) -> PyObject *\"\"\"\n return _vector.Vector_WriteToStream(self, 
StringIO, width)\n WriteToStream = _swig_new_instance_method(_vector.Vector_WriteToStream)\n\n def Print(self, *args):\n r\"\"\"\n Print(Vector self, std::ostream & out=mfem::out, int width=8)\n Print(Vector self, char const * file, int precision=16)\n \"\"\"\n return _vector.Vector_Print(self, *args)\n Print = _swig_new_instance_method(_vector.Vector_Print)\n\n def PrintGZ(self, file, precision=16):\n r\"\"\"PrintGZ(Vector self, char const * file, int precision=16)\"\"\"\n return _vector.Vector_PrintGZ(self, file, precision)\n PrintGZ = _swig_new_instance_method(_vector.Vector_PrintGZ)\n\n def Print_HYPREGZ(self, file, precision=16):\n r\"\"\"Print_HYPREGZ(Vector self, char const * file, int precision=16)\"\"\"\n return _vector.Vector_Print_HYPREGZ(self, file, precision)\n Print_HYPREGZ = _swig_new_instance_method(_vector.Vector_Print_HYPREGZ)\n\n def Print_HYPRE(self, *args):\n r\"\"\"\n Print_HYPRE(Vector self, std::ostream & out)\n Print_HYPRE(Vector self, char const * file, int precision=16)\n Print_HYPRE(Vector self)\n \"\"\"\n return _vector.Vector_Print_HYPRE(self, *args)\n Print_HYPRE = _swig_new_instance_method(_vector.Vector_Print_HYPRE)\n\n# Register Vector in _vector:\n_vector.Vector_swigregister(Vector)\n\n\ndef IsFinite(val):\n r\"\"\"IsFinite(double const & val) -> bool\"\"\"\n return _vector.IsFinite(val)\nIsFinite = _vector.IsFinite\n\ndef DistanceSquared(x, y, n):\n r\"\"\"DistanceSquared(double const * x, double const * y, int const n) -> double\"\"\"\n return _vector.DistanceSquared(x, y, n)\nDistanceSquared = _vector.DistanceSquared\n\ndef Distance(x, y, n):\n r\"\"\"Distance(double const * x, double const * y, int const n) -> double\"\"\"\n return _vector.Distance(x, y, n)\nDistance = _vector.Distance\n\nVector.__idiv__ = Vector.__itruediv__\n\n\n\n"
] | [
[
"numpy.array"
],
[
"numpy.log"
],
[
"numpy.ascontiguousarray",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
xmengli999/self_supervised | [
"b2d40d452d203f60330c84fb213c3ba848468366",
"b2d40d452d203f60330c84fb213c3ba848468366"
] | [
"lib/NCEAverage.py",
"datasets/fundus_pm_crossvalidation.py"
] | [
"import torch\nfrom torch.autograd import Function\nfrom torch import nn\nfrom .alias_multinomial import AliasMethod\nimport math\n\nclass NCEFunction(Function):\n @staticmethod\n def forward(self, x, y, memory, idx, params):\n K = int(params[0].item())\n T = params[1].item()\n Z = params[2].item()\n\n momentum = params[3].item()\n batchSize = x.size(0)\n outputSize = memory.size(0)\n inputSize = memory.size(1)\n\n # sample positives & negatives\n idx.select(1,0).copy_(y.data)\n\n # sample correspoinding weights\n weight = torch.index_select(memory, 0, idx.view(-1))\n weight.resize_(batchSize, K+1, inputSize)\n\n # inner product\n out = torch.bmm(weight, x.data.resize_(batchSize, inputSize, 1))\n out.div_(T).exp_() # batchSize * self.K+1\n x.data.resize_(batchSize, inputSize)\n\n if Z < 0:\n params[2] = out.mean() * outputSize\n Z = params[2].item()\n print(\"normalization constant Z is set to {:.1f}\".format(Z))\n\n out.div_(Z).resize_(batchSize, K+1)\n\n self.save_for_backward(x, memory, y, weight, out, params)\n\n return out\n\n @staticmethod\n def backward(self, gradOutput):\n x, memory, y, weight, out, params = self.saved_tensors\n K = int(params[0].item())\n T = params[1].item()\n Z = params[2].item()\n momentum = params[3].item()\n batchSize = gradOutput.size(0)\n \n # gradients d Pm / d linear = exp(linear) / Z\n gradOutput.data.mul_(out.data)\n # add temperature\n gradOutput.data.div_(T)\n\n gradOutput.data.resize_(batchSize, 1, K+1)\n \n # gradient of linear\n gradInput = torch.bmm(gradOutput.data, weight)\n gradInput.resize_as_(x)\n\n # update the non-parametric data\n weight_pos = weight.select(1, 0).resize_as_(x)\n weight_pos.mul_(momentum)\n weight_pos.add_(torch.mul(x.data, 1-momentum))\n w_norm = weight_pos.pow(2).sum(1, keepdim=True).pow(0.5)\n updated_weight = weight_pos.div(w_norm)\n memory.index_copy_(0, y, updated_weight)\n \n return gradInput, None, None, None, None\n\nclass NCEAverage(nn.Module):\n\n def __init__(self, inputSize, outputSize, K, T=0.07, momentum=0.5, Z=None):\n super(NCEAverage, self).__init__()\n self.nLem = outputSize\n self.unigrams = torch.ones(self.nLem)\n self.multinomial = AliasMethod(self.unigrams)\n self.multinomial.cuda()\n self.K = K\n\n self.register_buffer('params',torch.tensor([K, T, -1, momentum]));\n stdv = 1. / math.sqrt(inputSize/3)\n self.register_buffer('memory', torch.rand(outputSize, inputSize).mul_(2*stdv).add_(-stdv))\n \n def forward(self, x, y):\n batchSize = x.size(0)\n idx = self.multinomial.draw(batchSize * (self.K+1)).view(batchSize, -1)\n out = NCEFunction.apply(x, y, self.memory, idx, self.params)\n return out\n\n",
"import cv2\nimport sys\nif sys.version_info[0] == 2:\n import cPickle as pickle\nelse:\n import pickle\nimport torch.utils.data as data\nimport numpy as np\nfrom skimage.transform import resize\nfrom PIL import Image\n\nclass traindataset(data.Dataset):\n \"\"\"Face Landmarks dataset.\"\"\"\n\n def __init__(self, root, transform=None, train=True, args=None):\n \"\"\"\n Args:\n csv_file (string): Path to the csv file with annotations.\n root_dir (string): Directory with all the images.\n transform (callable, optional): Optional transform to be applied\n on a sample.\n \"\"\"\n self.root_dir = root\n self.transform = transform\n self.name = []\n self.train = train\n self.multiaug = args.multiaug\n self.multitask = args.multitask\n\n\n images_path = np.genfromtxt( self.root_dir + '/PAML/random_list.txt', dtype='str')\n images_path = list(images_path)\n images_path = [self.root_dir + item for item in images_path]\n\n num_fold = int(len(images_path) / 5)\n if args.seed == 0:\n test_path = images_path[:num_fold]\n elif args.seed == 1:\n test_path = images_path[num_fold:2*num_fold]\n elif args.seed == 2:\n test_path = images_path[2 * num_fold:3 * num_fold]\n elif args.seed == 3:\n test_path = images_path[3 * num_fold:4 * num_fold]\n elif args.seed == 4:\n test_path = images_path[4 * num_fold:5 * num_fold]\n\n train_path = list(set(images_path) - set(test_path))\n\n label_list_train = [1 if item.split(\"/\")[-1][0] == \"P\" else 0 for item in train_path]\n label_list_test = [1 if item.split(\"/\")[-1][0] == \"P\" else 0 for item in test_path]\n\n if self.train:\n self.train_dataset = []\n self.targets = []\n self.rotation_label = []\n for i in range(0, len(train_path)):\n image = cv2.imread(args.data + \"/PAML/resized_image_320/\" + train_path[i].split(\"/\")[-1])\n self.train_dataset.append(image)\n self.targets.append(label_list_train[i])\n self.name.append(train_path[i].split(\"/\")[-1])\n self.rotation_label.append(0)\n print(\"Train images PM \", len(self.train_dataset), \"P: \", sum(self.targets), \"Neg: \", len(self.targets) - sum(self.targets))\n else:\n self.train_dataset = []\n self.targets = []\n for i in range(0, len(test_path)):\n image = cv2.imread(args.data + \"/PAML/resized_image_320/\" + test_path[i].split(\"/\")[-1])\n self.train_dataset.append(image)\n self.targets.append(label_list_test[i])\n self.name.append(test_path[i].split(\"/\")[-1])\n print(\"Test images PM \", len(self.train_dataset), \"P: \", sum(self.targets), \"Neg: \", len(self.targets) - sum(self.targets))\n\n def __len__(self):\n return len(self.train_dataset)\n\n def __getitem__(self, idx):\n\n sample = self.train_dataset[idx]\n\n sample = Image.fromarray(np.uint8(sample))\n\n img = self.transform(sample)\n target = self.targets[idx]\n\n if self.train and self.multiaug:\n img2 = self.transform(sample)\n if self.multitask:\n rotation_label = self.rotation_label[idx]\n return [img, img2], [target, rotation_label], idx, self.name[idx]\n else:\n return [img, img2], target, idx, self.name[idx]\n\n return img, target, idx, self.name[idx]\n\nif __name__ == '__main__':\n count = 0\n tot_count = 0\n"
] | [
[
"torch.ones",
"torch.tensor",
"torch.mul",
"torch.bmm",
"torch.rand"
],
[
"numpy.uint8",
"numpy.genfromtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Mauriyin/TNT_pytorch | [
"fed7e182a45e5cf74d827f090d72251eedbd7cc1"
] | [
"tracklets/utils/pred_loc.py"
] | [
"import numpy as np\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom scipy import stats\n\n\n#TODO what does this function mean?\ndef pred_bbox_by_F(bbox, F, show_flag, img1, img2):\n # Create figure and axes\n if show_flag==1:\n fig1, ax1 = plt.subplots(1)\n # Display the image\n if show_flag==1:\n ax1.imshow(img1)\n \n pred_bbox = np.zeros((len(bbox),4))\n for n in range(len(bbox)):\n xmin = bbox[n,0]\n ymin = bbox[n,1]\n xmax = bbox[n,2]+bbox[n,0]\n ymax = bbox[n,3]+bbox[n,1]\n w = bbox[n,2]\n h = bbox[n,3]\n if show_flag==1:\n rect = patches.Rectangle((xmin,ymin), w, h, linewidth=1, edgecolor='#FF0000', facecolor='none')\n ax1.add_patch(rect)\n\n if show_flag==1:\n plt.show()\n \n # Display the image\n if show_flag==1:\n fig2, ax2 = plt.subplots(1)\n ax2.imshow(img2)\n \n for n in range(len(bbox)):\n xmin = bbox[n,0]\n ymin = bbox[n,1]\n xmax = bbox[n,2] + bbox[n,0]\n ymax = bbox[n,3] + bbox[n,1]\n w = bbox[n,2]\n h = bbox[n,3]\n \n temp_A = np.zeros((4,2))\n temp_b = np.zeros((4,1))\n temp_pt = np.zeros((1,3))\n temp_pt[0,:] = np.array([xmin,ymin,1])\n A1 = np.matmul(temp_pt, np.transpose(F))\n\n temp_A[0,0] = A1[0,0]\n temp_A[0,1] = A1[0,1]\n temp_b[0,0] = -A1[0,2]\n \n temp_pt[0,:] = np.array([xmax,ymin,1])\n A2 = np.matmul(temp_pt, np.transpose(F))\n temp_A[1,0] = A2[0,0]\n temp_A[1,1] = A2[0,1]\n temp_b[1,0] = -w * A2[0,0] - A2[0,2]\n \n temp_pt[0,:] = np.array([xmin,ymax,1])\n A3 = np.matmul(temp_pt, np.transpose(F))\n temp_A[2,0] = A3[0,0]\n temp_A[2,1] = A3[0,1]\n temp_b[2,0] = -h * A3[0,1] - A3[0,2]\n \n temp_pt[0,:] = np.array([xmax,ymax,1])\n A4 = np.matmul(temp_pt, np.transpose(F))\n temp_A[3,0] = A4[0,0]\n temp_A[3,1] = A4[0,1]\n temp_b[3,0] = -w * A4[0,0] - h * A4[0,1] - A4[0,2]\n \n new_loc = np.matmul(np.linalg.pinv(temp_A),temp_b)\n xmin = new_loc[0,0]\n ymin = new_loc[1,0]\n xmax = new_loc[0,0] + w\n ymax = new_loc[1,0] + h\n \n pred_bbox[n,0] = xmin\n pred_bbox[n,1] = ymin\n pred_bbox[n,2] = w\n pred_bbox[n,3] = h\n\n if show_flag==1:\n rect = patches.Rectangle((xmin,ymin),w,h,linewidth=1,edgecolor='#FF0000', facecolor='none')\n ax2.add_patch(rect)\n \n if show_flag==1:\n plt.show()\n\n return pred_bbox\n\n\ndef linear_pred(x):\n if len(x)==1:\n return x\n else:\n y = np.array(range(len(x)))\n slope, intercept, _, _, _ = stats.linregress(x, y)\n return slope * len(y) + intercept\n\n\ndef linear_pred_v2(tr_t, tr_y, ts_t):\n \"\"\"use (tr_t, tr_y) pred ts_y when ts_t in a linear way\n \"\"\"\n ts_y = np.ones(len(ts_t))\n if len(tr_t)==1:\n ts_y = ts_y*tr_y\n else:\n slope, intercept, _, _, _ = stats.linregress(tr_t, tr_y)\n ts_y = slope * ts_t + intercept\n return ts_y"
] | [
[
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.subplots",
"numpy.linalg.pinv",
"scipy.stats.linregress",
"numpy.transpose",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
eirikur16/flrs | [
"c98604593753def05086b54ce82f5551f01d2529"
] | [
"floris/tools/sowfa_utilities.py"
] | [
"# Copyright 2020 NREL\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\n# See https://floris.readthedocs.io for documentation\n\nimport os\nimport re\n\nimport numpy as np\nimport pandas as pd\n\nfrom .cut_plane import CutPlane, get_plane_from_flow_data\nfrom .flow_data import FlowData\nfrom ..utilities import Vec3\nfrom ..logging_manager import LoggerBase\n\n\nclass SowfaInterface(LoggerBase):\n \"\"\"\n Object to facilitate interaction with flow data output by SOWFA.\n\n Returns:\n :py:class:`floris.tools.sowfa_utilities.SowfaInterface`: object\n \"\"\"\n\n def __init__(\n self,\n case_folder,\n flow_data_sub_path=\"array_mean/array.mean0D_UAvg.vtk\",\n setup_sub_path=\"setUp\",\n turbine_array_sub_path=\"constant/turbineArrayProperties\",\n turbine_sub_path=\"constant/turbineProperties\",\n controlDict_sub_path=\"system/controlDict\",\n turbine_output_sub_path=\"turbineOutput/20000\",\n assumed_settling_time=None,\n ):\n \"\"\"\n SowfaInterface object init method.\n\n Args:\n case_folder (str): path to folder containing SOWFA data\n flow_data_sub_path (str, optional): path to mean data.\n Defaults to 'array_mean/array.mean0D_UAvg.vtk'.\n setup_sub_path (str, optional): path to setup info.\n Defaults to 'setUp'.\n turbine_array_sub_path (str, optional): path to wind plant\n info. Defaults to 'constant/turbineArrayProperties'.\n turbine_sub_path (str, optional): path to wind turbine\n info. Defaults to 'constant/turbineProperties'.\n controlDict_sub_path (str, optional): path to turbine\n controls info. Defaults to 'system/controlDict'.\n turbine_output_sub_path (str, optional): path to turbine\n operational data. Defaults to 'turbineOutput/20000'.\n assumed_settling_time (float, optional): Time to account\n for startup transients in simulation. 
Defaults to None.\n \"\"\"\n self.logger.info(case_folder)\n\n # Save the case_folder and sub_paths\n self.case_folder = case_folder\n self.setup_sub_path = setup_sub_path\n self.turbine_array_sub_path = turbine_array_sub_path\n self.turbine_sub_path = turbine_sub_path\n self.controlDict_sub_path = controlDict_sub_path\n self.turbine_output_sub_path = turbine_output_sub_path\n\n # Read in the input files\n\n # Get control settings from sc input file\n # TODO Assuming not dynamic and only one setting applied for each turbine\n # TODO If not using the super controller sowfa variant, need alternative\n\n # Get the turbine name and locations\n turbine_array_dict = read_foam_file(\n os.path.join(self.case_folder, self.turbine_array_sub_path)\n )\n self.turbine_name = turbine_array_dict[\"turbineType\"].replace(\n '\"', \"\"\n ) # TODO Assuming only one type\n self.layout_x, self.layout_y = get_turbine_locations(\n os.path.join(self.case_folder, self.turbine_array_sub_path)\n )\n\n # Save the number of turbines\n self.num_turbines = len(self.layout_x)\n\n # if SC input exists, use it for yaw and pitch as it will over-ride\n # if it does not exist, assume the values in turbineArray Properties\n if os.path.exists(os.path.join(self.case_folder, \"SC_INPUT.txt\")):\n df_SC = read_sc_input(self.case_folder)\n self.yaw_angles = df_SC.yaw.values\n self.pitch_angles = df_SC.pitch.values\n else:\n self.logger.info(\n \"No SC_INPUT.txt, getting pitch and yaw \" + \"from turbine array props\"\n )\n self.yaw_angles = get_turbine_yaw_angles(\n os.path.join(self.case_folder, self.turbine_array_sub_path)\n )\n self.pitch_angles = get_turbine_pitch_angles(\n os.path.join(self.case_folder, self.turbine_array_sub_path)\n )\n self.logger.info(self.yaw_angles)\n self.logger.info(self.pitch_angles)\n\n # Get the turbine rotor diameter and hub height\n turbine_dict = read_foam_file(\n os.path.join(self.case_folder, self.turbine_sub_path, self.turbine_name)\n )\n self.D = 2 * turbine_dict[\"TipRad\"]\n\n # Use the setup file and control file to determine the precursor wind\n # speed and the time flow averaging begins (settling time)\n setup_dict = read_foam_file(os.path.join(self.case_folder, self.setup_sub_path))\n controlDict_dict = read_foam_file(\n os.path.join(self.case_folder, self.controlDict_sub_path)\n )\n start_run_time = controlDict_dict[\"startTime\"]\n averaging_start_time = setup_dict[\"meanStartTime\"]\n if assumed_settling_time is not None:\n self.logger.info(\n \"Using assumed settling time of %.1f s\" % assumed_settling_time\n )\n self.settling_time = assumed_settling_time\n else:\n self.settling_time = averaging_start_time - start_run_time\n self.precursor_wind_speed = setup_dict[\"U0Mag\"]\n\n # Get the wind direction\n self.precursor_wind_dir = setup_dict[\"dir\"]\n\n # Get the surface roughness\n self.z0 = setup_dict[\"z0\"]\n\n # Read the outputs\n self.turbine_output = read_sowfa_df(\n os.path.join(self.case_folder, self.turbine_output_sub_path)\n )\n\n # Remove the settling time\n self.turbine_output = self.turbine_output[\n self.turbine_output.time > self.settling_time\n ]\n\n # Get the sim_time\n self.sim_time_length = self.turbine_output.time.max()\n\n # Read the flow data\n try:\n self.flow_data = self.read_flow_frame_SOWFA(\n os.path.join(case_folder, flow_data_sub_path)\n )\n\n # Re-set turbine positions to flow_field origin\n self.layout_x = self.layout_x - self.flow_data.origin.x1\n self.layout_y = self.layout_y - self.flow_data.origin.x2\n\n except FileNotFoundError:\n 
self.logger.info(\"No flow field found, setting NULL, origin at 0\")\n self.flow_data = None # TODO might need a null flow-field\n\n # Try to work out the precursor directory\n self.precursor_directory = \"unknown\"\n try:\n with open(os.path.join(case_folder, \"runscript.preprocess\"), \"r\") as fid:\n raw = fid.readlines()\n\n for i, line in enumerate(raw):\n if \"precursorDir=\" in line:\n self.precursor_directory = os.path.basename(\n line.replace(\"precursorDir=\", \"\")\n )\n\n except FileNotFoundError:\n self.logger.info(\"No preprocess file found\")\n\n def __str__(self):\n\n self.logger.info(\"---------------------\")\n self.logger.info(\"Case: %s\" % self.case_folder)\n self.logger.info(\"==Turbine Info==\")\n self.logger.info(\"Turbine: %s\" % self.turbine_name)\n self.logger.info(\"Diameter: %dm\" % self.D)\n self.logger.info(\"Num Turbines = %d\" % self.num_turbines)\n self.logger.info(\"==Control Settings==\")\n self.logger.info(\"Yaw Angles, [\" + \", \".join(map(str, self.yaw_angles)) + \"]\")\n self.logger.info(\n \"Pitch Angles, [\" + \", \".join(map(str, self.pitch_angles)) + \"]\"\n )\n self.logger.info(\"==Inflow Info==\")\n self.logger.info(\"U0Mag: %.2fm/s\" % self.precursor_wind_speed)\n self.logger.info(\"dir: %.1f\" % self.precursor_wind_dir)\n self.logger.info(\"==Timing Info==\")\n self.logger.info(\"Settling time: %.1fs\" % self.settling_time)\n self.logger.info(\"Simulation time: %.1fs\" % self.sim_time_length)\n self.logger.info(\"---------------------\")\n\n return \" \"\n\n def get_hor_plane(\n self, height, x_resolution=200, y_resolution=200, x_bounds=None, y_bounds=None\n ):\n \"\"\"\n Get a horizontal cut through plane at a specific height\n\n Args:\n height (float): height of cut plane, defaults to hub-height\n Defaults to Hub-height.\n x1_resolution (float, optional): output array resolution.\n Defaults to 200.\n x2_resolution (float, optional): output array resolution.\n Defaults to 200.\n x1_bounds (tuple, optional): limits of output array.\n Defaults to None.\n x2_bounds (tuple, optional): limits of output array.\n Defaults to None.\n\n Returns:\n horplane\n \"\"\"\n # Get points from flow data\n df = get_plane_from_flow_data(\n self.flow_data, normal_vector=\"z\", x3_value=height\n )\n\n # Compute and return the cutplane\n return CutPlane(df)\n\n def get_cross_plane(\n self, x_loc, x_resolution=200, y_resolution=200, x_bounds=None, y_bounds=None\n ):\n \"\"\"\n Get a horizontal cut through plane at a specific height\n\n Args:\n height (float): height of cut plane, defaults to hub-height\n Defaults to Hub-height.\n x1_resolution (float, optional): output array resolution.\n Defaults to 200.\n x2_resolution (float, optional): output array resolution.\n Defaults to 200.\n x1_bounds (tuple, optional): limits of output array.\n Defaults to None.\n x2_bounds (tuple, optional): limits of output array.\n Defaults to None.\n\n Returns:\n horplane\n \"\"\"\n # Get the points of data in a dataframe\n df = get_plane_from_flow_data(self.flow_data, normal_vector=\"x\", x3_value=x_loc)\n\n # Compute and return the cutplane\n return CutPlane(df)\n\n def get_y_plane(\n self, y_loc, x_resolution=200, y_resolution=200, x_bounds=None, y_bounds=None\n ):\n \"\"\"\n Get a horizontal cut through plane at a specific height\n\n Args:\n height (float): height of cut plane, defaults to hub-height\n Defaults to Hub-height.\n x1_resolution (float, optional): output array resolution.\n Defaults to 200.\n x2_resolution (float, optional): output array resolution.\n Defaults to 
200.\n x1_bounds (tuple, optional): limits of output array.\n Defaults to None.\n x2_bounds (tuple, optional): limits of output array.\n Defaults to None.\n\n Returns:\n horplane\n \"\"\"\n # Get the points of data in a dataframe\n df = get_plane_from_flow_data(self.flow_data, normal_vector=\"y\", x3_value=y_loc)\n\n # Compute and return the cutplane\n return CutPlane(df)\n\n def get_average_powers(self):\n \"\"\"\n Return the average power from the simulation per turbine\n\n Args:\n\n\n Returns:\n pow_list (numpy array): an array of powers per turbine\n \"\"\"\n pow_list = list()\n for t in range(self.num_turbines):\n df_sub = self.turbine_output[self.turbine_output.turbine == t]\n pow_list.append(df_sub.powerGenerator.mean())\n return np.array(pow_list)\n\n def get_time_power_t(self, t):\n \"\"\"\n Return the power over time of a specific turbine t\n\n Args:\n t, turbine number\n\n Returns:\n power\n \"\"\"\n return self.turbine_output[self.turbine_output.turbine == t].powerGenerator\n\n def get_average_thrust(self):\n \"\"\"\n Return the average thrust from the simulation per turbine\n\n Args:\n\n\n Returns:\n pow_list (numpy array): an array of thrust per turbine\n \"\"\"\n thrust_list = list()\n for t in range(self.num_turbines):\n df_sub = self.turbine_output[self.turbine_output.turbine == t]\n thrust_list.append(df_sub.thrust.mean())\n return np.array(thrust_list)\n\n def read_flow_frame_SOWFA(self, filename):\n \"\"\"\n Read flow array output from SOWFA\n\n Args:\n filename (str): name of file containing flow data.\n\n Returns:\n FlowData (pd.DataFrame): a pandas table with the columns,\n of all relavent flow info (e.g. x, y, z, u, v, w).\n \"\"\"\n # Read the dimension info from the file\n with open(filename, \"r\") as f:\n for _ in range(10):\n read_data = f.readline()\n if \"SPACING\" in read_data:\n splitstring = read_data.rstrip().split(\" \")\n spacing = Vec3(\n float(splitstring[1]),\n float(splitstring[2]),\n float(splitstring[3]),\n )\n if \"DIMENSIONS\" in read_data:\n splitstring = read_data.rstrip().split(\" \")\n dimensions = Vec3(\n int(splitstring[1]), int(splitstring[2]), int(splitstring[3])\n )\n if \"ORIGIN\" in read_data:\n splitstring = read_data.rstrip().split(\" \")\n origin = Vec3(\n float(splitstring[1]),\n float(splitstring[2]),\n float(splitstring[3]),\n )\n\n # Set up x, y, z as lists\n if dimensions.x1 > 1.0:\n xRange = np.arange(0, dimensions.x1 * spacing.x1, spacing.x1)\n else:\n xRange = np.array([0.0])\n\n if dimensions.x2 > 1.0:\n yRange = np.arange(0, dimensions.x2 * spacing.x2, spacing.x2)\n else:\n yRange = np.array([0.0])\n\n if dimensions.x3 > 1.0:\n zRange = np.arange(0, dimensions.x3 * spacing.x3, spacing.x3)\n else:\n zRange = np.array([0.0])\n\n pts = np.array([(x, y, z) for z in zRange for y in yRange for x in xRange])\n\n df = pd.read_csv(\n filename, skiprows=10, sep=\"\\t\", header=None, names=[\"u\", \"v\", \"w\"]\n )\n x = pts[:, 0]\n y = pts[:, 1]\n z = pts[:, 2]\n\n return FlowData(\n x, y, z, df.u.values, df.v.values, df.w.values, spacing, dimensions, origin\n )\n\n\ndef read_sc_input(case_folder, wind_direction=270.0):\n \"\"\"\n Read the super controller (SC) input file to get the wind farm\n control settings.\n\n Args:\n case_folder (str): path to folder containing SC data.\n wind_direction (float, optional): Wind direction.\n Defaults to 270..\n\n Returns:\n df_SC (pd.DataFrame): dataframe containing SC info.\n \"\"\"\n sc_file = os.path.join(case_folder, \"SC_INPUT.txt\")\n\n df_SC = pd.read_csv(sc_file, 
delim_whitespace=True)\n\n df_SC.columns = [\"time\", \"turbine\", \"yaw\", \"pitch\"]\n\n df_SC[\"yaw\"] = wind_direction - df_SC.yaw\n\n df_SC = df_SC.set_index(\"turbine\")\n\n return df_SC\n\n\ndef read_sowfa_df(folder_name, channels=[]):\n \"\"\"\n New function to use pandas to read in files using pandas\n\n Args:\n folder_name (str): where to find the outputs of ALL channels,\n not really used for now, but could be a list of desired\n channels to only read.\n channels (list, optional): list of specific channels to read.\n Defaults to [].\n \"\"\"\n # Get the availble outputs\n outputNames = [\n f\n for f in os.listdir(folder_name)\n if os.path.isfile(os.path.join(folder_name, f))\n ]\n\n # Remove the harder input files for now (undo someday)\n hardFiles = [\n \"Vtangential\",\n \"Cl\",\n \"Cd\",\n \"Vradial\",\n \"x\",\n \"y\",\n \"z\",\n \"alpha\",\n \"axialForce\",\n ]\n simpleFiles = [\n \"nacYaw\",\n \"rotSpeedFiltered\",\n \"rotSpeed\",\n \"thrust\",\n \"torqueGen\",\n \"powerRotor\",\n \"powerGenerator\",\n \"torqueRotor\",\n \"azimuth\",\n \"pitch\",\n ]\n\n # Limit to files\n if len(channels) == 0:\n outputNames = [o for o in outputNames if o in simpleFiles]\n else:\n outputNames = channels\n\n # Get the number of channels\n num_channels = len(outputNames)\n\n if num_channels == 0:\n raise ValueError(\"Is %s a data folder?\" % folder_name)\n\n # Now loop through the files\n for c_idx, chan in enumerate(outputNames):\n\n filename = os.path.join(folder_name, chan)\n\n # Load the file\n df_inner = pd.read_csv(filename, sep=\" \", header=None, skiprows=1)\n\n # Rename the columns\n df_inner.columns = [\"turbine\", \"time\", \"dt\", chan]\n\n # Drop dt\n df_inner = df_inner[[\"time\", \"turbine\", chan]].set_index([\"time\", \"turbine\"])\n\n # On first run declare the new frame\n if c_idx == 0:\n # Declare the main data frame to return as copy\n df = df_inner.copy(deep=True)\n\n # On other loops just add the new frame\n else:\n df[chan] = df_inner[chan]\n\n # Reset the index\n df = df.reset_index()\n\n # Zero the time\n df[\"time\"] = df.time - df.time.min()\n\n return df\n\n\ndef read_foam_file(filename):\n \"\"\"\n Method to read scalar and boolean/string inputs from an OpenFOAM\n input file.\n\n Args:\n filename (str): path to file to read.\n\n Returns:\n data (dict): dictionary with OpenFOAM inputs\n \"\"\"\n data = {}\n\n with open(filename, \"r\") as fid:\n raw = fid.readlines()\n\n count = 0\n bloc_comment_test = False\n for i, line in enumerate(raw):\n\n if raw[i][0:2] == \"/*\":\n bloc_comment_test = True\n\n if not bloc_comment_test:\n\n # Check if the string is a comment and skip line\n if raw[i].strip()[0:2] == \"//\" or raw[i].strip()[0:1] == \"#\":\n pass\n\n elif len(raw[i].strip()) == 0: # Check if the string is empty and skip line\n pass\n\n else:\n tmp = raw[i].strip().rstrip().split()\n try:\n data[tmp[0].replace('\"', \"\")] = np.float(tmp[1][:-1])\n except:\n try:\n data[tmp[0].replace('\"', \"\")] = tmp[1][:-1]\n except:\n next\n\n if raw[i][0:2] == \"\\*\":\n bloc_comment_test = False\n\n return data\n\n\ndef get_turbine_locations(turbine_array_file):\n \"\"\"\n Extract wind turbine locations from SOWFA data.\n\n Args:\n turbine_array_file (str): path to file containing wind plant\n layout data.\n\n Returns:\n layout_x (np.array): wind plant layout coodinates (east-west).\n layout_y (np.array): wind plant layout coodinates (north-south).\n \"\"\"\n x = list()\n y = list()\n\n with open(turbine_array_file, \"r\") as f:\n for line in f:\n if \"baseLocation\" 
in line:\n # Extract the coordinates\n data = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", line)\n\n # Append the data\n x.append(float(data[0]))\n y.append(float(data[1]))\n\n layout_x = np.array(x)\n layout_y = np.array(y)\n\n return layout_x, layout_y\n\n\ndef get_turbine_pitch_angles(turbine_array_file):\n \"\"\"\n Extract wind turbine blade pitch information from SOWFA data.\n\n Args:\n turbine_array_file (str): path to file containing pitch info.\n\n Returns:\n p (np.array): blade pitch info.\n \"\"\"\n p = list()\n\n with open(turbine_array_file, \"r\") as f:\n for line in f:\n if \"Pitch\" in line:\n # Extract the coordinates\n data = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", line)\n\n # Append the data\n p.append(float(data[0]))\n\n return np.array(p)\n\n\ndef get_turbine_yaw_angles(turbine_array_file, wind_direction=270.0):\n \"\"\"\n Extract wind turbine yaw angle information from SOWFA data.\n\n Args:\n turbine_array_file (str): path to file containing yaw info.\n wind_direction (float, optional): Wind direction.\n Defaults to 270..\n\n Returns:\n y (np.array): wind turbine yaw info.\n \"\"\"\n y = list()\n\n with open(turbine_array_file, \"r\") as f:\n for line in f:\n if \"NacYaw\" in line:\n # Extract the coordinates\n data = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", line)\n\n # Append the data\n y.append(wind_direction - float(data[0]))\n\n return np.array(y)\n"
] | [
[
"numpy.arange",
"numpy.array",
"pandas.read_csv",
"numpy.float"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
One-sixth/fid-helper-pytorch | [
"1d74e9e7e4622bd0ccb209a01a2cc10c74c73c01"
] | [
"gen_sample_from_stylegan3/torch_utils/custom_ops.py"
] | [
"# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# NVIDIA CORPORATION and its licensors retain all intellectual property\n# and proprietary rights in and to this software, related documentation\n# and any modifications thereto. Any use, reproduction, disclosure or\n# distribution of this software and related documentation without an express\n# license agreement from NVIDIA CORPORATION is strictly prohibited.\n\nimport glob\nimport hashlib\nimport importlib\nimport os\nimport re\nimport shutil\nimport uuid\n\nimport torch\nimport torch.utils.cpp_extension\nfrom torch.utils.file_baton import FileBaton\n\n#----------------------------------------------------------------------------\n# Global options.\n\nverbosity = 'brief' # Verbosity level: 'none', 'brief', 'full'\n\n#----------------------------------------------------------------------------\n# Internal helper funcs.\n\ndef _find_compiler_bindir():\n # patch here. To add VS 2022 x64 support.\n patterns = [\n 'C:/Program Files (x86)/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64',\n 'C:/Program Files (x86)/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64',\n 'C:/Program Files (x86)/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64',\n 'C:/Program Files (x86)/Microsoft Visual Studio */vc/bin',\n 'C:/Program Files/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64',\n 'C:/Program Files/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64',\n 'C:/Program Files/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64',\n 'C:/Program Files/Microsoft Visual Studio */vc/bin',\n ]\n for pattern in patterns:\n matches = sorted(glob.glob(pattern))\n if len(matches):\n return matches[-1]\n return None\n\n#----------------------------------------------------------------------------\n\ndef _get_mangled_gpu_name():\n name = torch.cuda.get_device_name().lower()\n out = []\n for c in name:\n if re.match('[a-z0-9_-]+', c):\n out.append(c)\n else:\n out.append('-')\n return ''.join(out)\n\n#----------------------------------------------------------------------------\n# Main entry point for compiling and loading C++/CUDA plugins.\n\n_cached_plugins = dict()\n\ndef get_plugin(module_name, sources, headers=None, source_dir=None, **build_kwargs):\n assert verbosity in ['none', 'brief', 'full']\n if headers is None:\n headers = []\n if source_dir is not None:\n sources = [os.path.join(source_dir, fname) for fname in sources]\n headers = [os.path.join(source_dir, fname) for fname in headers]\n\n # Already cached?\n if module_name in _cached_plugins:\n return _cached_plugins[module_name]\n\n # Print status.\n if verbosity == 'full':\n print(f'Setting up PyTorch plugin \"{module_name}\"...')\n elif verbosity == 'brief':\n print(f'Setting up PyTorch plugin \"{module_name}\"... ', end='', flush=True)\n verbose_build = (verbosity == 'full')\n\n # Compile and load.\n try: # pylint: disable=too-many-nested-blocks\n # Make sure we can find the necessary compiler binaries.\n if os.name == 'nt' and os.system(\"where cl.exe >nul 2>nul\") != 0:\n compiler_bindir = _find_compiler_bindir()\n if compiler_bindir is None:\n raise RuntimeError(f'Could not find MSVC/GCC/CLANG installation on this computer. 
Check _find_compiler_bindir() in \"{__file__}\".')\n os.environ['PATH'] += ';' + compiler_bindir\n\n # Some containers set TORCH_CUDA_ARCH_LIST to a list that can either\n # break the build or unnecessarily restrict what's available to nvcc.\n # Unset it to let nvcc decide based on what's available on the\n # machine.\n os.environ['TORCH_CUDA_ARCH_LIST'] = ''\n\n # Incremental build md5sum trickery. Copies all the input source files\n # into a cached build directory under a combined md5 digest of the input\n # source files. Copying is done only if the combined digest has changed.\n # This keeps input file timestamps and filenames the same as in previous\n # extension builds, allowing for fast incremental rebuilds.\n #\n # This optimization is done only in case all the source files reside in\n # a single directory (just for simplicity) and if the TORCH_EXTENSIONS_DIR\n # environment variable is set (we take this as a signal that the user\n # actually cares about this.)\n #\n # EDIT: We now do it regardless of TORCH_EXTENSIOS_DIR, in order to work\n # around the *.cu dependency bug in ninja config.\n #\n all_source_files = sorted(sources + headers)\n all_source_dirs = set(os.path.dirname(fname) for fname in all_source_files)\n if len(all_source_dirs) == 1: # and ('TORCH_EXTENSIONS_DIR' in os.environ):\n\n # Compute combined hash digest for all source files.\n hash_md5 = hashlib.md5()\n for src in all_source_files:\n with open(src, 'rb') as f:\n hash_md5.update(f.read())\n\n # Select cached build directory name.\n source_digest = hash_md5.hexdigest()\n build_top_dir = torch.utils.cpp_extension._get_build_directory(module_name, verbose=verbose_build) # pylint: disable=protected-access\n cached_build_dir = os.path.join(build_top_dir, f'{source_digest}-{_get_mangled_gpu_name()}')\n\n if not os.path.isdir(cached_build_dir):\n tmpdir = f'{build_top_dir}/srctmp-{uuid.uuid4().hex}'\n os.makedirs(tmpdir)\n for src in all_source_files:\n shutil.copyfile(src, os.path.join(tmpdir, os.path.basename(src)))\n try:\n os.replace(tmpdir, cached_build_dir) # atomic\n except OSError:\n # source directory already exists, delete tmpdir and its contents.\n shutil.rmtree(tmpdir)\n if not os.path.isdir(cached_build_dir): raise\n\n # Compile.\n cached_sources = [os.path.join(cached_build_dir, os.path.basename(fname)) for fname in sources]\n torch.utils.cpp_extension.load(name=module_name, build_directory=cached_build_dir,\n verbose=verbose_build, sources=cached_sources, **build_kwargs)\n else:\n torch.utils.cpp_extension.load(name=module_name, verbose=verbose_build, sources=sources, **build_kwargs)\n\n # Load.\n module = importlib.import_module(module_name)\n\n except:\n if verbosity == 'brief':\n print('Failed!')\n raise\n\n # Print status and add to cache dict.\n if verbosity == 'full':\n print(f'Done setting up PyTorch plugin \"{module_name}\".')\n elif verbosity == 'brief':\n print('Done.')\n _cached_plugins[module_name] = module\n return module\n\n#----------------------------------------------------------------------------\n"
] | [
[
"torch.utils.cpp_extension.load",
"torch.utils.cpp_extension._get_build_directory",
"torch.cuda.get_device_name"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Little-Potato-1990/learn_python | [
"9e54d150ef73e4bf53f8cd9b28a2a8bc65593fe1"
] | [
"Python-Data-Science-Handbook/notebooks/helpers_05_08.py"
] | [
"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.tree import DecisionTreeClassifier\nfrom ipywidgets import interact\n\n\ndef visualize_tree(estimator, X, y, boundaries=True,\n xlim=None, ylim=None, ax=None):\n ax = ax or plt.gca()\n \n # 绘制训练数据点\n ax.scatter(X[:, 0], X[:, 1], c=y, s=30, cmap='viridis',\n clim=(y.min(), y.max()), zorder=3)\n ax.axis('tight')\n ax.axis('off')\n if xlim is None:\n xlim = ax.get_xlim()\n if ylim is None:\n ylim = ax.get_ylim()\n \n # 拟合评估器\n estimator.fit(X, y)\n xx, yy = np.meshgrid(np.linspace(*xlim, num=200),\n np.linspace(*ylim, num=200))\n Z = estimator.predict(np.c_[xx.ravel(), yy.ravel()])\n\n # 将结果放到颜色图表中\n n_classes = len(np.unique(y))\n Z = Z.reshape(xx.shape)\n contours = ax.contourf(xx, yy, Z, alpha=0.3,\n levels=np.arange(n_classes + 1) - 0.5,\n cmap='viridis', clim=(y.min(), y.max()),\n zorder=1)\n\n ax.set(xlim=xlim, ylim=ylim)\n \n # 绘制决策树边界\n def plot_boundaries(i, xlim, ylim):\n if i >= 0:\n tree = estimator.tree_\n \n if tree.feature[i] == 0:\n ax.plot([tree.threshold[i], tree.threshold[i]], ylim, '-k', zorder=2)\n plot_boundaries(tree.children_left[i],\n [xlim[0], tree.threshold[i]], ylim)\n plot_boundaries(tree.children_right[i],\n [tree.threshold[i], xlim[1]], ylim)\n \n elif tree.feature[i] == 1:\n ax.plot(xlim, [tree.threshold[i], tree.threshold[i]], '-k', zorder=2)\n plot_boundaries(tree.children_left[i], xlim,\n [ylim[0], tree.threshold[i]])\n plot_boundaries(tree.children_right[i], xlim,\n [tree.threshold[i], ylim[1]])\n \n if boundaries:\n plot_boundaries(0, xlim, ylim)\n\n\ndef plot_tree_interactive(X, y):\n def interactive_tree(depth=5):\n clf = DecisionTreeClassifier(max_depth=depth, random_state=0)\n visualize_tree(clf, X, y)\n\n return interact(interactive_tree, depth=[1, 5])\n\n\ndef randomized_tree_interactive(X, y):\n N = int(0.75 * X.shape[0])\n \n xlim = (X[:, 0].min(), X[:, 0].max())\n ylim = (X[:, 1].min(), X[:, 1].max())\n \n def fit_randomized_tree(random_state=0):\n clf = DecisionTreeClassifier(max_depth=15)\n i = np.arange(len(y))\n rng = np.random.RandomState(random_state)\n rng.shuffle(i)\n visualize_tree(clf, X[i[:N]], y[i[:N]], boundaries=False,\n xlim=xlim, ylim=ylim)\n \n interact(fit_randomized_tree, random_state=[0, 100]);\n"
] | [
[
"matplotlib.pyplot.gca",
"numpy.linspace",
"numpy.unique",
"numpy.arange",
"sklearn.tree.DecisionTreeClassifier",
"numpy.random.RandomState"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shenghexu/CORL | [
"9f098b110b7ea36533542ddc89e473678f728d8b",
"9f098b110b7ea36533542ddc89e473678f728d8b"
] | [
"RocketFuel/SegmentRouting/rf1221_distributed/Equal/Parse_Equal.py",
"Abilene/Direct_FW/Direct_FW.py"
] | [
"\nimport math\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport scipy.io\n\nfrom pdb import set_trace as bp\n\n\nfor i in range(10):\n file_name = '../CORL_FW_SR/CORL_record_%d.txt'%(i)\n equal_delay = np.zeros(10000)\n equal_gain = np.zeros(10000)\n orig_delay = np.zeros(10000)\n count = 0\n with open(file_name) as file_in : \n for line in file_in:\n if count == 0:\n \tcount = count + 1\n else:\n count = count + 1\n\n e_delay_t = float(line.split()[1])\n o_delay_t = float(line.split()[2])\n num = int(line.split()[0])\n if num != count-2:\n bp()\n\n equal_delay[count-2] = e_delay_t\n equal_gain[count-2] = -1*(e_delay_t-o_delay_t)/o_delay_t\n orig_delay[count-2] = o_delay_t\n\n scipy.io.savemat('./Equal_%d.mat'%(i), dict(equal_delay=equal_delay, equal_gain=equal_gain, orig_delay=orig_delay))\n",
"from EP_N_Env import *\nimport math\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport scipy.optimize\n\nfrom pdb import set_trace as bp\n\n\nTensor = torch.FloatTensor\n\ndef get_input_optimizer(input_array):\n # this line to show that input is a parameter that requires a gradient\n optimizer = optim.Adam([input_array.requires_grad_()], lr=5e-2)\n return optimizer\n\n\n\n\nclass Ave_D_Loss(nn.Module):\n def __init__(self, loads, loads_all_t, N_node_in, N_node_out, weights, Caps, delay_ps, Link_mtx):\n super(Ave_D_Loss, self).__init__()\n #self.net = net.eval()\n # loads should be 1 X N_node\n self.load_mtx = loads.repeat([N_node_in, 1])\n #self.loads_all = loads_all\n self.load_mtx.requires_grad = False\n self.loads_all_t = Tensor(loads_all_t)\n self.N_node_in = N_node_in\n self.N_node_out = N_node_out\n self.Caps = Tensor(Caps)\n self.delay_ps = Tensor(delay_ps)\n self.Link_mtx = Tensor(Link_mtx)\n self.target_loads = torch.sum(self.loads_all_t[0:self.N_node_in, self.N_node_in:self.N_node_in+self.N_node_out], 0)\n self.weights = Tensor(weights)\n\n\n\n def forward(self, in_x):\n # X source X dest\n # loads dest\n #torch.autograd.set_detect_anomaly(True)\n x_portion = in_x\n x_final = x_portion*self.load_mtx\n\n loads_all_t_2 = self.loads_all_t.clone()\n loads_all_t_2[0:self.N_node_in,self.N_node_in:self.N_node_in+self.N_node_out] = x_final\n #bp()\n link_ut = torch.mv(torch.t(self.Link_mtx), loads_all_t_2.flatten())\n #link_ut = link_ut.squeeze(1)\n uts = link_ut/self.Caps\n link_ut = self.Caps - link_ut\n link_ut = link_ut/self.Caps\n link_ut[link_ut<0] = -1\n delays = self.weights/link_ut\n delays[delays<0.0] = 1000.0\n delays[delays>1000.0] = 1000.0\n delays = delays + self.delay_ps\n #bp()\n\n #bp()\n\n delays = torch.squeeze(delays)\n #bp()\n #print(delays)\n delays = torch.mv(self.Link_mtx, delays)\n #delays = delays.squeeze(1)\n\n #row_sum = torch.sum(loads_all_t_2, 1)\n #row_sum[row_sum==0] = 1.0\n #loads_all_t_2[row_sum!=0, :] = loads_all_t_2[row_sum!=0, :]/row_sum[row_sum!=0].unsqueeze(1)\n #loads_all_t_2 = loads_all_t_2/row_sum.unsqueeze(1)\n #bp()\n\n #delays = delays*loads_all_t_2.flatten()\n #print(delays)\n\n\n\n delays = delays.view(12, 12)\n delays = delays[0:self.N_node_in, self.N_node_in:self.N_node_in+self.N_node_out]\n #X_mtx_2_sel = x_final\n #bp()\n delays = delays*x_final\n ave_delay = torch.sum(delays)/torch.sum(x_final)\n\n\n return x_final, ave_delay\n\nclass Ave_D_Loss_Batch(nn.Module):\n def __init__(self, loads, loads_all_t, N_node_in, N_node_out, weights, Caps, delay_ps, Link_mtx):\n super(Ave_D_Loss_Batch, self).__init__()\n #self.net = net.eval()\n # loads should be 1 X N_node\n self.load_mtx = loads.repeat([N_node_in, 1])\n #self.loads_all = loads_all\n self.load_mtx.requires_grad = False\n self.loads_all_t = Tensor(loads_all_t)\n self.N_node_in = N_node_in\n self.N_node_out = N_node_out\n self.Caps = Tensor(Caps)\n self.delay_ps = Tensor(delay_ps)\n self.Link_mtx = Tensor(Link_mtx)\n self.target_loads = torch.sum(self.loads_all_t[0:self.N_node_in, self.N_node_in:self.N_node_in+self.N_node_out], 0)\n self.weights = Tensor(weights)\n\n\n\n def forward(self, in_x):\n # X source X dest\n # loads dest\n #torch.autograd.set_detect_anomaly(True)\n x_portion = in_x\n x_final = x_portion*self.load_mtx\n\n batch_size = x_portion.size()[0]\n\n loads_all_t_2 = self.loads_all_t.clone().unsqueeze(0).repeat(batch_size, 1, 1)\n\n loads_all_t_2[:, 
0:self.N_node_in,self.N_node_in:self.N_node_in+self.N_node_out] = x_final\n\n #bp()\n #bp()\n link_ut = torch.mm(loads_all_t_2.view(batch_size, -1), self.Link_mtx)\n #link_ut = link_ut.squeeze(1)\n #uts = link_ut/self.Caps\n #bp()\n link_ut = self.Caps.unsqueeze(0).repeat(batch_size, 1) - link_ut\n link_ut = link_ut/self.Caps\n link_ut[link_ut<0] = -1\n delays = self.weights.unsqueeze(0).repeat(batch_size, 1)/link_ut\n delays[delays<0.0] = 1000.0\n delays[delays>1000.0] = 1000.0\n delays = delays + self.delay_ps.unsqueeze(0).repeat(batch_size, 1)\n #bp()\n\n #bp()\n\n #delays = torch.squeeze(delays)\n #bp()\n #print(delays)\n delays = torch.mm(delays, torch.t(self.Link_mtx))\n #delays = delays.squeeze(1)\n\n #row_sum = torch.sum(loads_all_t_2, 2)\n #row_sum[row_sum==0] = 1.0\n #loads_all_t_2[row_sum!=0, :] = loads_all_t_2[row_sum!=0, :]/row_sum[row_sum!=0].unsqueeze(1)\n #loads_all_t_2 = loads_all_t_2/row_sum.unsqueeze(2)\n #bp()\n\n #delays = delays*loads_all_t_2.view(batch_size, -1)\n #print(delays)\n\n delays = delays.view(batch_size, 12, 12)\n delays = delays[:, 0:self.N_node_in, self.N_node_in:self.N_node_in+self.N_node_out]\n #bp()\n\n\n #delays = delays[0:self.N_node_in, self.N_node_in:self.N_node_in+self.N_node_out]\n #X_mtx_2_sel = x_final\n #bp()\n delays = delays*x_final\n ave_delay = torch.sum(torch.sum(delays, 2), 1)/torch.sum(torch.sum(x_final, 2), 1)\n\n\n # delays = delays*x_portion\n # ave_delay = torch.sum(torch.sum(delays, 2), 1)/self.N_node_out\n\n\n return x_final, ave_delay\n\nclass Actor(nn.Module):\n def __init__(self, input_size, hidden_size, N_in, N_out):\n super(Actor, self).__init__()\n self.linear1 = nn.Linear(input_size, hidden_size)\n self.linear2 = nn.Linear(hidden_size, hidden_size)\n self.linear3 = nn.Linear(hidden_size, N_in*N_out)\n self.N_in = N_in\n self.N_out = N_out\n \n def forward(self, s):\n\n x = F.relu(self.linear1(s))\n #bp()\n x = F.relu(self.linear2(x))\n x = self.linear3(x)\n #bp()\n\n x = x.view(-1, self.N_in, self.N_out)\n #bp()\n x = torch.nn.functional.softmax(x, dim=1)\n #bp()\n x = x.view(-1, self.N_in*self.N_out)\n\n return x\n\n\nclass Critic(nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super().__init__()\n self.linear1 = nn.Linear(input_size, hidden_size)\n self.linear2 = nn.Linear(hidden_size, hidden_size)\n self.linear3 = nn.Linear(hidden_size, output_size)\n\n def forward(self, stt, act):\n #x = torch.cat([s, a], 1)\n #bp()\n #bs = a.size(0)\n #bp()\n\n x = stt*act\n x = F.relu(self.linear1(x))\n x = F.relu(self.linear2(x))\n x = self.linear3(x)\n\n return x\n\n\nclass Agent(object):\n def __init__(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n s_dim = self.env.observation_shape\n a_dim = self.env.action_shape\n self.N_in = self.env.N_node_in\n self.N_out = self.env.N_node_out\n self.N_init = 1000\n\n\n #self.actor = Actor(s_dim, 256, self.N_in, self.N_out)\n #self.actor_target = Actor(s_dim, 256, self.N_in, self.N_out)\n self.critic = Critic(a_dim, 256, 1)\n #bp()\n self.critic_target = Critic(a_dim, 256, 1)\n #self.actor_optim = optim.Adam(self.actor.parameters(), lr = self.actor_lr)\n self.critic_optim = optim.Adam(self.critic.parameters(), lr = self.critic_lr)\n self.buffer = []\n \n #self.actor_target.load_state_dict(self.actor.state_dict())\n self.critic_target.load_state_dict(self.critic.state_dict())\n\n A_eq = np.zeros((self.N_out, self.N_in*self.N_out))\n\n for i in range(self.N_out):\n for j in range(self.N_in):\n A_eq[i][i+j*self.N_out] = 1.0\n\n 
self.A_eq = A_eq\n self.b_eq = np.zeros(self.N_out)+1.0\n\n # if os.path.isfile('../Topology_Ab/Top_Ab_ECMP.mat'):\n # f_mat = scipy.io.loadmat('../Topology_Ab/Top_Ab_ECMP.mat')\n # self.Link_mtx = f_mat['Link_mtx']\n # self.Caps = f_mat['Caps'][0]/8e6\n # self.weights = f_mat['weights'][0]\n # self.delay_ps = f_mat['delay_ps'][0]\n # #bp()\n # else:\n # print('Config File Does Not Exist!!!')\n # self.Link_mtx_trans = self.Link_mtx.transpose()\n self.Link_mtx = self.env.Link_mtx \n self.Caps = self.env.Caps\n self.weights = self.env.weights\n self.delay_ps = self.env.delay_ps\n self.Link_mtx_trans = self.env.Link_mtx_trans\n\n self.x_bounds = []\n\n for i in range(a_dim):\n self.x_bounds.append((0.0,1.0))\n\n \n def act(self, s0, all_mtx):\n s0 = torch.tensor(s0, dtype=torch.float).unsqueeze(0)\n load_temp = Variable(Tensor(s0), requires_grad=False)\n #a0 = self.actor(s0).squeeze(0).detach().numpy()\n x_init_np = np.random.uniform(0.0, 1.0, (self.N_init, self.N_in, self.N_out))\n #bp()\n\n \n for i in range(self.N_init):\n for j in range(self.N_out):\n x_init_np[i,:,j] = x_init_np[i,:,j]/np.sum(x_init_np[i,:,j])\n #bp()\n\n #bp()\n\n\n x_init = Tensor(x_init_np)\n #bp()\n #x_init = torch.nn.functional.softmax(x_init, dim=1)\n #x_init_final = load_temp.unsqueeze(0).repeat(self.N_init, 1, 1)*x_init\n #d_temp_a = -1*self.critic(load_temp.unsqueeze(0).repeat(self.N_init, self.N_in, 1).view(self.N_init, -1), x_init.view(self.N_init, -1))\n D_loss_i = Ave_D_Loss(load_temp, all_mtx, self.N_in, self.N_out, self.weights, self.Caps, self.delay_ps, self.Link_mtx)\n\n #D_loss_i_debug = Ave_D_Loss_debug(load_temp, all_mtx, self.N_in, self.N_out, self.weights, self.Caps, self.delay_ps, self.Link_mtx)\n\n D_loss_i_batch = Ave_D_Loss_Batch(load_temp, all_mtx, self.N_in, self.N_out, self.weights, self.Caps, self.delay_ps, self.Link_mtx)\n __, d_batch_a = D_loss_i_batch(x_init)\n init_n_min = torch.argmin(d_batch_a, dim=0)\n x_chosen = x_init[init_n_min]\n #bp()\n x_chosen = x_chosen.squeeze(0)\n\n\n x = Variable(x_chosen, requires_grad = True)\n #bp()\n #print(torch.sum(x, 0))\n optimizer = get_input_optimizer(x)\n\n #load_temp = np.expand_dims(s0, axis=0)\n #bp()\n #bp()\n \n #bp()\n \n\n opt_step = 0\n x_grad_prev = None\n d_prev = None\n\n while opt_step < 100:\n \n\n \n optimizer.zero_grad()\n x_temp, d_temp = D_loss_i(x)\n #bp()\n\n if d_prev is not None:\n print(d_prev - d_temp)\n\n d_prev = d_temp\n\n d_temp.backward()\n x_grad = x.grad\n\n if x_grad_prev is not None and (torch.sum(torch.abs(x_grad - x_grad_prev))) <1e-5:\n bp()\n #bp()\n #if x_grad is None:\n #bp()\n x_grad_flat = x_grad.flatten().detach().numpy()\n\n res = scipy.optimize.linprog(x_grad_flat, A_eq=self.A_eq, b_eq=self.b_eq, bounds=self.x_bounds)\n\n if res.success:\n\n s_from_grad = res.x\n else:\n print('Linear Optimization Error')\n\n #bp()\n\n dt = s_from_grad - x.flatten().detach().numpy()\n #bp()\n\n gt = -1*np.sum(x_grad_flat*dt)\n\n if gt<0:\n print('GT error!!!!!!! 
%e'%(gt))\n #bp()\n\n if gt < 1e-9:\n print('Optimization stopped at step %d'%(opt_step))\n break\n\n step_size = 2/(2+opt_step)\n\n dt = Tensor(dt).view(self.N_in, self.N_out)\n\n lin_val = torch.linspace(0.0, 1.0, steps=1000).unsqueeze(1).unsqueeze(2)\n\n x_lin = x.data.unsqueeze(0) + lin_val*dt.unsqueeze(0)\n\n #bp()\n\n #__, d_batch = D_loss_i_batch(x_lin)\n # for i in range(100):\n\n # __, d_batch[i] = D_loss_i(x.data + lin_val[i]*dt)\n\n #step_idx = torch.argmin(d_batch)\n\n\n\n\n x2 = x.data + step_size*dt\n x = Variable(x2, requires_grad = True)\n\n\n\n\n opt_step = opt_step + 1\n x_grad_prev = x_grad\n\n\n\n\n x_temp, d_temp = D_loss_i(x)\n\n\n # test_case = Tensor([[0,0,0,0,0,0,0,0,0,0,0,0], \n # [0,0,0,0,0,0,0,0,0,0,0,0],\n # [1,1,1,0,0,1,0,0,1,0,0,1],\n # [0,0,0,1,0,0,0,1,0,1,1,0],\n # [0,0,0,0,1,0,1,0,0,0,0,0],\n # [0,0,0,0,0,0,0,0,0,0,0,0]])\n\n # print('orig')\n # tx, ty = D_loss_i_debug(x)\n # print('CO')\n\n # tx, ty = D_loss_i_debug(test_case)\n\n # test_case[2,0]=0.0\n\n # test_case[0,0]= 1\n\n\n # tx, ty = D_loss_i_debug(test_case)\n\n\n\n\n print('Latency est:%e'%(d_temp.detach().numpy()))\n\n # bp()\n \n x2 = x.detach().numpy()\n\n\n return x2\n \n def put(self, *transition): \n if len(self.buffer)== self.capacity:\n self.buffer.pop(0)\n self.buffer.append(transition)\n\n def clear(self):\n self.buffer.clear()\n \n def learn(self):\n if len(self.buffer) < self.batch_size:\n return \n \n samples = random.sample(self.buffer, self.batch_size)\n \n s0, a0, r1, s1 = zip(*samples)\n\n #bp()\n \n s0 = torch.tensor(s0, dtype=torch.float)\n s0 = s0.unsqueeze(1)\n s0 = s0.repeat(1, self.N_in, 1)\n s0 = s0.view(self.batch_size, -1)\n a0 = torch.tensor(a0, dtype=torch.float).view(self.batch_size,-1)\n #bp()\n r1 = torch.tensor(r1, dtype=torch.float).view(self.batch_size,-1)\n s1 = torch.tensor(s1, dtype=torch.float)\n #bp()\n \n def critic_learn():\n #a1 = self.actor_target(s1).detach()\n #y_true = r1 + self.gamma * self.critic_target(s1, a1).detach()\n \n y_pred = self.critic(s0, a0)\n \n loss_fn = nn.MSELoss()\n loss = loss_fn(y_pred, r1)\n self.critic_optim.zero_grad()\n loss.backward()\n self.critic_optim.step()\n \n def soft_update(net_target, net, tau):\n for target_param, param in zip(net_target.parameters(), net.parameters()):\n target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)\n \ndef Run_Simulation(rand_seed_n):\n \n \n\n rep_time = 1\n env = EP_Env(4, 8, rep_time, rand_seed_n*10)\n\n\n txt_file = './D_FW_record_%d.txt'%(rand_seed_n)\n with open(txt_file, 'w') as filep:\n filep.write('Sample equal_delay orig_delay fw_delay gain\\n')\n\n FW_delays = np.zeros(40000)\n FW_actions = np.zeros((40000, 4, 8))\n\n params = {\n 'env': env,\n 'gamma': 0.99, \n 'actor_lr': 0.001, \n 'critic_lr': 0.001,\n 'tau': 0.02,\n 'capacity': 1000, \n 'batch_size': 32,\n }\n\n agent = Agent(**params)\n\n for episode in range(1):\n s0, all_mtx = env.initial_state()\n #s_max = np.max(s0)\n #s0= s0/s_max\n \n \n for step in range(40000*rep_time):\n \n\n a0 = agent.act(s0, all_mtx)\n #bp()\n d_o, d_e, d_r, s1, all_mtx = env.env_step(np.reshape(a0, (4, 8)))\n #s1 = s1/s_max\n r1 = -1*d_r\n #print(a0)\n \n s0 = s1\n if step % rep_time ==0:\n #bp()\n print('step:%d, eq_delay:%e, orig_delay:%e, nn_delay:%e, gain:%e'%(step, d_e, d_o, d_r, (d_o-d_r)/d_o))\n record_file = open(txt_file, 'a')\n record_file.write('%d %e %e %e %e\\n'%(step, d_e, d_o, d_r, (d_o-d_r)/d_o))\n record_file.close()\n FW_delays[step] = d_r\n FW_actions[step] = np.reshape(a0, (4, 8))\n # if step % 1000 
==0 and step>100:\n # env.Change_weights(step)\n # agent.Link_mtx = env.Link_mtx\n # agent.Link_mtx_trans = agent.Link_mtx.transpose()\n \n\n scipy.io.savemat('./Direct_FW_%d.mat'%(rand_seed_n), dict(FW_delays=FW_delays, FW_actions=FW_actions))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('')\n parser.add_argument('--seed_n', type=int)\n\n args = parser.parse_args()\n Run_Simulation(args.seed_n)\n"
] | [
[
"numpy.zeros"
],
[
"torch.abs",
"torch.nn.functional.softmax",
"torch.mv",
"numpy.sum",
"torch.linspace",
"numpy.reshape",
"torch.sum",
"torch.argmin",
"torch.tensor",
"torch.nn.Linear",
"numpy.random.uniform",
"torch.t",
"numpy.zeros",
"torch.nn.MSELoss",
"torch.squeeze"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
noahjgreen295/feature_engine | [
"77248ade812f03bb1b9c0c129320f0c0baad61ed",
"77248ade812f03bb1b9c0c129320f0c0baad61ed",
"77248ade812f03bb1b9c0c129320f0c0baad61ed",
"77248ade812f03bb1b9c0c129320f0c0baad61ed"
] | [
"tests/test_selection/test_recursive_feature_selectors.py",
"feature_engine/datetime/_datetime_constants.py",
"feature_engine/outliers/winsorizer.py",
"tests/test_transformation/test_boxcox_transformer.py"
] | [
"import numpy as np\nimport pytest\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import Lasso, LogisticRegression\nfrom sklearn.model_selection import KFold, StratifiedKFold\nfrom sklearn.tree import DecisionTreeRegressor\n\nfrom feature_engine.selection import (\n RecursiveFeatureAddition,\n RecursiveFeatureElimination,\n)\n\n_selectors = [\n RecursiveFeatureElimination,\n RecursiveFeatureAddition,\n]\n\n_input_params = [\n (RandomForestClassifier(), \"roc_auc\", 3, 0.1, None),\n (Lasso(), \"neg_mean_squared_error\", KFold(), 0.01, [\"var_a\", \"var_b\"]),\n (DecisionTreeRegressor(), \"r2\", StratifiedKFold(), 0.5, [\"var_a\"]),\n (RandomForestClassifier(), \"accuracy\", 5, 0.002, \"var_a\"),\n]\n\n\[email protected](\"_selector\", _selectors)\[email protected](\n \"_estimator, _scoring, _cv, _threshold, _variables\", _input_params\n)\ndef test_input_params_assignment(\n _selector, _estimator, _scoring, _cv, _threshold, _variables\n):\n sel = _selector(\n estimator=_estimator,\n scoring=_scoring,\n cv=_cv,\n threshold=_threshold,\n variables=_variables,\n )\n\n assert sel.estimator == _estimator\n assert sel.scoring == _scoring\n assert sel.cv == _cv\n assert sel.threshold == _threshold\n assert sel.variables == _variables\n\n\n_thresholds = [None, [0.1], \"a_string\"]\n\n\[email protected](\"_selector\", _selectors)\[email protected](\"_thresholds\", _thresholds)\ndef test_raises_threshold_error(_selector, _thresholds):\n with pytest.raises(ValueError):\n _selector(RandomForestClassifier(), threshold=_thresholds)\n\n\n_estimators_and_results = [\n (\n RandomForestClassifier(random_state=1),\n Lasso(alpha=0.01, random_state=1),\n 0.9971,\n 0.8489,\n ),\n (\n LogisticRegression(random_state=1),\n DecisionTreeRegressor(random_state=1),\n 0.9966,\n 0.9399,\n ),\n]\n\n\[email protected](\"_selector\", _selectors)\[email protected](\"_classifier, _regressor, _roc, _r2\", _estimators_and_results)\ndef test_fit_initial_model_performance(\n _selector, _classifier, _regressor, _roc, _r2, df_test\n):\n X, y = df_test\n\n sel = _selector(_classifier).fit(X, y)\n\n assert np.round(sel.initial_model_performance_, 4) == _roc\n\n sel = _selector(\n _regressor,\n scoring=\"r2\",\n ).fit(X, y)\n\n assert np.round(sel.initial_model_performance_, 4) == _r2\n\n\n_estimators_importance = [\n (\n RandomForestClassifier(random_state=1),\n [\n 0.0238,\n 0.0042,\n 0.0022,\n 0.0021,\n 0.2583,\n 0.0034,\n 0.2012,\n 0.38,\n 0.0145,\n 0.1044,\n 0.0035,\n 0.0024,\n ],\n ),\n (\n LogisticRegression(random_state=1),\n [\n 1.4106,\n 0.1924,\n 0.0876,\n 0.066,\n 0.5421,\n 0.0825,\n 0.5658,\n 2.1938,\n 1.5259,\n 0.1173,\n 0.1673,\n 0.1792,\n ],\n ),\n (\n Lasso(alpha=0.01, random_state=1),\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2126, 0.0557, 0.0, 0.0, 0.0],\n ),\n (\n DecisionTreeRegressor(random_state=1),\n [\n 0.0016,\n 0.0,\n 0.002,\n 0.002,\n 0.0013,\n 0.001,\n 0.0026,\n 0.976,\n 0.0106,\n 0.0,\n 0.0006,\n 0.0022,\n ],\n ),\n]\n\n\[email protected](\"_estimator, _importance\", _estimators_importance)\ndef test_feature_importances(_estimator, _importance, df_test):\n X, y = df_test\n\n sel = RecursiveFeatureAddition(_estimator).fit(X, y)\n _importance.sort(reverse=True)\n assert list(np.round(sel.feature_importances_.values, 4)) == _importance\n\n sel = RecursiveFeatureElimination(_estimator).fit(X, y)\n _importance.sort(reverse=False)\n assert list(np.round(sel.feature_importances_.values, 4)) == _importance\n",
"import numpy as np\n\nFEATURES_SUPPORTED = [\n \"month\",\n \"quarter\",\n \"semester\",\n \"year\",\n \"week\",\n \"day_of_week\",\n \"day_of_month\",\n \"day_of_year\",\n \"weekend\",\n \"month_start\",\n \"month_end\",\n \"quarter_start\",\n \"quarter_end\",\n \"year_start\",\n \"year_end\",\n \"leap_year\",\n \"days_in_month\",\n \"hour\",\n \"minute\",\n \"second\",\n]\n\nFEATURES_DEFAULT = [\n \"month\",\n \"year\",\n \"day_of_week\",\n \"day_of_month\",\n \"hour\",\n \"minute\",\n \"second\",\n]\n\nFEATURES_SUFFIXES = {\n \"month\": \"_month\",\n \"quarter\": \"_quarter\",\n \"semester\": \"_semester\",\n \"year\": \"_year\",\n \"week\": \"_week\",\n \"day_of_week\": \"_day_of_week\",\n \"day_of_month\": \"_day_of_month\",\n \"day_of_year\": \"_day_of_year\",\n \"weekend\": \"_weekend\",\n \"month_start\": \"_month_start\",\n \"month_end\": \"_month_end\",\n \"quarter_start\": \"_quarter_start\",\n \"quarter_end\": \"_quarter_end\",\n \"year_start\": \"_year_start\",\n \"year_end\": \"_year_end\",\n \"leap_year\": \"_leap_year\",\n \"days_in_month\": \"_days_in_month\",\n \"hour\": \"_hour\",\n \"minute\": \"_minute\",\n \"second\": \"_second\",\n}\n\nFEATURES_FUNCTIONS = {\n \"month\": lambda x: x.dt.month,\n \"quarter\": lambda x: x.dt.quarter,\n \"semester\": lambda x: np.where(x.dt.month <= 6, 1, 2).astype(np.int64),\n \"year\": lambda x: x.dt.year,\n \"week\": lambda x: x.dt.isocalendar().week.astype(np.int64),\n \"day_of_week\": lambda x: x.dt.dayofweek,\n \"day_of_month\": lambda x: x.dt.day,\n \"day_of_year\": lambda x: x.dt.dayofyear,\n \"weekend\": lambda x: np.where(x.dt.dayofweek <= 4, 0, 1).astype(np.int64),\n \"month_start\": lambda x: x.dt.is_month_start.astype(np.int64),\n \"month_end\": lambda x: x.dt.is_month_end.astype(np.int64),\n \"quarter_start\": lambda x: x.dt.is_quarter_start.astype(np.int64),\n \"quarter_end\": lambda x: x.dt.is_quarter_end.astype(np.int64),\n \"year_start\": lambda x: x.dt.is_year_start.astype(np.int64),\n \"year_end\": lambda x: x.dt.is_year_end.astype(np.int64),\n \"leap_year\": lambda x: x.dt.is_leap_year.astype(np.int64),\n \"days_in_month\": lambda x: x.dt.days_in_month.astype(np.int64),\n \"hour\": lambda x: x.dt.hour,\n \"minute\": lambda x: x.dt.minute,\n \"second\": lambda x: x.dt.second,\n}\n",
"# Authors: Soledad Galli <[email protected]>\n# License: BSD 3 clause\n\nfrom typing import List, Union\n\nimport numpy as np\nimport pandas as pd\n\nfrom feature_engine.dataframe_checks import check_X\nfrom feature_engine._docstrings.methods import _fit_transform_docstring\nfrom feature_engine._docstrings.fit_attributes import (\n _variables_attribute_docstring,\n _feature_names_in_docstring,\n _n_features_in_docstring,\n)\nfrom feature_engine._docstrings.class_inputs import (\n _variables_numerical_docstring,\n _missing_values_docstring,\n)\nfrom feature_engine._docstrings.substitute import Substitution\nfrom feature_engine.outliers.base_outlier import WinsorizerBase\n\n\n@Substitution(\n intro_docstring=WinsorizerBase._intro_docstring,\n capping_method=WinsorizerBase._capping_method_docstring,\n tail=WinsorizerBase._tail_docstring,\n fold=WinsorizerBase._fold_docstring,\n variables=_variables_numerical_docstring,\n missing_values=_missing_values_docstring,\n right_tail_caps_=WinsorizerBase._right_tail_caps_docstring,\n left_tail_caps_=WinsorizerBase._left_tail_caps_docstring,\n variables_=_variables_attribute_docstring,\n feature_names_in_=_feature_names_in_docstring,\n n_features_in_=_n_features_in_docstring,\n fit_transform=_fit_transform_docstring,\n)\nclass Winsorizer(WinsorizerBase):\n \"\"\"\n The Winsorizer() caps maximum and/or minimum values of a variable at automatically\n determined values, and optionally adds indicators.\n\n {intro_docstring}\n\n The Winsorizer() works only with numerical variables. A list of variables can\n be indicated. Alternatively, the Winsorizer() will select and cap all numerical\n variables in the train set.\n\n The transformer first finds the values at one or both tails of the distributions\n (fit). The transformer then caps the variables (transform).\n\n More details in the :ref:`User Guide <winsorizer>`.\n\n Parameters\n ----------\n {capping_method}\n\n {tail}\n\n {fold}\n\n add_indicators: bool, default=False\n Whether to add indicator variables to flag the capped outliers.\n If 'True', binary variables will be added to flag outliers on the left and right\n tails of the distribution. One binary variable per tail, per variable.\n\n {variables}\n\n {missing_values}\n\n Attributes\n ----------\n {right_tail_caps_}\n\n {left_tail_caps_}\n\n {variables_}\n\n {feature_names_in_}\n\n {n_features_in_}\n\n Methods\n -------\n fit:\n Learn the values that will replace the outliers.\n\n {fit_transform}\n\n transform:\n Cap the variables.\n\n \"\"\"\n\n def __init__(\n self,\n capping_method: str = \"gaussian\",\n tail: str = \"right\",\n fold: Union[int, float] = 3,\n add_indicators: bool = False,\n variables: Union[None, int, str, List[Union[str, int]]] = None,\n missing_values: str = \"raise\",\n ) -> None:\n if not isinstance(add_indicators, bool):\n raise ValueError(\n \"add_indicators takes only booleans True and False\"\n f\"Got {add_indicators} instead.\"\n )\n super().__init__(capping_method, tail, fold, variables, missing_values)\n self.add_indicators = add_indicators\n\n def transform(self, X: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Cap the variable values. 
Optionally, add outlier indicators.\n\n Parameters\n ----------\n X: pandas dataframe of shape = [n_samples, n_features]\n The data to be transformed.\n\n Returns\n -------\n X_new: pandas dataframe of shape = [n_samples, n_features + n_ind]\n The dataframe with the capped variables and indicators.\n The number of output variables depends on the values for 'tail' and\n 'add_indicators': if passing 'add_indicators=False', will be equal\n to 'n_features', otherwise, will have an additional indicator column\n per processed feature for each tail.\n \"\"\"\n if not self.add_indicators:\n X_out = super().transform(X)\n\n else:\n X_orig = check_X(X)\n X_out = super().transform(X_orig)\n X_orig = X_orig[self.variables_]\n X_out_filtered = X_out[self.variables_]\n\n if self.tail in [\"left\", \"both\"]:\n X_left = X_out_filtered > X_orig\n X_left.columns = [str(cl) + \"_left\" for cl in self.variables_]\n if self.tail in [\"right\", \"both\"]:\n X_right = X_out_filtered < X_orig\n X_right.columns = [str(cl) + \"_right\" for cl in self.variables_]\n if self.tail == \"left\":\n X_out = pd.concat([X_out, X_left.astype(np.float64)], axis=1)\n elif self.tail == \"right\":\n X_out = pd.concat([X_out, X_right.astype(np.float64)], axis=1)\n else:\n X_both = pd.concat([X_left, X_right], axis=1).astype(np.float64)\n X_both = X_both[\n [\n cl1\n for cl2 in zip(X_left.columns.values, X_right.columns.values)\n for cl1 in cl2\n ]\n ]\n X_out = pd.concat([X_out, X_both], axis=1)\n\n return X_out\n\n def get_feature_names_out(self) -> List:\n \"\"\"Get output feature names for transformation.\n\n Returns\n -------\n feature_names_out: list\n The feature names.\n \"\"\"\n feature_names = super().get_feature_names_out()\n\n if self.add_indicators is True:\n if self.tail == \"left\":\n indicators = [str(cl) + \"_left\" for cl in self.variables_]\n elif self.tail == \"right\":\n indicators = [str(cl) + \"_right\" for cl in self.variables_]\n else:\n indicators = []\n for cl in self.variables_:\n indicators.append(str(cl) + \"_left\")\n indicators.append(str(cl) + \"_right\")\n\n feature_names = feature_names + indicators\n\n return feature_names\n",
"import pandas as pd\nimport pytest\nfrom sklearn.exceptions import NotFittedError\n\nfrom feature_engine.transformation import BoxCoxTransformer\n\n\ndef test_automatically_finds_variables(df_vartypes):\n # test case 1: automatically select variables\n transformer = BoxCoxTransformer(variables=None)\n X = transformer.fit_transform(df_vartypes)\n\n # expected output\n transf_df = df_vartypes.copy()\n transf_df[\"Age\"] = [9.78731, 10.1666, 9.40189, 9.0099]\n transf_df[\"Marks\"] = [-0.101687, -0.207092, -0.316843, -0.431788]\n\n # test init params\n assert transformer.variables is None\n # test fit attr\n assert transformer.variables_ == [\"Age\", \"Marks\"]\n assert transformer.n_features_in_ == 5\n # test transform output\n pd.testing.assert_frame_equal(X, transf_df)\n\n\ndef test_fit_raises_error_if_df_contains_na(df_na):\n # test case 2: when dataset contains na, fit method\n with pytest.raises(ValueError):\n transformer = BoxCoxTransformer()\n transformer.fit(df_na)\n\n\ndef test_transform_raises_error_if_df_contains_na(df_vartypes, df_na):\n # test case 3: when dataset contains na, transform method\n with pytest.raises(ValueError):\n transformer = BoxCoxTransformer()\n transformer.fit(df_vartypes)\n transformer.transform(df_na[[\"Name\", \"City\", \"Age\", \"Marks\", \"dob\"]])\n\n\ndef test_error_if_df_contains_negative_values(df_vartypes):\n # test error when data contains negative values\n df_neg = df_vartypes.copy()\n df_neg.loc[1, \"Age\"] = -1\n\n # test case 4: when variable contains negative value, fit\n with pytest.raises(ValueError):\n transformer = BoxCoxTransformer()\n transformer.fit(df_neg)\n\n # test case 5: when variable contains negative value, transform\n with pytest.raises(ValueError):\n transformer = BoxCoxTransformer()\n transformer.fit(df_vartypes)\n transformer.transform(df_neg)\n\n\ndef test_non_fitted_error(df_vartypes):\n with pytest.raises(NotFittedError):\n transformer = BoxCoxTransformer()\n transformer.transform(df_vartypes)\n"
] | [
[
"sklearn.tree.DecisionTreeRegressor",
"sklearn.linear_model.LogisticRegression",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.model_selection.StratifiedKFold",
"sklearn.model_selection.KFold",
"sklearn.linear_model.Lasso",
"numpy.round"
],
[
"numpy.where"
],
[
"pandas.concat"
],
[
"pandas.testing.assert_frame_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
skelton-group/Phono3py-Power-Tools | [
"9534e9670290cae6a0cf865fb0303a32acac63aa"
] | [
"Phono3pyPowerTools/Plotting.py"
] | [
"# Phono3pyPowerTools/Plotting.py\n\n\n# ----------------\n# Module Docstring\n# ----------------\n\n\"\"\" Routines for working with Matplotlib. \"\"\"\n\n\n# -------\n# Imports\n# -------\n\nimport math\n\nimport numpy as np\nimport matplotlib as mpl\n\nfrom matplotlib.ticker import FuncFormatter\n\n\n# -------------------\n# Default Initialiser\n# -------------------\n\ndef InitialiseMatplotlib():\n \"\"\" Set some basic Matplotlib rc parameters for publication-ready plotting. \"\"\"\n\n font_size = 8\n line_width = 0.5\n\n # Fix for matplotlb.font_manager defaulting to bold variant of Times on some systems -- adapted from https://github.com/matplotlib/matplotlib/issues/5574.\n\n try:\n del mpl.font_manager.weight_dict['roman']\n mpl.font_manager._rebuild()\n except KeyError:\n pass\n\n # Fonts.\n\n mpl.rc('font', **{ 'family' : 'serif', 'size' : font_size, 'serif' : 'Times New Roman' })\n\n mpl.rc('mathtext', **{ 'fontset' : 'custom', 'rm' : 'Times New Roman', 'it' : 'Times New Roman:italic', 'bf' : 'Times New Roman:bold' })\n\n # Axes, lines and patches.\n\n mpl.rc('axes' , **{ 'linewidth' : line_width, 'labelsize' : font_size })\n mpl.rc('lines', **{ 'linewidth' : line_width, 'markeredgewidth' : line_width })\n mpl.rc('patch', **{ 'linewidth' : line_width })\n\n # Tick parameters.\n\n tick_params = {\n 'major.width' : line_width,\n 'minor.width' : line_width,\n 'direction' : 'in'\n }\n\n mpl.rc('xtick', **tick_params )\n mpl.rc('ytick', **tick_params )\n\n mpl.rc('xtick', **{ 'top' : True })\n mpl.rc('ytick', **{ 'right' : True })\n\n\n# -----------------\n# Utility Functions\n# -----------------\n\ndef HSBColourToRGB(h, s, b):\n \"\"\" Convert a colour specified in the HSB colour system to RGB. \"\"\"\n\n h %= 360.0\n\n temp_c = s * b\n temp_min = b - temp_c\n\n temp_h_prime = h / 60.0\n temp_x = temp_c * (1.0 - math.fabs((temp_h_prime % 2.0) - 1.0))\n\n r, g, b = 0.0, 0.0, 0.0\n\n if temp_h_prime < 1.0:\n r = temp_c\n g = temp_x\n b = 0\n elif temp_h_prime < 2.0:\n r = temp_x\n g = temp_c\n b = 0\n elif temp_h_prime < 3.0:\n r = 0\n g = temp_c\n b = temp_x\n elif temp_h_prime < 4.0:\n r = 0\n g = temp_x\n b = temp_c\n elif temp_h_prime < 5.0:\n r = temp_x\n g = 0\n b = temp_c\n else:\n r = temp_c\n g = 0\n b = temp_x\n\n return (r + temp_min, g + temp_min, b + temp_min)\n\ndef GetDefaultAxisLimits(axis_min, axis_max, data, log_scale = False):\n \"\"\"\n If axis_min and/or axis_max are None, set to default values based on the range of values in data.\n\n Keyword args:\n log_scale -- if True, set defaults for plotting on a log scale (default: False)\n \"\"\"\n\n # Only do something if either or both of axis_min and axis_max are not already set.\n\n if axis_min is None or axis_max is None:\n if log_scale:\n # To get a sensible axis range on a logarithmic scale, it is useful to exclude (spurious) small values.\n # A reasonably effective ad hoc way to do this is to select the largest 99 % of the values and round the minimum/maximum values down/up to the nearest powers of 10.\n\n sorted_data = np.sort(data)\n sorted_data = sorted_data[len(sorted_data) // 100:]\n\n if axis_min is None:\n axis_min = math.pow(10, math.floor(math.log10(np.min(sorted_data))))\n\n if axis_max is None:\n axis_max = math.pow(10, math.ceil(math.log10(np.max(sorted_data))))\n\n else:\n # A sensible axis range on a linear scale can be obtained by rounding the minimum/maximum values down/up to \"order of magnitude\" values.\n\n div = math.pow(10, math.floor(math.log10(np.max(data))))\n\n if axis_min is None:\n 
axis_min = div * math.floor(np.min(data) / div)\n\n if axis_max is None:\n axis_max = div * math.ceil(np.max(data) / div)\n\n # Return (possibly updated) min/max values.\n\n return (axis_min, axis_max)\n\n\n# --------------------\n# Formatting Functions\n# --------------------\n\ndef FormatToMinDP(val, max_dp):\n \"\"\" Format val as a string with the minimum number of required decimal places, up to a maximum of max_dp. \"\"\"\n\n num_dp = 0\n\n while True:\n pow = -1 * num_dp\n\n if val % (10 ** pow) == 0.0 or num_dp == max_dp:\n break\n\n num_dp += 1\n\n return \"{{0:.{0}f}}\".format(num_dp).format(val)\n\ndef FormatToStandardForm(val):\n \"\"\" Format val as a string in standard form. \"\"\"\n\n power = math.floor(\n math.log10(val)\n )\n\n val = val / (10 ** power)\n\n if val == 1.0:\n return r\"10$^{{{0:.0f}}}$\".format(power)\n else:\n return r\"{0:.1f} $\\times$ 10$^{{{1:.0f}}}$\".format(val, power)\n\ndef GetFixedPointFormatter(num_dp):\n \"\"\" Return a FuncFormatter object to display float values to num_dp decimal places. \"\"\"\n\n format_str = r\"{{0:.{0}f}}\".format(num_dp)\n\n return FuncFormatter(\n lambda val, pos : format_str.format(val)\n )\n\ndef GetLogFormatter():\n \"\"\" Return a FuncFormatter object to display integer values x as 10^x \"\"\"\n\n return FuncFormatter(\n lambda val, pos : r\"10$^{{{0:.0f}}}$\".format(int(val))\n )\n"
] | [
[
"matplotlib.font_manager._rebuild",
"numpy.min",
"numpy.sort",
"numpy.max",
"matplotlib.rc"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
daidaifan/deep-learning-101 | [
"0d018b42a67eb7ad83fa0683c653e3d4ed36b2ae"
] | [
"deep_learning_101/component/layer.py"
] | [
"import numpy as np\nfrom deep_learning_101.component.utils import softmax, cross_entropy_error\n\nclass ReLU:\n def __init__(self):\n self.mask = None\n\n def forward(self, x):\n self.mask = (x <= 0)\n out = x.copy()\n out[self.mask] = 0\n return out\n\n def backward(self, dout):\n dout[self.mask] = 0\n return dout\n\n\nclass Sigmoid:\n def __init__(self):\n self.out = None\n\n def forward(self, x):\n self.out = 1 / (1 + np.exp(-x))\n return self.out\n\n def backward(self, dout):\n return dout * (1. - self.out) * self.out\n\n\nclass Affine:\n def __init__(self, W, b):\n self.W = W\n self.b = b\n self.x = None\n self.original_x_shape = None\n self.dW = None\n self.db = None\n\n def forward(self, x):\n self.x = x\n self.original_x_shape = self.x.shape\n out = np.dot(self.x, self.W) + self.b\n return out\n\n def backward(self, dout):\n dx = np.dot(dout, self.W.T)\n self.dW = np.dot(self.x.T, dout)\n self.db = np.sum(dout, axis=0)\n dx = dx.reshape(*self.original_x_shape)\n return dx\n\n\nclass SoftmaxWithLoss:\n def __init__(self):\n self.loss = None\n self.y = None\n self.t = None\n\n def forward(self, x, y):\n self.y = y\n self.p = softmax(x)\n self.loss = cross_entropy_error(self.y, self.p)\n return self.loss\n\n def backward(self, dout):\n batch_size = self.y.shape[0]\n dx = (self.p - self.y) / batch_size\n return dx\n"
] | [
[
"numpy.dot",
"numpy.exp",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Gjacquenot/Pythagoras_tree | [
"cfe2f53c8ea54ad6d6496184f5659a4a496056e5"
] | [
"Pythagoras_tree.py"
] | [
"\"\"\"\nThis Python3 script creates a Pythagoras tree in SVG or PNG format\n\nRequires numpy and matplotlib to create pngs.\n\nGet started with : python Pythagoras_tree.py --help\n\"\"\"\nimport argparse\nfrom math import atan2, ceil, pi, sqrt\nfrom typing import List\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import cm, patches\n\n\ndef pythagoras_tree(ratio: float = 1.0, nb_levels: int = 12):\n \"\"\"\n Compute Pythagoras_tree\n The Pythagoras Tree is a plane fractal constructed from squares.\n It is named after Pythagoras because each triple of touching squares\n encloses a right triangle, in a configuration traditionally used to\n depict the Pythagorean theorem.\n http://en.wikipedia.org/wiki/Pythagoras_tree\n\n All these arguments are optional: the function can run with\n argument.\n Output :\n - Matrix M: Pythagoras tree is stored in a matrix M.\n This matrix has 5 columns.\n Each row corresponds to the coordinate of each square of the tree\n The two first columns give the bottom-left position of each\n square. The third column corresponds to the orientation angle of\n each square. The fourth column gives the size of each square. The\n fifth column specifies the level of recursion of each square.\n The first row corresponds to the root of the tree. It is always\n the same\n M[0,:] = [0 -1 0 1 1];\n The leaf located at row i will give 2 leaves located at 2*i and\n 2*i+1.\n \"\"\"\n # pylint: disable=too-many-locals\n # Check inputs\n if ratio <= 0:\n raise Exception(\"Length of ratio has to be greater than zero\")\n if int(nb_levels) != float(nb_levels):\n raise Exception(\"The number of level has to be integer\")\n\n # Compute constants\n c_d = sqrt(1.0 + ratio ** 2)\n # Normalized length 1\n c_1 = 1.0 / c_d\n # Normalized length 2\n c_2 = ratio / c_d\n # Translation pattern\n tr_pat = np.array(\n [[0.0, 1.0 / (1.0 + ratio ** 2)], [1.0, 1.0 + ratio / (1.0 + ratio ** 2)]]\n )\n # Defines the first rotation angle\n alpha1 = atan2(ratio, 1.0)\n # Defines the second rotation angle\n alpha2 = alpha1 - pi / 2.0\n # Number of elements (square)\n nb_elements = 2 ** (nb_levels + 1) - 1\n # Matrice containing the tree\n pt_arrray = np.zeros((nb_elements, 5))\n # Initialization of the tree\n pt_arrray[0, :] = [0.0, -1.0, 0.0, 1.0, 1.0]\n\n # Compute the level of each square contained in the resulting matrix\n offset = 0\n for i in range(nb_levels + 1):\n tmp = 2 ** i\n pt_arrray[offset : offset + tmp, 4] = i\n offset += tmp\n\n def mat_rot(angle_rad: float) -> np.ndarray:\n c_a = np.cos(angle_rad)\n s_a = np.sin(angle_rad)\n return np.array([[c_a, -s_a], [s_a, c_a]])\n\n # Compute the position and size of each square wrt its parent\n for i in range(1, nb_elements, 2):\n j = (i + 1) // 2 - 1\n t_m = pt_arrray[j, 3] * mat_rot(pt_arrray[j, 2]) @ tr_pat\n t_x = t_m[0, :] + pt_arrray[j, 0]\n t_y = t_m[1, :] + pt_arrray[j, 1]\n theta1 = (pt_arrray[j, 2] + alpha1) % (2.0 * pi)\n theta2 = (pt_arrray[j, 2] + alpha2) % (2.0 * pi)\n pt_arrray[i, 0:4] = [t_x[0], t_y[0], theta1, pt_arrray[j, 3] * c_1]\n pt_arrray[i + 1, 0:4] = [t_x[1], t_y[1], theta2, pt_arrray[j, 3] * c_2]\n return pt_arrray\n\n\ndef _svg_write_metadata(write):\n write(\"\\t<title>Pythagoras tree</title>\\n\")\n write(\"\\t<metadata>\\n\")\n write(\"\\t\\t<rdf:RDF>\\n\")\n write(\"\\t\\t\\t<cc:Work\\n\")\n write('\\t\\t\\t\\trdf:about=\"\">\\n')\n write(\"\\t\\t\\t\\t<dc:format>image/svg+xml</dc:format>\\n\")\n write(\"\\t\\t\\t\\t<dc:type\\n\")\n write(\"\\t\\t\\t\\t\\trdf:resource=\" + 
'\"http://purl.org/dc/dcmitype/StillImage\"/>\\n')\n write(\"\\t\\t\\t\\t<dc:title>Pythagoras tree</dc:title>\\n\")\n write(\"\\t\\t\\t\\t<dc:creator>\\n\")\n write(\"\\t\\t\\t\\t\\t<cc:Agent>\\n\")\n write(\"\\t\\t\\t\\t\\t\\t<dc:title>Guillaume Jacquenot</dc:title>\\n\")\n write(\"\\t\\t\\t\\t\\t</cc:Agent>\\n\")\n write(\"\\t\\t\\t\\t</dc:creator>\\n\")\n write(\"\\t\\t\\t\\t<cc:license\\n\")\n write(\n \"\\t\\t\\t\\t\\t\\trdf:resource=\"\n + '\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"/>\\n'\n )\n write(\"\\t\\t\\t</cc:Work>\\n\")\n write(\"\\t\\t\\t<cc:License\\n\")\n write(\n \"\\t\\t\\t\\trdf:about=\" + '\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">\\n'\n )\n write(\"\\t\\t\\t\\t<cc:permits\\n\")\n write(\n \"\\t\\t\\t\\t\\trdf:resource=\" + '\"http://creativecommons.org/ns#Reproduction\"/>\\n'\n )\n write(\"\\t\\t\\t\\t<cc:permits\\n\")\n write(\n \"\\t\\t\\t\\t\\trdf:resource=\" + '\"http://creativecommons.org/ns#Reproduction\"/>\\n'\n )\n write(\"\\t\\t\\t\\t<cc:permits\\n\")\n write(\n \"\\t\\t\\t\\t\\trdf:resource=\" + '\"http://creativecommons.org/ns#Distribution\"/>\\n'\n )\n write(\"\\t\\t\\t\\t<cc:requires\\n\")\n write(\"\\t\\t\\t\\t\\trdf:resource=\" + '\"http://creativecommons.org/ns#Notice\"/>\\n')\n write(\"\\t\\t\\t\\t<cc:requires\\n\")\n write(\"\\t\\t\\t\\t\\trdf:resource=\" + '\"http://creativecommons.org/ns#Attribution\"/>\\n')\n write(\"\\t\\t\\t\\t<cc:prohibits\\n\")\n write(\n \"\\t\\t\\t\\t\\trdf:resource=\" + '\"http://creativecommons.org/ns#CommercialUse\"/>\\n'\n )\n write(\"\\t\\t\\t\\t<cc:permits\\n\")\n write(\n \"\\t\\t\\t\\t\\trdf:resource=\"\n + '\"http://creativecommons.org/ns#DerivativeWorks\"/>\\n'\n )\n write(\"\\t\\t\\t\\t<cc:requires\\n\")\n write(\"\\t\\t\\t\\t\\trdf:resource=\" + '\"http://creativecommons.org/ns#ShareAlike\"/>\\n')\n write(\"\\t\\t\\t</cc:License>\\n\")\n write(\"\\t\\t</rdf:RDF>\\n\")\n write(\"\\t</metadata>\\n\")\n\n\ndef pythagor_tree_write2svg(\n pt_array: np.ndarray,\n ratio: float = 0.8,\n nb_levels: int = 12,\n colormap_name: str = \"summer\",\n):\n \"\"\"\n A svg file giving a vectorial display of the tree. The name of\n file is generated from the parameter m, n, colormap. 
The file is\n stored in the current folder.\n \"\"\"\n # pylint: disable=too-many-locals\n display_metadata = True\n\n nb_elements = pt_array.shape[0]\n length_offset = pt_array[nb_elements - 1, 3] + 0.1\n min_x = np.min(pt_array[:, 0] - sqrt(2) * pt_array[:, 3]) - length_offset\n max_x = np.max(pt_array[:, 0] + sqrt(2) * pt_array[:, 3]) + length_offset\n min_y = np.min(pt_array[:, 1]) - length_offset\n max_y = np.max(pt_array[:, 1] + sqrt(2) * pt_array[:, 3]) + length_offset\n\n # Compute the color of tree\n colormap = cm.get_cmap(colormap_name)\n nb_pixels = 100\n fig_w = ceil(nb_pixels * (max_x - min_x))\n fig_h = ceil(nb_pixels * (max_y - min_y))\n filename = (\n \"Pythagoras_tree_\"\n + str(ratio).replace(\".\", \"_\")\n + \"__\"\n + str(nb_levels)\n + \"__\"\n + colormap_name\n + \".svg\"\n )\n with open(filename, \"wt\", encoding=\"utf-8\") as fid:\n write = fid.write\n write('<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\\n')\n if not display_metadata:\n write('<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.1//EN\"\\n')\n write(' \"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\">\\n')\n write('<svg width=\"{0}\" height=\"{1}\" version=\"1.1\"\\n'.format(fig_w, fig_h))\n # w(['<svg width=\"12cm\" height=\"4cm\" version=\"1.1\"\\n']); % Wfig,\n\n # w(['<svg width=\"15cm\" height=\"10cm\" '...\n # 'viewBox=\"0 0 %d %d\" version=\"1.1\"\\n'],...\n # Wfig,Hfig);\n if display_metadata:\n write('\\txmlns:dc=\"http://purl.org/dc/elements/1.1/\"\\n')\n write('\\txmlns:cc=\"http://creativecommons.org/ns#\"\\n')\n write('\\txmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\"\\n')\n write('\\txmlns:svg=\"http://www.w3.org/2000/svg\"\\n')\n write('\\txmlns=\"http://www.w3.org/2000/svg\"\\n')\n write('\\txmlns:xlink=\"http://www.w3.org/1999/xlink\">\\n')\n\n if display_metadata:\n _svg_write_metadata(write)\n write(\"\\t<defs>\\n\")\n write('\\t\\t<rect width=\"{0}\" height=\"{0}\" \\n'.format(nb_pixels))\n write('\\t\\t\\tx=\"0\" y=\"0\"\\n')\n write('\\t\\t\\tstyle=\"fill-opacity:1;stroke:#00d900;stroke-opacity:1\"\\n')\n write('\\t\\t\\tid=\"squa\"\\n')\n write(\"\\t\\t/>\\n\")\n write(\"\\t</defs>\\n\")\n write(\n '\\t<g transform=\"translate({0} {1}) rotate(180) \" >\\n'.format(\n round(nb_pixels * max_x), round(nb_pixels * max_y)\n )\n )\n for i in range(nb_levels + 1):\n write(\n '\\t\\t<g style=\"fill:{0};\" >\\n'.format(\n matplotlib.colors.rgb2hex(\n colormap(1.0 - float(i) / (nb_levels + 1))\n )\n )\n )\n offset = 2 ** i - 1\n for j in range(2 ** i):\n k = j + offset\n write(\n (\n '\\t\\t\\t<use xlink:href=\"#squa\" '\n + 'transform=\"translate({0:+010.5f} {1:+010.5f}) '\n + 'rotate({2:3.1f}) scale({3:8.6f})\" />\\n'\n ).format(\n nb_pixels * pt_array[k, 0],\n nb_pixels * pt_array[k, 1],\n pt_array[k, 2] * 180 / pi,\n pt_array[k, 3],\n )\n )\n write(\"\\t\\t</g>\\n\")\n write(\"\\t</g>\\n\")\n write(\"</svg>\\n\")\n\n\ndef pythagor_tree_plot(\n pt_array: np.ndarray, colormap_name: str = \"summer\", output_filename: str = \"lm.png\"\n):\n \"\"\"Plot a Pythagoras tree for a PNG format\"\"\"\n colormap = cm.get_cmap(colormap_name)\n fig, axis = plt.subplots()\n for i in range(pt_array.shape[0]):\n c_x = pt_array[i, 0]\n c_y = pt_array[i, 1]\n theta = pt_array[i, 2] * 180.0 / pi\n s_i = pt_array[i, 3]\n rect = patches.Rectangle(\n [c_x, c_y],\n s_i,\n s_i,\n angle=theta,\n ec=\"none\",\n color=colormap(1.0 - i / (pt_array[-1, 4] + 1)),\n )\n axis.add_patch(rect)\n plt.xlim([-4, 4])\n plt.ylim([-1.5, 3.5])\n # plt.gca().relim()\n plt.gca().set_aspect(\"equal\", 
adjustable=\"box\")\n plt.axis(\"off\")\n fig.savefig(output_filename, bbox_inches=\"tight\", dpi=300)\n\n\ndef get_list_of_colormaps() -> List[str]:\n \"\"\"Return the list of avalaible colors\"\"\"\n return [\n \"autumn\",\n \"bone\",\n \"colorcube\",\n \"cool\",\n \"copper\",\n \"flag\",\n \"gray\",\n \"hot\",\n \"hsv\",\n \"jet\",\n \"lines\",\n \"pink\",\n \"prism\",\n \"spring\",\n \"summer\",\n \"white\",\n \"winter\",\n ]\n\n\ndef is_colormap(cmap: str) -> bool:\n \"\"\"This function returns True if 'cmap' is a valid colormap\"\"\"\n return cmap in get_list_of_colormaps()\n\n\ndef main():\n \"\"\"Main entrypoint\"\"\"\n parser = argparse.ArgumentParser(\n description=\"This script creates a SVG image of a Pythagoras tree, a\"\n + \" plane fractal constructed from squares.\"\n )\n parser.add_argument(\n \"-r\",\n \"--ratio\",\n type=float,\n default=1.0,\n help=\"r ( r > 0 ) is the relative length of one of the side \"\n + \"right-angled triangle. \"\n + \"The second side of the right-angle is taken to be one. \"\n + \"To have a symmetric tree, r has to be 1.\",\n )\n parser.add_argument(\n \"-l\",\n \"--level\",\n type=int,\n default=10,\n help=\"n is the level of recursion. The number of elements of \"\n + \"tree is equal to 2**(n+1)-1. A reasonable number for n \"\n + \"is 10.\",\n )\n parser.add_argument(\n \"-p\",\n \"--plot\",\n action=\"store_true\",\n help=\"Option used to display the tree as a PNG image with matplotlib\",\n )\n parser.add_argument(\n \"-c\",\n \"--colormap\",\n type=str,\n default=\"summer\",\n help=\"Matplotlib colormap used to generate color of the different \"\n + \"levels of the tree. \"\n + \"Possible values are {0}\".format(\" \".join(get_list_of_colormaps())),\n )\n args = parser.parse_args()\n\n # Create a matrix containing the informations representing the tree\n # Each row represents a single square\n pt_array = pythagoras_tree(ratio=args.ratio, nb_levels=args.level)\n\n if args.plot:\n # Display the tree\n filename = (\n \"Pythagoras_tree_\"\n + str(args.ratio).replace(\".\", \"_\")\n + \"__\"\n + str(args.level)\n + \"__\"\n + args.colormap\n + \".png\"\n )\n pythagor_tree_plot(\n pt_array, colormap_name=args.colormap, output_filename=filename\n )\n\n # Write results to an SVG file\n pythagor_tree_write2svg(pt_array, args.ratio, args.level, args.colormap)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"matplotlib.pyplot.gca",
"numpy.min",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.subplots",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.axis",
"matplotlib.cm.get_cmap",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JonesRobM/Sapphire | [
"fba875af56e48e2c5a4a3cf6788f51f359f63800"
] | [
"build/lib/Sapphire/Post_Process/AtomicEnvironment.py"
] | [
"import numpy as np\nimport scipy.sparse as spa\nimport os\n\ndef count(Input, Value):\n \n return(len([x for x in Input if x == Value]))\n\nclass LAE():\n\n def __init__(self, System = None, Frame = None, Adj1 = None, Adj2 = None, HeAdj = None, \n EleNN = None, lae = None, HomoBonds = None, HeteroBonds = None, Mix = None,\n Metal = None, Species = None):\n \n self.System = System\n self.Frame = Frame\n self.Adj1 = Adj1\n self.Adj2 = Adj2\n self.HeAdj = HeAdj\n self.EleNN = EleNN\n self.lae = lae\n self.HomoBonds = HomoBonds\n self.HeteroBonds = HeteroBonds\n self.Species = Species \n self.Metal = Metal\n self.Mix = Mix\n self.Metal_Index = self.Species.index(self.Metal)\n self.calculate()\n self.write()\n \n def ensure_dir(self, base_dir='', file_path=''):\n \"\"\"\n\n Robert:\n\n A simple script to verify the existence of a directory\n given the path to it. If it does not exist, will create it.\n\n \"\"\"\n\n directory = base_dir + file_path\n if not os.path.exists(directory):\n\n os.makedirs(directory)\n\n def MakeFile(self, Attributes):\n self.out = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']\n\n if not os.path.isfile(self.out):\n with open(self.System['base_dir'] + Attributes['Dir'] + Attributes['File'], 'w') as out:\n out.close()\n else:\n pass\n \n def calculate(self):\n self.HoBonds = sum(self.Adj1)/2 + sum(self.Adj2)/2\n self.HeBonds = sum(self.HeAdj[0])\n self.Mix_Param = (self.HoBonds - self.HeBonds) / (self.HoBonds + self.HeBonds)\n \n def ele_nn(self):\n if self.Metal_Index == 0:\n self.EleNN = self.Adj1 + self.HeAdj[self.Metal_Index]\n elif self.Metal_Index == 1:\n self.EleNN = self.Adj2 + self.HeAdj[self.Metal_Index]\n\n def LAE(self):\n \"\"\"\n Returns.\n\n -------\n Temp : TYPE\n DESCRIPTION.\n\n \"\"\"\n self.Mat = np.zeros(13) #Initialise w.r.t total time\n for a in range(13):\n if self.Metal_Index == 0:\n self.Mat[a] += count(self.HeAdj[1], a)\n elif self.Metal_Index == 1:\n self.Mat[a] += count(self.HeAdj[0], a)\n \n \n def write(self):\n\n from Sapphire.IO import OutputInfoHetero as Out # Case 3\n if self.Mix:\n #Write object for the CoM\n Attributes = getattr(Out, str('mix')) #Loads in the write information for the object \n OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']\n self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir']) \n self.MakeFile(Attributes)\n with open(OutFile, 'a') as outfile:\n outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.Mix_Param) +'\\n')\n \n if self.HomoBonds:\n #Write object for the CoMDistances\n Attributes = getattr(Out, str('homo_bonds')) #Loads in the write information for the object \n OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']\n self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir']) \n self.MakeFile(Attributes)\n with open(OutFile, 'a') as outfile:\n outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.HoBonds) +'\\n')\n\n if self.HeteroBonds:\n #Write object for the homo CoM distances\n Attributes = getattr(Out, str('hetero_bonds')) #Loads in the write information for the object \n OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']\n self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir']) \n self.MakeFile(Attributes)\n with open(OutFile, 'a') as outfile:\n outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.HeBonds) +'\\n')\n \n if self.EleNN:\n self.ele_nn()\n #Write object for the homo CoM 
distances\n Attributes = getattr(Out, str('ele_nn')) #Loads in the write information for the object \n OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File'] + self.Metal\n self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir']) \n self.MakeFile(Attributes)\n with open(OutFile, 'a') as outfile:\n outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.EleNN) +'\\n')\n \n if self.lae:\n self.LAE()\n #Write object for the homo CoM distances\n Attributes = getattr(Out, str('lae')) #Loads in the write information for the object \n OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']\n self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir']) + self.Metal \n self.MakeFile(Attributes)\n with open(OutFile, 'a') as outfile:\n outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.Mat) +'\\n') "
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jingmouren/OptimalPortfolio | [
"cb27cbc6f0832bfc531c085454afe1ca457ea95e"
] | [
"PortOpt/base_est.py"
] | [
"import numpy as np\nfrom numpy.core.defchararray import upper\nimport pandas as pd\nfrom scipy.optimize import minimize\nimport warnings\nfrom . import utility_functions as utility_functions\nimport cvxpy as cp\n\n\nclass BaseEstimator:\n def __init__(self, tickers) -> None:\n self.tickers = tickers\n\n def _get_logreturns(self, prices, period=1) -> pd.DataFrame:\n return np.log(prices.shift(-1)/prices).dropna()\n\n def _pairwise_exp_cov(self, X, Y, span=180) -> pd.DataFrame:\n pair_cov = (X - X.mean()) * (Y - Y.mean())\n\n return pair_cov.ewm(span=span).mean().iloc[-1]\n\n def _cov_to_corr(self, cov):\n Dinv = np.diag(1 / np.sqrt(np.diag(cov))) \n corr = Dinv @ cov @ Dinv\n\n return corr\n\n def _corr_to_cov(self, corr, stds):\n return corr * np.outer(stds, stds)\n"
] | [
[
"numpy.diag",
"numpy.outer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rhololkeolke/catalyst-rl | [
"ec18ff4a58b6d00652f772231db8de86debb4b3d",
"ec18ff4a58b6d00652f772231db8de86debb4b3d",
"ec18ff4a58b6d00652f772231db8de86debb4b3d",
"ec18ff4a58b6d00652f772231db8de86debb4b3d"
] | [
"catalyst_rl/dl/callbacks/confusion_matrix.py",
"catalyst_rl/contrib/nn/optimizers/lookahead.py",
"catalyst_rl/rl/exploration/param_noise.py",
"catalyst_rl/utils/distributed.py"
] | [
"from typing import Dict, List # isort:skip\n\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix as confusion_matrix_fn\n\nfrom catalyst_rl.dl import Callback, CallbackNode, CallbackOrder, State, utils\nfrom catalyst_rl.utils import meters\n\n\nclass ConfusionMatrixCallback(Callback):\n def __init__(\n self,\n input_key: str = \"targets\",\n output_key: str = \"logits\",\n prefix: str = \"confusion_matrix\",\n version: str = \"tnt\",\n class_names: List[str] = None,\n num_classes: int = None,\n plot_params: Dict = None,\n tensorboard_callback_name: str = \"_tensorboard\",\n ):\n super().__init__(CallbackOrder.Metric, CallbackNode.Master)\n self.prefix = prefix\n self.output_key = output_key\n self.input_key = input_key\n self.tensorboard_callback_name = tensorboard_callback_name\n\n assert version in [\"tnt\", \"sklearn\"]\n self._version = version\n self._plot_params = plot_params or {}\n\n self.class_names = class_names\n self.num_classes = num_classes \\\n if class_names is None \\\n else len(class_names)\n\n assert self.num_classes is not None\n self._reset_stats()\n\n def _reset_stats(self):\n if self._version == \"tnt\":\n self.confusion_matrix = meters.ConfusionMeter(self.num_classes)\n elif self._version == \"sklearn\":\n self.outputs = []\n self.targets = []\n\n def _add_to_stats(self, outputs, targets):\n if self._version == \"tnt\":\n self.confusion_matrix.add(predicted=outputs, target=targets)\n elif self._version == \"sklearn\":\n outputs = outputs.cpu().numpy()\n targets = targets.cpu().numpy()\n\n outputs = np.argmax(outputs, axis=1)\n\n self.outputs.extend(outputs)\n self.targets.extend(targets)\n\n def _compute_confusion_matrix(self):\n if self._version == \"tnt\":\n confusion_matrix = self.confusion_matrix.value()\n elif self._version == \"sklearn\":\n confusion_matrix = confusion_matrix_fn(\n y_true=self.targets, y_pred=self.outputs\n )\n else:\n raise NotImplementedError()\n return confusion_matrix\n\n def _plot_confusion_matrix(\n self, logger, epoch, confusion_matrix, class_names=None\n ):\n fig = utils.plot_confusion_matrix(\n confusion_matrix,\n class_names=class_names,\n normalize=True,\n show=False,\n **self._plot_params\n )\n fig = utils.render_figure_to_tensor(fig)\n logger.add_image(f\"{self.prefix}/epoch\", fig, global_step=epoch)\n\n def on_loader_start(self, state: State):\n self._reset_stats()\n\n def on_batch_end(self, state: State):\n self._add_to_stats(\n state.batch_out[self.output_key].detach(),\n state.batch_in[self.input_key].detach()\n )\n\n def on_loader_end(self, state: State):\n class_names = \\\n self.class_names or \\\n [str(i) for i in range(self.num_classes)]\n confusion_matrix = self._compute_confusion_matrix()\n tb_callback = state.callbacks[self.tensorboard_callback_name]\n self._plot_confusion_matrix(\n logger=tb_callback.loggers[state.loader_name],\n epoch=state.global_epoch,\n confusion_matrix=confusion_matrix,\n class_names=class_names,\n )\n\n\n__all__ = [\"ConfusionMatrixCallback\"]\n",
"from typing import Dict # isort:skip\nfrom collections import defaultdict\n\nimport torch\nfrom torch.optim import Optimizer\n\n\nclass Lookahead(Optimizer):\n def __init__(self, optimizer: Optimizer, k: int = 5, alpha: float = 0.5):\n \"\"\"\n Taken from: https://github.com/alphadl/lookahead.pytorch\n \"\"\"\n self.optimizer = optimizer\n self.k = k\n self.alpha = alpha\n self.param_groups = self.optimizer.param_groups\n self.defaults = self.optimizer.defaults\n self.state = defaultdict(dict)\n self.fast_state = self.optimizer.state\n for group in self.param_groups:\n group[\"counter\"] = 0\n\n def update(self, group):\n for fast in group[\"params\"]:\n param_state = self.state[fast]\n if \"slow_param\" not in param_state:\n param_state[\"slow_param\"] = torch.zeros_like(fast.data)\n param_state[\"slow_param\"].copy_(fast.data)\n slow = param_state[\"slow_param\"]\n slow += (fast.data - slow) * self.alpha\n fast.data.copy_(slow)\n\n def update_lookahead(self):\n for group in self.param_groups:\n self.update(group)\n\n def step(self, closure=None):\n loss = self.optimizer.step(closure)\n for group in self.param_groups:\n if group[\"counter\"] == 0:\n self.update(group)\n group[\"counter\"] += 1\n if group[\"counter\"] >= self.k:\n group[\"counter\"] = 0\n return loss\n\n def state_dict(self):\n fast_state_dict = self.optimizer.state_dict()\n slow_state = {\n (id(k) if isinstance(k, torch.Tensor) else k): v\n for k, v in self.state.items()\n }\n fast_state = fast_state_dict[\"state\"]\n param_groups = fast_state_dict[\"param_groups\"]\n return {\n \"fast_state\": fast_state,\n \"slow_state\": slow_state,\n \"param_groups\": param_groups,\n }\n\n def load_state_dict(self, state_dict):\n slow_state_dict = {\n \"state\": state_dict[\"slow_state\"],\n \"param_groups\": state_dict[\"param_groups\"],\n }\n fast_state_dict = {\n \"state\": state_dict[\"fast_state\"],\n \"param_groups\": state_dict[\"param_groups\"],\n }\n super(Lookahead, self).load_state_dict(slow_state_dict)\n self.optimizer.load_state_dict(fast_state_dict)\n self.fast_state = self.optimizer.state\n\n def add_param_group(self, param_group):\n param_group[\"counter\"] = 0\n self.optimizer.add_param_group(param_group)\n\n @classmethod\n def get_from_params(\n cls,\n params: Dict,\n base_optimizer_params: Dict = None,\n **kwargs,\n ) -> \"Lookahead\":\n from catalyst_rl.dl.registry import OPTIMIZERS\n\n base_optimizer = OPTIMIZERS.get_from_params(\n params=params, **base_optimizer_params\n )\n optimizer = cls(optimizer=base_optimizer, **kwargs)\n return optimizer\n",
"import numpy as np\n\nimport torch\n\nfrom catalyst_rl.rl.core import ExplorationStrategy\nfrom catalyst_rl.rl.utils import get_network_weights, set_network_weights\n\nEPS = 1e-6\n\n\ndef _set_params_noise(\n actor, states, noise_delta=0.2, tol=1e-3, max_steps=1000\n):\n \"\"\"\n Perturbs parameters of the policy represented by the actor network.\n Binary search is employed to find the appropriate magnitude of the noise\n corresponding to the desired distance measure (noise_delta) between\n non-perturbed and perturbed policy.\n\n Args:\n actor: torch.nn.Module, neural network which represents actor\n states: batch of states to estimate the distance measure between the\n non-perturbed and perturbed policy\n noise_delta: float, parameter noise threshold value\n tol: float, controls the tolerance of binary search\n max_steps: maximum number of steps in binary search\n \"\"\"\n if states is None:\n return noise_delta\n\n exclude_norm = True\n orig_weights = get_network_weights(actor, exclude_norm=exclude_norm)\n orig_actions = actor(states)\n\n sigma_min = 0.\n sigma_max = 100.\n sigma = sigma_max\n\n for step in range(max_steps):\n noise_dist = torch.distributions.normal.Normal(0, sigma)\n weights = {\n key: w.clone() + noise_dist.sample(w.shape)\n for key, w in orig_weights.items()\n }\n set_network_weights(actor, weights, strict=not exclude_norm)\n\n new_actions = actor(states)\n distance = \\\n (new_actions - orig_actions).pow(2).sum(1).sqrt().mean().item()\n\n distance_mismatch = distance - noise_delta\n\n # the difference between current distance\n # and desired distance is too small\n if np.abs(distance_mismatch) < tol:\n break\n # too big sigma\n if distance_mismatch > 0:\n sigma_max = sigma\n # too small sigma\n else:\n sigma_min = sigma\n sigma = sigma_min + (sigma_max - sigma_min) / 2\n\n return distance\n\n\nclass ParameterSpaceNoise(ExplorationStrategy):\n \"\"\"\n For continuous environments only.\n At the beginning of the episode, perturbs the weights of actor network\n forcing it to produce more diverse actions.\n Paper: https://arxiv.org/abs/1706.01905\n \"\"\"\n def __init__(self, target_sigma, tolerance=1e-3, max_steps=1000):\n super().__init__()\n\n self.target_sigma = target_sigma\n self.tol = tolerance\n self.max_steps = max_steps\n\n def set_power(self, value):\n super().set_power(value)\n self.target_sigma *= self._power\n\n def update_actor(self, actor, states):\n return _set_params_noise(\n actor, states, self.target_sigma, self.tol, self.max_steps\n )\n\n def get_action(self, action):\n return action\n\n\n__all__ = [\"ParameterSpaceNoise\"]\n",
"from collections import OrderedDict\nimport copy\nimport os\nimport socket\nimport subprocess\nimport sys\nfrom typing import Dict, Tuple\n\nimport torch\nfrom torch import nn\nimport torch.distributed\n\nfrom catalyst_rl import utils\nfrom catalyst_rl.utils.tools.typing import (\n Criterion, Device, Model, Optimizer, Scheduler\n)\n\n\ndef is_torch_distributed_initialized() -> bool:\n \"\"\"\n Checks if torch.distributed is available and initialized\n \"\"\"\n return (\n torch.distributed.is_available() and torch.distributed.is_initialized()\n )\n\n\ndef get_rank() -> int:\n \"\"\"\n Returns the rank of the current worker.\n\n Returns:\n int: ``rank`` if torch.distributed is initialized,\n otherwise ``-1``\n \"\"\"\n if is_torch_distributed_initialized():\n return torch.distributed.get_rank()\n else:\n return -1\n\n\ndef is_apex_available() -> bool:\n \"\"\"\n Checks if apex is available\n \"\"\"\n env_apex = os.getenv(\"USE_APEX\", \"1\") == \"1\"\n try:\n import apex # noqa: F401\n from apex import amp # noqa: F401\n return True and env_apex\n except ImportError:\n return False and env_apex\n\n\ndef assert_fp16_available() -> None:\n \"\"\"\n Asserts for installed and available Apex FP16\n \"\"\"\n assert torch.backends.cudnn.enabled, \\\n \"fp16 mode requires cudnn backend to be enabled.\"\n\n assert is_apex_available(), \"NVidia Apex package must be installed. \" \\\n \"See https://github.com/NVIDIA/apex.\"\n\n\ndef distributed_mean(value: float):\n \"\"\"\n Computes distributed mean among all nodes\n \"\"\"\n if is_torch_distributed_initialized():\n value = torch.tensor(\n value,\n dtype=torch.float,\n device=f\"cuda:{torch.cuda.current_device()}\",\n requires_grad=False\n )\n torch.distributed.all_reduce(value)\n value = float(value.item() / torch.distributed.get_world_size())\n return value\n\n\ndef get_slurm_params():\n cmd = \"scontrol show hostnames '%s'\" % os.environ[\"SLURM_JOB_NODELIST\"]\n nodes = subprocess.getoutput(cmd).split()\n num_nodes = int(os.environ[\"SLURM_JOB_NUM_NODES\"])\n current_node = os.environ[\"SLURMD_NODENAME\"]\n master_node = socket.gethostbyname(nodes[0])\n cur_node_idx = nodes.index(current_node)\n return cur_node_idx, num_nodes, master_node\n\n\ndef is_slurm_available():\n return \"SLURM_JOB_NUM_NODES\" in os.environ and \"SLURM_NODEID\" in os.environ\n\n\ndef get_distributed_params():\n master_addr = \"127.0.0.1\"\n cur_node, num_nodes = 0, 1\n if is_slurm_available():\n cur_node, num_nodes, master_addr = get_slurm_params()\n\n os.environ[\"MASTER_ADDR\"] = os.getenv(\"MASTER_ADDR\", master_addr)\n os.environ[\"MASTER_PORT\"] = os.getenv(\"MASTER_PORT\", \"424242\")\n\n workers_per_node = torch.cuda.device_count()\n start_rank = cur_node * workers_per_node\n world_size = num_nodes * workers_per_node\n\n local_rank = os.getenv(\"LOCAL_RANK\", None)\n rank = os.getenv(\"RANK\", None)\n local_rank, rank = [v and int(v) for v in [local_rank, rank]]\n world_size = int(os.getenv(\"WORLD_SIZE\", world_size))\n\n output = OrderedDict(\n local_rank=local_rank,\n start_rank=start_rank,\n rank=rank,\n world_size=world_size,\n master_addr=os.environ[\"MASTER_ADDR\"],\n master_port=os.environ[\"MASTER_PORT\"],\n )\n\n return output\n\n\ndef get_distributed_env(\n local_rank, rank, world_size, use_cuda_visible_devices=True\n):\n env = os.environ.copy()\n env[\"RANK\"] = str(rank)\n env[\"WORLD_SIZE\"] = str(world_size)\n env[\"LOCAL_RANK\"] = str(local_rank)\n if use_cuda_visible_devices:\n available_gpus = utils.get_available_gpus()\n env[\"LOCAL_RANK\"] = 
\"0\"\n env[\"CUDA_VISIBLE_DEVICES\"] = str(available_gpus[local_rank])\n return env\n\n\ndef distributed_run(distributed, worker_fn, *args, **kwargs):\n \"\"\"\n Distributed run\n Args:\n distributed:\n worker_fn:\n args:\n kwargs:\n \"\"\"\n distributed_params = get_distributed_params()\n local_rank = distributed_params[\"local_rank\"]\n world_size = distributed_params[\"world_size\"]\n\n if not distributed or world_size <= 1:\n worker_fn(*args, **kwargs)\n elif local_rank is not None:\n torch.cuda.set_device(int(local_rank))\n\n torch.distributed.init_process_group(\n backend=\"nccl\", init_method=\"env://\"\n )\n worker_fn(*args, **kwargs)\n else:\n workers = []\n try:\n for local_rank in range(torch.cuda.device_count()):\n rank = distributed_params[\"start_rank\"] + local_rank\n env = get_distributed_env(local_rank, rank, world_size)\n cmd = [sys.executable] + sys.argv.copy()\n workers.append(subprocess.Popen(cmd, env=env))\n for worker in workers:\n worker.wait()\n finally:\n for worker in workers:\n worker.kill()\n\n\ndef initialize_apex(model, optimizer=None, **distributed_params):\n import apex\n amp_params = utils.get_fn_default_params(\n apex.amp.initialize, [\"models\", \"optimizers\"]\n )\n amp_params[\"opt_level\"] = \"O0\"\n for dp in distributed_params:\n if dp in amp_params:\n amp_params[dp] = distributed_params[dp]\n\n amp_result = apex.amp.initialize(model, optimizer, **amp_params)\n if optimizer is not None:\n model, optimizer = amp_result\n else:\n model = amp_result\n return model, optimizer\n\n\ndef process_components(\n model: Model,\n criterion: Criterion = None,\n optimizer: Optimizer = None,\n scheduler: Scheduler = None,\n distributed_params: Dict = None,\n device: Device = None,\n) -> Tuple[Model, Criterion, Optimizer, Scheduler, Device]:\n \"\"\"\n Returns the processed model, criterion, optimizer, scheduler and device\n\n Args:\n model (Model): torch model\n criterion (Criterion): criterion function\n optimizer (Optimizer): optimizer\n scheduler (Scheduler): scheduler\n distributed_params (dict, optional): dict with the parameters\n for distributed and FP16 methond\n device (Device, optional): device\n \"\"\"\n distributed_params = distributed_params or {}\n distributed_params = copy.deepcopy(distributed_params)\n distributed_params.update(get_distributed_params())\n if device is None:\n device = utils.get_device()\n\n use_apex = distributed_params.pop(\"apex\", True) and is_apex_available()\n\n model: Model = utils.maybe_recursive_call(model, \"to\", device=device)\n\n if utils.is_wrapped_with_ddp(model):\n pass\n # distributed data parallel run (ddp) (with apex support)\n elif get_rank() >= 0:\n assert isinstance(model, nn.Module), \\\n \"No support for dixtributed KV model yet\"\n\n local_rank = distributed_params.pop(\"local_rank\", 0)\n device = f\"cuda:{local_rank}\"\n model = utils.maybe_recursive_call(model, \"to\", device=device)\n\n syncbn = distributed_params.pop(\"syncbn\", False)\n\n if use_apex:\n import apex\n model, optimizer = initialize_apex(\n model, optimizer, **distributed_params\n )\n model = apex.parallel.DistributedDataParallel(model)\n\n if syncbn:\n model = apex.parallel.convert_syncbn_model(model)\n else:\n model = nn.parallel.DistributedDataParallel(\n model, device_ids=[local_rank], output_device=local_rank\n )\n # data parallel run (dp) (with apex support)\n else:\n # apex issue https://github.com/deepset-ai/FARM/issues/210\n can_use_apex = \\\n (use_apex and torch.cuda.device_count() == 1) \\\n or (\n torch.cuda.device_count() > 
1\n and distributed_params.get(\"opt_level\", \"O0\") == \"O1\"\n )\n\n if can_use_apex:\n assert isinstance(model, nn.Module), \\\n \"No support for apex KV model yet\"\n\n model, optimizer = initialize_apex(\n model, optimizer, **distributed_params\n )\n\n if torch.cuda.device_count() > 1:\n if isinstance(model, nn.Module):\n model = nn.DataParallel(model)\n elif isinstance(model, dict):\n model = {k: nn.DataParallel(v) for k, v in model.items()}\n\n model: Model = utils.maybe_recursive_call(model, \"to\", device=device)\n\n return model, criterion, optimizer, scheduler, device\n\n\n__all__ = [\n \"get_rank\", \"process_components\", \"distributed_mean\", \"is_apex_available\",\n \"assert_fp16_available\", \"distributed_run\"\n]\n"
] | [
[
"numpy.argmax",
"sklearn.metrics.confusion_matrix"
],
[
"torch.zeros_like"
],
[
"torch.distributions.normal.Normal",
"numpy.abs"
],
[
"torch.distributed.init_process_group",
"torch.cuda.current_device",
"torch.distributed.is_initialized",
"torch.nn.DataParallel",
"torch.distributed.is_available",
"torch.distributed.get_rank",
"torch.cuda.device_count",
"torch.distributed.all_reduce",
"torch.distributed.get_world_size",
"torch.nn.parallel.DistributedDataParallel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kaylode/tern | [
"a85b7568c574515031a2a41e8c21df1002c05c64"
] | [
"modules/losses/triplet.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom pytorch_metric_learning import losses \n\nclass TripletLoss(nn.Module):\n def __init__(self, **kwargs) -> None:\n super().__init__(**kwargs)\n self.loss_fn = losses.TripletMarginLoss(**kwargs)\n\n def forward(self, feats1, feats2):\n labels = torch.arange(feats1.size(0))\n embeddings = torch.cat([feats1, feats2], dim=0)\n labels = torch.cat([labels, labels], dim=0)\n\n loss = self.loss_fn(embeddings, labels)\n return {'T': loss}"
] | [
[
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
feitonyliu/iForest | [
"ebcb06ae266249b049d848b53005fcabff1ca4b0"
] | [
"isotree/__init__.py"
] | [
"import numpy as np, pandas as pd\nfrom scipy.sparse import csc_matrix, csr_matrix, issparse, isspmatrix_csc, isspmatrix_csr, vstack as sp_vstack\nimport warnings\nimport multiprocessing\nimport ctypes\nimport json\nimport os\nfrom copy import deepcopy\nfrom ._cpp_interface import (\n isoforest_cpp_obj, _sort_csc_indices, _reconstruct_csr_sliced,\n _reconstruct_csr_with_categ, _get_has_openmp\n)\n\n__all__ = [\"IsolationForest\"]\n\n### Helpers\ndef _get_num_dtype(X_num=None, sample_weights=None, column_weights=None):\n if X_num is not None:\n return np.empty(0, dtype=X_num.dtype)\n elif sample_weights is not None:\n return np.empty(0, dtype=column_weights.dtype)\n elif column_weights is not None:\n return np.empty(0, dtype=sample_weights.dtype)\n else:\n return np.empty(0, dtype=ctypes.c_double)\n\ndef _get_int_dtype(X_num):\n if (X_num is not None) and (issparse(X_num)):\n return np.empty(0, dtype=X_num.indices.dtype)\n else:\n return np.empty(0, dtype=ctypes.c_size_t)\n\ndef _is_row_major(X_num):\n if (X_num is None) or (issparse(X_num)):\n return False\n else:\n return X_num.strides[1] == X_num.dtype.itemsize\n\ndef _is_col_major(X_num):\n if (X_num is None) or (issparse(X_num)):\n return False\n else:\n return X_num.strides[0] == X_num.dtype.itemsize\n\ndef _copy_if_subview(X_num, prefer_row_major=False):\n ### TODO: the C++ functions should accept a 'leading dimension'\n ### parameter so as to avoid copying the data here\n if (X_num is not None) and (not issparse(X_num)):\n col_major = _is_col_major(X_num)\n leading_dimension = int(X_num.strides[1 if col_major else 0] / X_num.dtype.itemsize)\n if (\n (leading_dimension != X_num.shape[0 if col_major else 1]) or\n (len(X_num.strides) != 2) or\n (not X_num.flags.aligned) or\n (not _is_row_major(X_num) and not _is_col_major(X_num))\n ):\n X_num = X_num.copy()\n if _is_col_major(X_num) != col_major:\n if prefer_row_major:\n X_num = np.ascontiguousarray(X_num)\n else:\n X_num = np.asfortranarray(X_num)\n return X_num\n\ndef _all_equal(x, y):\n if x.shape[0] != y.shape[0]:\n return False\n return np.all(x == y)\n\ndef _encode_categorical(cl, categories):\n if (cl.shape[0] >= 100) and (cl.dtype.name == \"category\"):\n if _all_equal(cl.cat.categories, categories):\n return cl.cat.codes\n return pd.Categorical(cl, categories).codes\n\ndef _process_nthreads(nthreads, warn_if_no_omp=False):\n if nthreads is None:\n nthreads = 1\n elif nthreads < 0:\n nthreads = multiprocessing.cpu_count() + 1 + nthreads\n if nthreads < 1:\n raise ValueError(\"Passed invalid 'nthreads'.\")\n\n if isinstance(nthreads, float):\n nthreads = int(nthreads)\n\n if (warn_if_no_omp) and (nthreads > 1) and (not _get_has_openmp()):\n msg_omp = \"Attempting to use more than 1 thread, but \"\n msg_omp += \"package was built without multi-threading \"\n msg_omp += \"support - see the project's GitHub page for \"\n msg_omp += \"more information.\"\n warnings.warn(msg_omp)\n\n assert nthreads > 0\n assert isinstance(nthreads, int)\n return nthreads\n\nclass IsolationForest:\n \"\"\"\n Isolation Forest model\n\n Isolation Forest is an algorithm originally developed for outlier detection that consists in splitting\n sub-samples of the data according to some attribute/feature/column at random. The idea is that, the rarer\n the observation, the more likely it is that a random uniform split on some feature would put outliers alone\n in one branch, and the fewer splits it will take to isolate an outlier observation like this. 
The concept\n is extended to splitting hyperplanes in the extended model (i.e. splitting by more than one column at a time), and to\n guided (not entirely random) splits in the SCiForest and FCF models that aim at isolating outliers faster and/or\n finding clustered outliers.\n\n This version adds heuristics to handle missing data and categorical variables. Can be used to aproximate pairwise\n distances by checking the depth after which two observations become separated, and to approximate densities by fitting\n trees beyond balanced-tree limit. Offers options to vary between randomized and deterministic splits too.\n\n Note\n ----\n The default parameters in this software do not correspond to the suggested parameters in\n any of the references.\n In particular, the following default values are likely to cause huge differences when compared to the\n defaults in other software: ``ndim``, ``sample_size``, ``ntrees``. The defaults here are\n nevertheless more likely to result in better models. In order to mimic scikit-learn for example, one\n would need to pass ``ndim=1``, ``sample_size=256``, ``ntrees=100``, ``missing_action=\"fail\"``, ``nthreads=1``.\n\n Note\n ----\n Shorthands for parameter combinations that match some of the references:\n\n 'iForest' (reference [1]_):\n ``ndim=1``, ``sample_size=256``, ``max_depth=8``, ``ntrees=100``, ``missing_action=\"fail\"``.\n\n 'EIF' (reference [3]_):\n ``ndim=2``, ``sample_size=256``, ``max_depth=8``, ``ntrees=100``, ``missing_action=\"fail\"``,\n ``coefs=\"uniform\"``, ``standardize_data=False`` (plus standardizing the data **before** passing it).\n \n 'SCiForest' (reference [4]_):\n ``ndim=2``, ``sample_size=256``, ``max_depth=8``, ``ntrees=100``, ``missing_action=\"fail\"``,\n ``coefs=\"normal\"``, ``ntry=10``, ``prob_pick_avg_gain=1``, ``penalize_range=True``.\n Might provide much better results with ``max_depth=None`` despite the reference's recommendation.\n\n 'FCF' (reference [11]_):\n ``ndim=2``, ``sample_size=256``, ``max_depth=None``, ``ntrees=200``, ``missing_action=\"fail\"``,\n ``coefs=\"normal\"``, ``ntry=1``, ``prob_pick_pooled_gain=1``.\n Might provide similar or better results with ``ndim=1`` and/or sample size as low as 32.\n For the FCF model aimed at imputing missing values,\n might give better results with ``ntry=10`` or higher and much larger sample sizes.\n 'RRCF' (reference [12]_):\n ``ndim=1``, ``prob_pick_col_by_range=1``, ``sample_size=256`` or more, ``max_depth=None``,\n ``ntrees=100`` or more, ``missing_action=\"fail\"``. Note however that reference [12]_ proposed a\n different method for calculation of anomaly scores, while this library uses isolation depth just\n like for 'iForest', so results might differ significantly from those of other libraries.\n Nevertheless, experiments in reference [11]_ suggest that isolation depth might be a better\n scoring metric for this model.\n\n Note\n ----\n The model offers many tunable parameters (see reference [11]_ for a comparison).\n The most likely candidate to tune is\n ``prob_pick_pooled_gain``, for which higher values tend to\n result in a better ability to flag outliers in multimodal datasets, at the expense of poorer\n generalizability to inputs with values outside the variables' ranges to which the model was fit\n (see plots generated from the examples in GitHub notebook for a better idea of the difference). 
The next candidate to tune is\n ``sample_size`` - the default is to use all rows, but in some datasets introducing sub-sampling can help,\n especially for the single-variable model. In smaller datasets, one might also want to experiment\n with ``weigh_by_kurtosis`` and perhaps lower ``ndim``. If using ``prob_pick_pooled_gain``, models\n are likely to benefit from deeper trees (controlled by ``max_depth``), but using large samples\n and/or deeper trees can result in significantly slower model fitting and predictions - in such cases,\n using ``min_gain`` (with a value like 0.25) with ``max_depth=None`` can offer a better speed/performance\n trade-off than changing ``max_depth``.\n \n If the data has categorical variables and these are more important important for determining\n outlierness compared to numerical columns, one might want to experiment with ``ndim=1``,\n ``categ_split_type=\"single_categ\"``, and ``scoring_metric=\"density\"``.\n\n For small datasets, one might also want to experiment with ``ndim=1``, ``scoring_metric=\"adj_depth\"``\n and ``penalize_range=True``.\n\n Note\n ----\n The default parameters will not scale to large datasets. In particular,\n if the amount of data is large, it's suggested to set a smaller sample size for each tree (parameter ``sample_size``)\n and to fit fewer of them (parameter ``ntrees``).\n As well, the default option for 'missing_action' might slow things down significantly.\n See the documentation of the parameters for more details.\n These defaults can also result in very big model sizes in memory and as serialized\n files (e.g. models that weight over 10GB) when the number of rows in the data is large.\n Using fewer trees, smaller sample sizes, and shallower trees can help to reduce model\n sizes if that becomes a problem.\n\n Note\n ----\n See the documentation of ``predict`` for some considerations when serving models generated through\n this library.\n\n Parameters\n ----------\n sample_size : str \"auto\", int, float(0,1), or None\n Sample size of the data sub-samples with which each binary tree will be built. If passing 'None', each\n tree will be built using the full data. Recommended value in [1]_, [2]_, [3]_ is 256, while\n the default value in the author's code in [5]_ is 'None' here.\n\n If passing \"auto\", will use the full number of rows in the data, up to 10,000 (i.e.\n will take 'sample_size=min(nrows(X), 10000)') **when calling fit**, and the full amount\n of rows in the data **when calling the variants** ``fit_predict`` or ``fit_transform``.\n\n If passing ``None``, will take the full number of rows in the data (no sub-sampling).\n\n If passing a number between zero and one, will assume it means taking a sample size that represents\n that proportion of the rows in the data.\n\n Hint: seeing a distribution of scores which is on average too far below 0.5 could mean that the\n model needs more trees and/or bigger samples to reach convergence (unless using non-random\n splits, in which case the distribution is likely to be centered around a much lower number),\n or that the distributions in the data are too skewed for random uniform splits.\n ntrees : int\n Number of binary trees to build for the model. Recommended value in [1]_ is 100, while the default value in the\n author's code in [5]_ is 10. 
In general, the number of trees required for good results\n is higher when (a) there are many columns, (b) there are categorical variables, (c) categorical variables have many\n categories, (d) `ndim` is high, (e) ``prob_pick_pooled_gain`` is used, (f) ``scoring_metric=\"density\"``\n or ``scoring_metric=\"boxed_density\"`` are used.\n\n Hint: seeing a distribution of scores which is on average too far below 0.5 could mean that the\n model needs more trees and/or bigger samples to reach convergence (unless using non-random\n splits, in which case the distribution is likely to be centered around a much lower number),\n or that the distributions in the data are too skewed for random uniform splits.\n ndim : int\n Number of columns to combine to produce a split. If passing 1, will produce the single-variable model described\n in [1]_ and [2]_, while if passing values greater than 1, will produce the extended model described in [3]_ and [4]_.\n Recommended value in [4]_ is 2, while [3]_ recommends a low value such as 2 or 3. Models with values higher than 1\n are referred hereafter as the extended model (as in [3]_).\n\n Note that, when using ``ndim>1`` plus ``standardize_data=True``, the variables are standardized at\n each step as suggested in [4]_, which makes the models slightly different than in [3]_.\n\n In general, when the data has categorical variables, models with ``ndim=1`` plus\n ``categ_split_type=\"single_categ\"`` tend to produce better results, while models ``ndim>1``\n tend to produce better results for numerical-only data, especially in the presence of missing values.\n ntry : int\n When using any of ``prob_pick_pooled_gain``, ``prob_pick_avg_gain``, ``prob_pick_full_gain``, ``prob_pick_dens``, how many variables (with ``ndim=1``)\n or linear combinations (with ``ndim>1``) to try for determining the best one according to gain.\n \n Recommended value in reference [4]_ is 10 (with ``prob_pick_avg_gain``, for outlier detection), while the\n recommended value in reference [11]_ is 1 (with ``prob_pick_pooled_gain``, for outlier detection), and the\n recommended value in reference [9]_ is 10 to 20 (with ``prob_pick_pooled_gain``, for missing value imputations).\n categ_cols : None or array-like\n Columns that hold categorical features, when the data is passed as an array or matrix.\n Categorical columns should contain only integer values with a continuous numeration starting at zero,\n with negative values and NaN taken as missing,\n and the array or list passed here should correspond to the column numbers, with numeration starting\n at zero. The maximum categorical value should not exceed 'INT_MAX' (typically :math:`2^{31}-1`).\n This might be passed either at construction time or when calling ``fit`` or variations of ``fit``.\n \n This is ignored when the input is passed as a ``DataFrame`` as then it will consider columns as\n categorical depending on their dtype (see the documentation for ``fit`` for details).\n max_depth : int, None, or str \"auto\"\n Maximum depth of the binary trees to grow. If passing None, will build trees until each observation ends alone\n in a terminal node or until no further split is possible. If using \"auto\", will limit it to the corresponding\n depth of a balanced binary tree with number of terminal nodes corresponding to the sub-sample size (the reason\n being that, if trying to detect outliers, an outlier will only be so if it turns out to be isolated with shorter average\n depth than usual, which corresponds to a balanced tree depth). 
When a terminal node has more than 1 observation, the\n remaining isolation depth for them is estimated assuming the data and splits are both uniformly random (separation depth\n follows a similar process with expected value calculated as in [6]_). Default setting for [1]_, [2]_, [3]_, [4]_ is \"auto\",\n but it's recommended to pass higher values if using the model for purposes other than outlier detection.\n\n Note that models that use ``prob_pick_pooled_gain`` or ``prob_pick_avg_gain`` are likely to benefit from\n deeper trees (larger ``max_depth``), but deeper trees can result in much slower model fitting and\n predictions.\n\n If using pooled gain, one might want to substitute ``max_depth`` with ``min_gain``.\n ncols_per_tree : None, int, or float(0,1]\n Number of columns to use (have as potential candidates for splitting at each iteration) in each tree,\n somewhat similar to the 'mtry' parameter of random forests.\n In general, this is only relevant when using non-random splits and/or weighted column choices.\n\n If passing a number between zero and one, will assume it means taking a sample size that represents\n that proportion of the columns in the data. If passing exactly 1, will assume it means taking\n 100% of the columns rather than taking 1 column.\n\n If passing ``None`` (the default) or zero, will use the full number of available columns.\n prob_pick_pooled_gain : float[0, 1]\n This parameter indicates the probability of choosing the threshold on which to split a variable\n (with ``ndim=1``) or a linear combination of variables (when using ``ndim>1``) as the threshold\n that maximizes a pooled standard deviation gain criterion (see references [9]_ and [11]_) on the\n same variable or linear combination, similarly to regression trees such as CART.\n\n If using ``ntry>1``, will try several variables or linear combinations thereof and choose the one\n in which the largest standardized gain can be achieved.\n\n For categorical variables with ``ndim=1``, will use shannon entropy instead (like in [7]_).\n\n Compared to a simple averaged gain, this tends to result in more evenly-divided splits and more clustered\n groups when they are smaller. Recommended to pass higher values when used for imputation of missing values.\n When used for outlier detection, datasets with multimodal distributions usually see better performance\n under this type of splits.\n \n Note that, since this makes the trees more even and thus it takes more steps to produce isolated nodes,\n the resulting object will be heavier. When splits are not made according to any of ``prob_pick_avg_gain``,\n ``prob_pick_pooled_gain``, ``prob_pick_full_gain``, ``prob_pick_dens``, both the column and the split point are decided at random. 
Note that, if\n passing value 1 (100%) with no sub-sampling and using the single-variable model,\n every single tree will have the exact same splits.\n\n Be aware that ``penalize_range`` can also have a large impact when using ``prob_pick_pooled_gain``.\n\n Under this option, models are likely to produce better results when increasing ``max_depth``.\n Alternatively, one can also control the depth through ``min_gain`` (for which one might want to\n set ``max_depth=None``).\n\n Important detail: if using any of ``prob_pick_avg_gain``, ``prob_pick_pooled_gain``,\n ``prob_pick_full_gain``, ``prob_pick_dens``, the distribution of\n outlier scores is unlikely to be centered around 0.5.\n prob_pick_avg_gain : float[0, 1]\n This parameter indicates the probability of choosing the threshold on which to split a variable\n (with ``ndim=1``) or a linear combination of variables (when using ``ndim>1``) as the threshold\n that maximizes an averaged standard deviation gain criterion (see references [4]_ and [11]_) on the\n same variable or linear combination.\n\n If using ``ntry>1``, will try several variables or linear combinations thereof and choose the one\n in which the largest standardized gain can be achieved.\n\n For categorical variables with ``ndim=1``, will take the expected standard deviation that would be\n gotten if the column were converted to numerical by assigning to each category a random\n number :math:`\\\\sim \\\\text{Unif}(0, 1)` and calculate gain with those assumed standard deviations.\n\n Compared to a pooled gain, this tends to result in more cases in which a single observation or very\n few of them are put into one branch. Typically, datasets with outliers defined by extreme values in\n some column more or less independently of the rest, usually see better performance under this type\n of split. Recommended to use sub-samples (parameter ``sample_size``) when\n passing this parameter. Note that, since this will create isolated nodes faster, the resulting object\n will be lighter (use less memory).\n \n When splits are\n not made according to any of ``prob_pick_avg_gain``, ``prob_pick_pooled_gain``, ``prob_pick_full_gain``, ``prob_pick_dens``,\n both the column and the split point are decided at random. Default setting for [1]_, [2]_, [3]_ is\n zero, and default for [4]_ is 1. This is the randomization parameter that can be passed to the author's original code in [5]_,\n but note that the code in [5]_ suffers from a mathematical error in the calculation of running standard deviations,\n so the results from it might not match with this library's.\n \n Be aware that, if passing a value of 1 (100%) with no sub-sampling and using the single-variable model, every single tree will have\n the exact same splits.\n\n Under this option, models are likely to produce better results when increasing ``max_depth``.\n\n Important detail: if using any of ``prob_pick_avg_gain``, ``prob_pick_pooled_gain``,\n ``prob_pick_full_gain``, ``prob_pick_dens``, the distribution of\n outlier scores is unlikely to be centered around 0.5.\n prob_pick_full_gain : float[0,1]\n This parameter indicates the probability of choosing the threshold on which to split a variable\n (with ``ndim=1``) or a linear combination of variables (when using ``ndim>1``) as the threshold\n that minimizes the pooled sums of variances of all columns (or a subset of them if using\n ``ncols_per_tree``).\n\n In general, this is much slower to evaluate than the other gain types, and does not tend to\n lead to better results. 
When using this option, one might want to use a different scoring\n metric (particularly ``\"density\"``, ``\"boxed_density2\"`` or ``\"boxed_ratio\"``). Note that\n the calculations are all done through the (exact) sorted-indices approach, which is much\n slower than the (approximate) histogram approach used by other decision tree software.\n\n Be aware that the data is not standardized in any way for the variance calculations, thus the scales\n of features will make a large difference under this option, which might not make it suitable for\n all types of data.\n\n This option is not compatible with categorical data, and ``min_gain`` does not apply to it.\n\n When splits are\n not made according to any of ``prob_pick_avg_gain``, ``prob_pick_pooled_gain``, ``prob_pick_full_gain``, ``prob_pick_dens``,\n both the column and the split point are decided at random. Default setting for [1]_, [2]_, [3]_, [4]_ is\n zero.\n prob_pick_dens : float[0,1]\n This parameter indicates the probability of choosing the threshold on which to split a variable\n (with ``ndim=1``) or a linear combination of variables (when using ``ndim>1``) as the threshold\n that maximizes the pooled densities of the branch distributions.\n\n The ``min_gain`` option does not apply to this type of split.\n\n When splits are\n not made according to any of ``prob_pick_avg_gain``, ``prob_pick_pooled_gain``, ``prob_pick_full_gain``, ``prob_pick_dens``,\n both the column and the split point are decided at random. Default setting for [1]_, [2]_, [3]_, [4]_ is\n zero.\n prob_pick_col_by_range : float[0, 1]\n When using ``ndim=1``, this denotes the probability of choosing the column to split with a probability\n proportional to the range spanned by each column within a node as proposed in reference [12]_.\n\n When using ``ndim>1``, this denotes the probability of choosing columns to create a hyperplane with a\n probability proportional to the range spanned by each column within a node.\n\n This option is not compatible with categorical data. If passing column weights, the\n effect will be multiplicative.\n\n Be aware that the data is not standardized in any way for the range calculations, thus the scales\n of features will make a large difference under this option, which might not make it suitable for\n all types of data.\n\n If there are infinite values, all columns having infinite values will be treated as having the\n same weight, and will be chosen before every other column with non-infinite values.\n\n Note that the proposed RRCF model from [12]_ uses a different scoring metric for producing anomaly\n scores, while this library uses isolation depth regardless of how columns are chosen, thus results\n are likely to be different from those of other software implementations. 
Nevertheless, as explored\n in [11]_, isolation depth as a scoring metric typically provides better results than the\n \"co-displacement\" metric from [12]_ under these split types.\n prob_pick_col_by_var : float[0, 1]\n When using ``ndim=1``, this denotes the probability of choosing the column to split with a probability\n proportional to the variance of each column within a node.\n\n When using ``ndim>1``, this denotes the probability of choosing columns to create a hyperplane with a\n probability proportional to the variance of each column within a node.\n\n For categorical data, it will calculate the expected variance if the column were converted to\n numerical by assigning to each category a random number :math:`\\\\sim \\\\text{Unif}(0, 1)`, which depending on the number of\n categories and their distribution, produces numbers typically a bit smaller than standardized numerical\n variables.\n\n Note that when using sparse matrices, the calculation of variance will rely on a procedure that\n uses sums of squares, which has less numerical precision than the\n calculation used for dense inputs, and as such, the results might differ slightly.\n\n Be aware that this calculated variance is not standardized in any way, so the scales of\n features will make a large difference under this option.\n\n If passing column weights, the effect will be multiplicative.\n\n If passing a ``missing_action`` different than \"fail\", infinite values will be ignored for the\n variance calculation. Otherwise, all columns with infinite values will have the same probability\n and will be chosen before columns with non-infinite values.\n prob_pick_col_by_kurt : float[0, 1]\n When using ``ndim=1``, this denotes the probability of choosing the column to split with a probability\n proportional to the kurtosis of each column **within a node** (unlike the option ``weigh_by_kurtosis``\n which calculates this metric only at the root).\n\n When using ``ndim>1``, this denotes the probability of choosing columns to create a hyperplane with a\n probability proportional to the kurtosis of each column within a node.\n\n For categorical data, it will calculate the expected kurtosis if the column were converted to\n numerical by assigning to each category a random number :math:`\\\\sim \\\\text{Unif}(0, 1)`.\n\n Note that when using sparse matrices, the calculation of kurtosis will rely on a procedure that\n uses sums of squares and higher-power numbers, which has less numerical precision than the\n calculation used for dense inputs, and as such, the results might differ slightly.\n\n If passing column weights, the effect will be multiplicative. This option is not compatible\n with ``weigh_by_kurtosis``.\n\n If passing a ``missing_action`` different than \"fail\", infinite values will be ignored for the\n kurtosis calculation. 
Otherwise, all columns with infinite values will have the same probability\n and will be chosen before columns with non-infinite values.\n\n If using ``missing_action=\"impute\"``, the calculation of kurtosis will not use imputed values\n in order not to favor columns with missing values (which would increase kurtosis by all having\n the same central value).\n\n Be aware that kurtosis can be a rather slow metric to calculate.\n min_gain : float > 0\n Minimum gain that a split threshold needs to produce in order to proceed with a split.\n Only used when the splits are decided by a variance gain criterion (``prob_pick_pooled_gain``\n or ``prob_pick_avg_gain``, but not ``prob_pick_full_gain`` nor ``prob_pick_dens``).\n If the highest possible gain in the evaluated\n splits at a node is below this threshold, that node becomes a terminal node.\n\n This can be used as a more sophisticated depth control when using pooled gain (note that ``max_depth``\n still applies on top of this heuristic).\n missing_action : str, one of \"divide\" (single-variable only), \"impute\", \"fail\", \"auto\"\n How to handle missing data at both fitting and prediction time. Options are:\n \n ``\"divide\"``:\n (For the single-variable model only, recommended) Will follow both branches and combine the result with the\n weight given by the fraction of the data that went to each branch when fitting the model.\n ``\"impute\"``:\n Will assign observations to the branch with the most observations in the single-variable model, or fill in\n missing values with the median of each column of the sample from which the split was made in the extended\n model (recommended for the extended model) (but note that the calculation of medians does not take\n into account sample weights when using ``weights_as_sample_prob=False``).\n When using ``ndim=1``, gain calculations will use median-imputed values for missing data under this option.\n ``\"fail\"``:\n Will assume there are no missing values and will trigger undefined behavior if it encounters any.\n ``\"auto\"``:\n Will use \"divide\" for the single-variable model and \"impute\" for the extended model.\n \n In the extended model, infinite values will be treated as missing.\n Passing \"fail\" will produce faster fitting and prediction times along with decreased\n model object sizes.\n\n Models from [1]_, [2]_, [3]_, [4]_ correspond to \"fail\" here.\n\n Typically, models with 'ndim>1' are less affected by missing data than models with 'ndim=1'.\n new_categ_action : str, one of \"weighted\" (single-variable only), \"impute\" (extended only), \"smallest\", \"random\"\n What to do after splitting a categorical feature when new data that reaches that split has categories that\n the sub-sample from which the split was done did not have. 
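(For example, a split learned on a hypothetical 'color' column whose sub-sample contained only 'red' and 'blue' may later need to route an observation with the unseen category 'green'.) 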
Options are:\n\n ``\"weighted\"``:\n (For the single-variable model only, recommended) Will follow both branches and combine the result with weight given\n by the fraction of the data that went to each branch when fitting the model.\n ``\"impute\"``:\n (For the extended model only, recommended) Will assign them the median value for that column that was added to the linear\n combination of features (but note that this median calculation does not use sample weights when\n using ``weights_as_sample_prob=False``).\n ``\"smallest\"``:\n In the single-variable case will assign all observations with unseen categories in the split to the branch that had\n fewer observations when fitting the model, and in the extended case will assign them the coefficient of the least\n common category.\n ``\"random\"``:\n Will assign a branch (coefficient in the extended model) at random for each category beforehand, even if no observations\n had that category when fitting the model. Note that this can produce biased results when deciding\n splits by a gain criterion.\n\n Important: under this option, if the model is fitted to a ``DataFrame``, when calling ``predict``\n on new data which contains new categories (unseen in the data to which the model was fitted),\n they will be added to the model's state on-the-fly. This means that, if calling ``predict`` on data\n which has new categories, there might be inconsistencies in the results if predictions are done in\n parallel or if passing the same data in batches or with different row orders. It also means that\n the ``predict`` function will not be thread-safe (e.g. cannot be used alongside ``joblib`` with a\n backend that uses shared memory).\n ``\"auto\"``:\n Will select \"weighted\" for the single-variable model and \"impute\" for the extended model.\n Ignored when passing 'categ_split_type' = 'single_categ'.\n categ_split_type : str, one of \"auto\", \"subset\", or \"single_categ\"\n Whether to split categorical features by assigning sub-sets of them to each branch, or by assigning\n a single category to a branch and the rest to the other branch. For the extended model, whether to\n give each category a coefficient, or only one while the rest get zero.\n\n If passing ``\"auto\"``, will select ``\"subset\"`` for the extended model and ``\"single_categ\"`` for\n the single-variable model.\n all_perm : bool\n When doing categorical variable splits by pooled gain with ``ndim=1`` (single-variable model),\n whether to consider all possible permutations of variables to assign to each branch or not. If ``False``,\n will sort the categories by their frequency and make a grouping in this sorted order. Note that the\n number of combinations evaluated (if ``True``) is the factorial of the number of present categories in\n a given column (minus 2). For averaged gain, the best split is always to put the second most-frequent\n category in a separate branch, so not evaluating all permutations (passing ``False``) will make it\n possible to select other splits that respect the sorted frequency order.\n Ignored when not using categorical variables or not doing splits by pooled gain or using ``ndim > 1``.\n coef_by_prop : bool\n In the extended model, whether to sort the randomly-generated coefficients for categories\n according to their relative frequency in the tree node. This might provide better results when using\n categorical variables with too many categories, but is not recommended, and not reflective of\n real \"categorical-ness\". 
Ignored for the single-variable model (``ndim=1``) and/or when not using categorical\n variables.\n recode_categ : bool\n Whether to re-encode categorical variables even in case they are already passed\n as ``pd.Categorical``. This is recommended as it will eliminate potentially redundant categorical levels if\n they have no observations, but if the categorical variables are already of type ``pd.Categorical`` with only\n the levels that are present, it can be skipped for slightly faster fitting times. You'll likely\n want to pass ``False`` here if merging several models into one through ``append_trees``.\n weights_as_sample_prob : bool\n If passing sample (row) weights when fitting the model, whether to consider those weights as row\n sampling weights (i.e. the higher the weights, the more likely the observation will end up included\n in each tree sub-sample), or as distribution density weights (i.e. putting a weight of two is the same\n as if the row appeared twice, thus higher weight makes it less of an outlier, but does not give it a\n higher chance of being sampled if the data uses sub-sampling).\n sample_with_replacement : bool\n Whether to sample rows with replacement or not (not recommended). Note that distance calculations,\n if desired, don't work well with duplicate rows.\n\n Note that it is not possible to call ``fit_predict`` or ``fit_transform`` when using this option.\n penalize_range : bool\n Whether to penalize (add -1 to the terminal depth) observations at prediction time that have a value\n of the chosen split variable (linear combination in extended model) that falls outside of a pre-determined\n reasonable range in the data being split (given by 2 * range in data and centered around the split point),\n as proposed in [4]_ and implemented in the authors' original code in [5]_. Not used in single-variable model\n when splitting by categorical variables.\n\n This option is not supported when using density-based outlier scoring metrics.\n\n It's recommended to turn this off for faster predictions on sparse CSC matrices.\n\n Note that this can make a very large difference in the results when using ``prob_pick_pooled_gain``.\n\n Be aware that this option can make the distribution of outlier scores a bit different\n (i.e. not centered around 0.5).\n scoring_metric : str\n Metric to use for determining outlier scores (see reference [13]_). Options are:\n\n ``\"depth\"``\n Will use isolation depth as proposed in reference [1]_. This is typically the safest choice\n and plays well with all model types offered by this library.\n ``\"density\"``\n Will set scores for each terminal node as the ratio between the fraction of points in the sub-sample\n that end up in that node and the fraction of the volume in the feature space which defines\n the node according to the splits that lead to it.\n If using ``ndim=1``, for categorical variables, this is defined in terms\n of number of categories that go towards each side of the split divided by number of categories\n in the observations that reached that node.\n\n The standardized outlier score from density for a given observation is calculated as the\n negative of the logarithm of the geometric mean from the per-tree densities, which unlike\n the standardized score produced from depth, is unbounded, but just like the standardized\n score from depth, has a natural threshold for defining outlierness, which in this case\n is zero instead of 0.5. 
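For instance, an observation whose per-tree densities have a geometric mean of exactly 1 gets a standardized score of zero (the negative logarithm of 1), placing it right at that threshold, while geometric means below 1 yield positive, more anomalous scores. 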
The non-standardized outlier score is calculated as the\n geometric mean, while the per-tree scores are calculated as the density values.\n \n This might lead to better predictions when using ``ndim=1``, particularly in the presence\n of categorical variables. Note however that using density requires more trees for convergence\n of scores (i.e. good results) compared to isolation-based metrics.\n\n This option is incompatible with ``penalize_range``.\n ``\"adj_depth\"``\n Will use an adjusted isolation depth that takes into account the number of points that\n go to each side of a given split vs. the fraction of the range of that feature that each\n side of the split occupies, by a metric as follows:\n :math:`d = \\\\frac{2}{ 1 + \\\\frac{1}{2 p} }`\n \n Where :math:`p` is defined as:\n :math:`p = \\\\frac{n_s}{n_t} / \\\\frac{r_s}{r_t}`\n \n With :math:`n_t` being the number of points that reach a given node, :math:`n_s` the\n number of points that are sent to a given side of the split/branch at that node,\n :math:`r_t` being the range (maximum minus minimum) of the splitting feature or\n linear combination among the points that reached the node, and :math:`r_s` being the\n range of the same feature or linear combination among the points that are sent to this\n same side of the split/branch. This makes each split add a number between zero and two\n to the isolation depth, with this number's probabilistic distribution being centered\n around 1 and thus the expected isolation depth remaining the same as in the original\n ``\"depth\"`` metric, but having more variability around the extremes.\n\n Scores (standardized, non-standardized, per-tree) are aggregated in the same way\n as for ``\"depth\"``.\n\n This might lead to better predictions when using ``ndim=1``, particularly in the presence\n of categorical variables and for smaller datasets, in which case it might also make\n sense to combine it with ``penalize_range=True``.\n ``\"adj_density\"``\n Will use the same metric from ``\"adj_depth\"``, but applied multiplicatively instead\n of additively. The expected value for this adjusted density is not strictly the same\n as for isolation, but using the expected isolation depth as standardizing criterion\n tends to produce similar standardized score distributions (centered around 0.5).\n\n Scores (standardized, non-standardized, per-tree) are aggregated in the same way\n as for ``\"depth\"``.\n\n This option is incompatible with ``penalize_range``.\n ``\"boxed_ratio\"``\n Will set the scores for each terminal node as the ratio between the volume of the boxed\n feature space for the node as defined by the smallest and largest values from the split\n conditions for each column (bounded by the variable ranges in the sample) and the\n variable ranges in the tree sample.\n If using ``ndim=1``, for categorical variables this is defined in terms of number of\n categories.\n If using ``ndim>1``, this is defined in terms of the maximum achievable value for the\n splitting linear combination determined from the minimum and maximum values for each\n variable among the points in the sample, and as such, it has a rather different meaning\n compared to the score obtained with ``ndim=1`` - boxed ratio scores with ``ndim>1``\n typically provide very poor quality results and this metric is thus not recommended to\n use in the extended model. 
With 'ndim>1', it also has a tendency of producing too small\n values which round to zero.\n\n The standardized outlier score from boxed ratio for a given observation is calculated\n simply as the average from the per-tree boxed ratios. This metric\n has a lower bound of zero and a theoretical upper bound of one, but in practice the scores\n tend to be very small numbers close to zero, and its distribution across\n different datasets is rather unpredictable. In order to keep rankings comparable with\n the rest of the metrics, the non-standardized outlier scores are calculated as the\n negative of the average instead. The per-tree scores are calculated as the ratios.\n\n This metric can be calculated in a fast-but-not-so-precise way, and in a slow-but-precise\n way, which is controlled by parameter ``fast_bratio``. Usually, both should give the\n same results, but in some datasets, the fast way can lead to numerical inaccuracies\n due to roundoffs very close to zero.\n\n This metric might lead to better predictions in datasets with many rows when using ``ndim=1``\n and a relatively small ``sample_size``. Note that more trees are required for convergence\n of scores when using this metric. In some datasets, this metric might result in very bad\n predictions, to the point that taking its inverse produces a much better ranking of outliers.\n\n This option is incompatible with ``penalize_range``.\n ``\"boxed_density2\"``\n Will set the score as the ratio between the fraction of points within the sample that\n end up in a given terminal node and the boxed ratio metric.\n\n Aggregation of scores (standardized, non-standardized, per-tree) is done in the same\n way as for density, and it also has a natural threshold at zero for determining\n outliers and inliers.\n\n This metric is typically usable with 'ndim>1', but tends to produce much bigger values\n compared to 'ndim=1'.\n\n Albeit unintuitively, in many datasets, one can usually get better results with metric\n ``\"boxed_density\"`` instead.\n\n The calculation of this metric is also controlled by ``fast_bratio``.\n\n This option is incompatible with ``penalize_range``.\n ``\"boxed_density\"``\n Will set the score as the ratio between the fraction of points within the sample that\n end up in a given terminal node and the ratio between the boxed volume of the feature\n space in the sample and the boxed volume of a node given by the split conditions (inverse\n as in ``\"boxed_density2\"``). 
This metric does not have any theoretical or intuitive\n justification behind its existence, and it is perhaps illogical to use it as a\n scoring metric, but tends to produce good results in some datasets.\n\n The standardized outlier scores are defined as the negative of the geometric mean\n of this metric, while the non-standardized scores are the geometric mean, and the\n per-tree scores are simply the 'density' values.\n\n The calculation of this metric is also controlled by ``fast_bratio``.\n\n This option is incompatible with ``penalize_range``.\n fast_bratio : bool\n When using \"boxed\" metrics for scoring, whether to calculate them in a fast way through\n cumulative sum of logarithms of ratios after each split, or in a slower way as sum of\n logarithms of a single ratio per column for each terminal node.\n\n Usually, both methods should give the same results, but in some datasets, particularly\n when variables have too small or too large ranges, the first method can be prone to\n numerical inaccuracies due to roundoff close to zero.\n\n Note that this does not affect calculations for models with 'ndim>1', since given the\n split types, the calculation for them is different.\n standardize_data : bool\n Whether to standardize the features at each node before creating a linear combination of them as suggested\n in [4]_. This is ignored when using ``ndim=1``.\n weigh_by_kurtosis : bool\n Whether to weigh each column according to the kurtosis obtained in the sub-sample that is selected\n for each tree as briefly proposed in [1]_. Note that this is only done at the beginning of each tree\n sample. For categorical columns, will calculate expected kurtosis if the column were converted to\n numerical by assigning to each category a random number :math:`\\\\sim \\\\text{Unif}(0, 1)`.\n\n Note that when using sparse matrices, the calculation of kurtosis will rely on a procedure that\n uses sums of squares and higher-power numbers, which has less numerical precision than the\n calculation used for dense inputs, and as such, the results might differ slightly.\n\n Using this option makes the model more likely to pick the columns that have anomalous values\n when viewed as a 1-d distribution, and can bring a large improvement in some datasets.\n\n This is intended as a cheap feature selector, while the parameter ``prob_pick_col_by_kurt``\n provides the option to do this at each node in the tree for a different overall type of model.\n\n If passing column weights or using weighted column choices proportional to some other metric\n (``prob_pick_col_by_range``, ``prob_pick_col_by_var``), the effect will be multiplicative.\n\n If passing ``missing_action=\"fail\"`` and the data has infinite values, columns with rows\n having infinite values will get a weight of zero. If passing a different value for missing\n action, infinite values will be ignored in the kurtosis calculation.\n\n If using ``missing_action=\"impute\"``, the calculation of kurtosis will not use imputed values\n in order not to favor columns with missing values (which would increase kurtosis by all having\n the same central value).\n coefs : str, one of \"normal\" or \"uniform\"\n For the extended model, whether to sample random coefficients according to a normal distribution :math:`\\\\sim \\\\text{Normal}(0, 1)`\n (as proposed in [4]_) or according to a uniform distribution :math:`\\\\sim \\\\text{Unif}(-1, +1)` as proposed in [3]_. Ignored for the\n single-variable model. 
Note that, for categorical variables, the coefficients will be sampled ~ N (0,1)\n regardless - in order for both types of variables to have transformations in similar ranges (which will tend\n to boost the importance of categorical variables), pass ``\"uniform\"`` here.\n assume_full_distr : bool\n When calculating pairwise distances (see [8]_), whether to assume that the fitted model represents\n a full population distribution (will use a standardizing criterion assuming infinite sample,\n and the results of the similarity between two points at prediction time will not depend on the\n presence of any third point that is similar to them, but will differ more compared to the pairwise\n distances between points from which the model was fit). If passing 'False', will calculate pairwise distances\n as if the new observations at prediction time were added to the sample to which each tree was fit, which\n will make the distances between two points potentially vary according to other newly introduced points.\n This will not be assumed when the distances are calculated as the model is being fit (see documentation\n for method 'fit_transform').\n\n This was added for experimentation purposes only and it's not recommended to pass ``False``.\n Note that when calculating distances using a tree indexer (after calling ``build_index``), there\n might be slight discrepancies between the numbers produced with or without the indexer due to what\n are considered \"additional\" observations in this calculation.\n build_imputer : bool\n Whether to construct missing-value imputers so that later this same model could be used to impute\n missing values of new (or the same) observations. Be aware that this will significantly increase the memory\n requirements and serialized object sizes. Note that this is not related to 'missing_action' as missing\n values inside the model are treated differently and follow their own imputation or division strategy.\n min_imp_obs : int\n Minimum number of observations with which an imputation value can be produced. Ignored if passing\n 'build_imputer' = 'False'.\n depth_imp : str, one of \"higher\", \"lower\", \"same\"\n How to weight observations according to their depth when used for imputing missing values. Passing\n \"higher\" will weigh observations higher the further down the tree (away from the root node) the\n terminal node is, while \"lower\" will do the opposite, and \"same\" will not modify the weights according\n to node depth in the tree. Implemented for testing purposes and not recommended to change\n from the default. Ignored when passing 'build_imputer' = 'False'.\n weigh_imp_rows : str, one of \"inverse\", \"prop\", \"flat\"\n How to weight node sizes when used for imputing missing values. Passing \"inverse\" will weigh\n a node inversely proportional to the number of observations that end up there, while \"prop\" (proportional)\n will weight them heavier the more observations there are, and \"flat\" will weigh all nodes the same\n in this regard regardless of how many observations end up there. Implemented for testing purposes\n and not recommended to change from the default. Ignored when passing 'build_imputer' = 'False'.\n random_seed : int\n Seed that will be used for random number generation.\n use_long_double : bool\n Whether to use 'long double' (extended precision) type for more precise calculations about\n standard deviations, means, ratios, weights, gain, and other potential aggregates. 
This makes\n such calculations accurate to a larger number of decimals (provided that the compiler used has\n wider long doubles than doubles) and it is highly recommended to use when the input data has\n a number of rows or columns exceeding :math:`2^{53}` (an unlikely scenario), and also highly recommended\n to use when the input data has problematic scales (e.g. numbers that differ from each other by\n something like :math:`10^{-100}` or columns that include values like :math:`10^{100}`, :math:`10^{-10}`, and :math:`10^{-100}` and still need to\n be sensitive to a difference of :math:`10^{-10}`), but will make the calculations slower, the more so in\n platforms in which 'long double' is a software-emulated type (e.g. Power8 platforms).\n Note that some platforms (most notably windows with the msvc compiler) do not make any difference\n between 'double' and 'long double'.\n\n If 'long double' is not going to be used, the library can be compiled without support for it\n (making the library size smaller) by defining an environment variable ``NO_LONG_DOUBLE`` before\n installing this package (e.g. through ``export NO_LONG_DOUBLE=1`` before running the ``pip`` command).\n\n This option is not available on Windows, due to lack of support in some compilers (e.g. msvc)\n and lack of thread-safety in the calculations in others (e.g. mingw).\n nthreads : int\n Number of parallel threads to use. If passing a negative number, will use\n the same formula as joblib does for calculating number of threads (which is\n n_cpus + 1 + n_jobs - i.e. pass -1 to use all available threads). Note that, the more threads,\n the more memory will be allocated, even if the thread does not end up being used.\n Be aware that most of the operations are bound by memory bandwidth, which means that\n adding more threads will not result in a linear speed-up. For some types of data\n (e.g. large sparse matrices with small sample sizes), adding more threads might result\n in only a very modest speed up (e.g. 1.5x faster with 4x more threads),\n even if all threads look fully utilized.\n n_estimators : None or int\n Synonym for ``ntrees``, kept for better compatibility with scikit-learn.\n max_samples : None or int\n Synonym for ``sample_size``, kept for better compatibility with scikit-learn.\n n_jobs : None or int\n Synonym for ``nthreads``, kept for better compatibility with scikit-learn.\n random_state : None, int, or RandomState\n Synonym for ``random_seed``, kept for better compatibility with scikit-learn.\n bootstrap : None or bool\n Synonym for ``sample_with_replacement``, kept for better compatibility with scikit-learn.\n\n Attributes\n ----------\n cols_numeric_ : array(n_num_features,)\n Array with the names of the columns that were taken as numerical\n (Only when fitting the model to a DataFrame object).\n cols_categ_ : array(n_categ_features,)\n Array with the names of the columns that were taken as categorical\n (Only when fitting the model to a DataFrame object).\n is_fitted_ : bool\n Indicator telling whether the model has been fit to data or not.\n\n References\n ----------\n .. [1] Liu, Fei Tony, Kai Ming Ting, and Zhi-Hua Zhou. \"Isolation forest.\"\n 2008 Eighth IEEE International Conference on Data Mining. IEEE, 2008.\n .. [2] Liu, Fei Tony, Kai Ming Ting, and Zhi-Hua Zhou. \"Isolation-based anomaly detection.\"\n ACM Transactions on Knowledge Discovery from Data (TKDD) 6.1 (2012): 3.\n .. [3] Hariri, Sahand, Matias Carrasco Kind, and Robert J. Brunner. 
\"Extended Isolation Forest.\"\n arXiv preprint arXiv:1811.02141 (2018).\n .. [4] Liu, Fei Tony, Kai Ming Ting, and Zhi-Hua Zhou. \"On detecting clustered anomalies using SCiForest.\"\n Joint European Conference on Machine Learning and Knowledge Discovery in Databases. Springer, Berlin, Heidelberg, 2010.\n .. [5] https://sourceforge.net/projects/iforest/\n .. [6] https://math.stackexchange.com/questions/3388518/expected-number-of-paths-required-to-separate-elements-in-a-binary-tree\n .. [7] Quinlan, J. Ross. C4. 5: programs for machine learning. Elsevier, 2014.\n .. [8] Cortes, David. \"Distance approximation using Isolation Forests.\"\n arXiv preprint arXiv:1910.12362 (2019).\n .. [9] Cortes, David. \"Imputing missing values with unsupervised random trees.\"\n arXiv preprint arXiv:1911.06646 (2019).\n .. [10] https://math.stackexchange.com/questions/3333220/expected-average-depth-in-random-binary-tree-constructed-top-to-bottom\n .. [11] Cortes, David. \"Revisiting randomized choices in isolation forests.\"\n arXiv preprint arXiv:2110.13402 (2021).\n .. [12] Guha, Sudipto, et al. \"Robust random cut forest based anomaly detection on streams.\"\n International conference on machine learning. PMLR, 2016.\n .. [13] Cortes, David. \"Isolation forests: looking beyond tree depth.\"\n arXiv preprint arXiv:2111.11639 (2021).\n .. [14] Ting, Kai Ming, Yue Zhu, and Zhi-Hua Zhou. \"Isolation kernel and its effect on SVM.\"\n Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. 2018.\n \"\"\"\n def __init__(self, sample_size = \"auto\", ntrees = 500, ndim = 3, ntry = 1,\n categ_cols = None, max_depth = \"auto\", ncols_per_tree = None,\n prob_pick_pooled_gain = 0.0, prob_pick_avg_gain = 0.0,\n prob_pick_full_gain = 0.0, prob_pick_dens = 0.0,\n prob_pick_col_by_range = 0.0, prob_pick_col_by_var = 0.0,\n prob_pick_col_by_kurt = 0.0,\n min_gain = 0., missing_action = \"auto\", new_categ_action = \"auto\",\n categ_split_type = \"auto\", all_perm = False,\n coef_by_prop = False, recode_categ = False,\n weights_as_sample_prob = True, sample_with_replacement = False,\n penalize_range = False, standardize_data = True,\n scoring_metric = \"depth\", fast_bratio = True, weigh_by_kurtosis = False,\n coefs = \"uniform\", assume_full_distr = True,\n build_imputer = False, min_imp_obs = 3,\n depth_imp = \"higher\", weigh_imp_rows = \"inverse\",\n random_seed = 1, use_long_double = False, nthreads = -1,\n n_estimators = None, max_samples = None,\n n_jobs = None, random_state = None, bootstrap = None):\n self.sample_size = sample_size\n self.ntrees = ntrees\n self.ndim = ndim\n self.ntry = ntry\n self.categ_cols = categ_cols\n self.max_depth = max_depth\n self.ncols_per_tree = ncols_per_tree\n self.prob_pick_avg_gain = prob_pick_avg_gain\n self.prob_pick_pooled_gain = prob_pick_pooled_gain\n self.prob_pick_full_gain = prob_pick_full_gain\n self.prob_pick_dens = prob_pick_dens\n self.prob_pick_col_by_range = prob_pick_col_by_range\n self.prob_pick_col_by_var = prob_pick_col_by_var\n self.prob_pick_col_by_kurt = prob_pick_col_by_kurt\n self.min_gain = min_gain\n self.missing_action = missing_action\n self.new_categ_action = new_categ_action\n self.categ_split_type = categ_split_type\n self.all_perm = all_perm\n self.coef_by_prop = coef_by_prop\n self.recode_categ = recode_categ\n self.weights_as_sample_prob = weights_as_sample_prob\n self.sample_with_replacement = sample_with_replacement\n self.penalize_range = penalize_range\n self.standardize_data = standardize_data\n 
self.scoring_metric = scoring_metric\n self.fast_bratio = fast_bratio\n self.weigh_by_kurtosis = weigh_by_kurtosis\n self.coefs = coefs\n self.assume_full_distr = assume_full_distr\n self.build_imputer = build_imputer\n self.min_imp_obs = min_imp_obs\n self.depth_imp = depth_imp\n self.weigh_imp_rows = weigh_imp_rows\n self.random_seed = random_seed\n self.use_long_double = use_long_double\n self.nthreads = nthreads\n self.n_estimators = n_estimators\n self.max_samples = max_samples\n self.n_jobs = n_jobs\n self.random_state = random_state\n self.bootstrap = bootstrap\n\n self._reset_obj()\n\n def _init(self, categ_cols = None):\n if categ_cols is not None:\n if self.categ_cols is not None:\n warnings.warn(\"Passed 'categ_cols' in constructor and fit method. Will take the latter.\")\n self.categ_cols = categ_cols\n self._initialize_full(\n sample_size = self.sample_size if (self.max_samples is None) else self.max_samples,\n ntrees = self.ntrees if (self.n_estimators is None) else self.n_estimators,\n ndim = self.ndim, ntry = self.ntry,\n categ_cols = self.categ_cols,\n max_depth = self.max_depth, ncols_per_tree = self.ncols_per_tree,\n prob_pick_avg_gain = self.prob_pick_avg_gain, prob_pick_pooled_gain = self.prob_pick_pooled_gain,\n prob_pick_full_gain = self.prob_pick_full_gain, prob_pick_dens = self.prob_pick_dens,\n prob_pick_col_by_range = self.prob_pick_col_by_range,\n prob_pick_col_by_var = self.prob_pick_col_by_var,\n prob_pick_col_by_kurt = self.prob_pick_col_by_kurt,\n min_gain = self.min_gain, missing_action = self.missing_action, new_categ_action = self.new_categ_action,\n categ_split_type = self.categ_split_type, all_perm = self.all_perm,\n coef_by_prop = self.coef_by_prop, recode_categ = self.recode_categ,\n weights_as_sample_prob = self.weights_as_sample_prob,\n sample_with_replacement = self.sample_with_replacement if (self.bootstrap is None) else self.bootstrap,\n penalize_range = self.penalize_range, standardize_data = self.standardize_data,\n scoring_metric = self.scoring_metric, fast_bratio = self.fast_bratio,\n weigh_by_kurtosis = self.weigh_by_kurtosis,\n coefs = self.coefs, assume_full_distr = self.assume_full_distr,\n build_imputer = self.build_imputer, min_imp_obs = self.min_imp_obs,\n depth_imp = self.depth_imp, weigh_imp_rows = self.weigh_imp_rows,\n random_seed = self.random_seed if (self.random_state is None) else self.random_state,\n use_long_double = self.use_long_double,\n nthreads = self.nthreads if (self.n_jobs is None) else self.n_jobs)\n\n def _initialize_full(self, sample_size = None, ntrees = 500, ndim = 3, ntry = 1,\n categ_cols = None, max_depth = \"auto\", ncols_per_tree = None,\n prob_pick_avg_gain = 0.0, prob_pick_pooled_gain = 0.0,\n prob_pick_full_gain = 0.0, prob_pick_dens = 0.0,\n prob_pick_col_by_range = 0.0, prob_pick_col_by_var = 0.0,\n prob_pick_col_by_kurt = 0.0,\n min_gain = 0., missing_action = \"auto\", new_categ_action = \"auto\",\n categ_split_type = \"auto\", all_perm = False,\n coef_by_prop = False, recode_categ = True,\n weights_as_sample_prob = True, sample_with_replacement = False,\n penalize_range = True, standardize_data = True,\n scoring_metric = \"depth\", fast_bratio = True, weigh_by_kurtosis = False,\n coefs = \"normal\", assume_full_distr = True,\n build_imputer = False, min_imp_obs = 3,\n depth_imp = \"higher\", weigh_imp_rows = \"inverse\",\n random_seed = 1, use_long_double = False, nthreads = -1):\n if (sample_size is not None) and (sample_size != \"auto\"):\n assert sample_size > 0\n if sample_size > 1:\n assert 
isinstance(sample_size, int)\n if ncols_per_tree is not None:\n assert ncols_per_tree > 0\n if ncols_per_tree > 1:\n assert isinstance(ncols_per_tree, int)\n elif ncols_per_tree == 1:\n ncols_per_tree = None\n assert ntrees > 0\n assert isinstance(ntrees, int)\n if (max_depth != \"auto\") and (max_depth is not None):\n assert max_depth > 0\n assert isinstance(max_depth, int)\n if (sample_size is not None) and (sample_size != \"auto\"):\n if not (max_depth < sample_size):\n warnings.warn(\"Passed 'max_depth' greater than 'sample_size'. Will be ignored.\")\n assert ndim >= 1\n assert isinstance(ndim, int)\n assert ntry >= 1\n assert isinstance(ntry, int)\n if isinstance(random_seed, np.random.RandomState):\n random_seed = random_seed.randint(np.iinfo(np.int32).max)\n if isinstance(random_seed, np.random.Generator):\n random_seed = random_seed.integers(np.iinfo(np.int32).max)\n random_seed = int(random_seed)\n assert random_seed >= 0\n assert isinstance(min_imp_obs, int)\n assert min_imp_obs >= 1\n\n assert missing_action in [\"divide\", \"impute\", \"fail\", \"auto\"]\n assert new_categ_action in [\"weighted\", \"smallest\", \"random\", \"impute\", \"auto\"]\n assert categ_split_type in [\"single_categ\", \"subset\", \"auto\"]\n assert coefs in [\"normal\", \"uniform\"]\n assert depth_imp in [\"lower\", \"higher\", \"same\"]\n assert weigh_imp_rows in [\"inverse\", \"prop\", \"flat\"]\n assert scoring_metric in [\"depth\", \"adj_depth\", \"density\", \"adj_density\",\n \"boxed_density\", \"boxed_density2\", \"boxed_ratio\"]\n\n assert prob_pick_avg_gain >= 0\n assert prob_pick_pooled_gain >= 0\n assert prob_pick_full_gain >= 0\n assert prob_pick_dens >= 0\n assert prob_pick_col_by_range >= 0\n assert prob_pick_col_by_var >= 0\n assert prob_pick_col_by_kurt >= 0\n assert min_gain >= 0\n s = prob_pick_avg_gain + prob_pick_pooled_gain + prob_pick_full_gain + prob_pick_dens\n if s > 1:\n warnings.warn(\"Split type probabilities sum to more than 1, will standardize them\")\n prob_pick_avg_gain /= s\n prob_pick_pooled_gain /= s\n prob_pick_full_gain /= s\n prob_pick_dens /= s\n\n s = prob_pick_col_by_range + prob_pick_col_by_var + prob_pick_col_by_kurt\n if s > 1:\n warnings.warn(\"Column choice probabilities sum to more than 1, will standardize them\")\n prob_pick_col_by_range /= s\n prob_pick_col_by_var /= s\n prob_pick_col_by_kurt /= s\n\n if weigh_by_kurtosis and prob_pick_col_by_kurt:\n raise ValueError(\"'weigh_by_kurtosis' is incompatible with 'prob_pick_col_by_kurt'.\")\n\n if (\n (ndim == 1) and\n ((sample_size is None) or (sample_size == \"auto\")) and\n (max([prob_pick_avg_gain, prob_pick_pooled_gain, prob_pick_full_gain, prob_pick_dens]) >= 1) and\n (not sample_with_replacement)\n ):\n msg = \"Passed parameters for deterministic single-variable splits\"\n msg += \" with no sub-sampling. \"\n msg += \"Every tree fitted will end up doing exactly the same splits. 
\"\n msg += \"It's recommended to set non-random split probabilities to less than 1, \"\n msg += \"or to use the extended model (ndim > 1).\"\n warnings.warn(msg)\n\n if missing_action == \"auto\":\n if ndim == 1:\n missing_action = \"divide\"\n else:\n missing_action = \"impute\"\n\n if new_categ_action == \"auto\":\n if ndim == 1:\n new_categ_action = \"weighted\"\n else:\n new_categ_action = \"impute\"\n\n if (build_imputer) and (missing_action == \"fail\"):\n raise ValueError(\"Cannot impute missing values when passing 'missing_action' = 'fail'.\")\n\n if categ_split_type == \"auto\":\n if ndim == 1:\n categ_split_type = \"single_categ\"\n else:\n categ_split_type = \"subset\"\n if ndim == 1:\n if (categ_split_type != \"single_categ\") and (new_categ_action == \"impute\"):\n raise ValueError(\"'new_categ_action' = 'impute' not supported in single-variable model.\")\n else:\n if missing_action == \"divide\":\n raise ValueError(\"'missing_action' = 'divide' not supported in extended model.\")\n if (categ_split_type != \"single_categ\") and (new_categ_action == \"weighted\"):\n raise ValueError(\"'new_categ_action' = 'weighted' not supported in extended model.\")\n\n if penalize_range and scoring_metric in [\"density\", \"adj_density\", \"boxed_density\", \"boxed_density2\", \"boxed_ratio\"]:\n raise ValueError(\"'penalize_range' is incompatible with density scoring.\")\n\n if categ_cols is not None:\n categ_cols = np.array(categ_cols).reshape(-1).astype(int)\n if categ_cols.shape[0]:\n if prob_pick_col_by_range:\n raise ValueError(\"'prob_pick_col_by_range' is incompatible with categorical data.\")\n if prob_pick_full_gain:\n raise ValueError(\"'prob_pick_full_gain' is incompatible with categorical data.\")\n categ_cols.sort()\n else:\n categ_cols = None\n\n ## TODO: for better sklearn compatibility, should have versions of\n ## these with underscores at the end\n self.sample_size = sample_size\n self.ntrees = ntrees\n self.ndim = ndim\n self.ntry = ntry\n self.categ_cols_ = categ_cols\n self.max_depth = max_depth\n self.ncols_per_tree = ncols_per_tree\n self.prob_pick_avg_gain_ = float(prob_pick_avg_gain)\n self.prob_pick_pooled_gain_ = float(prob_pick_pooled_gain)\n self.prob_pick_full_gain_ = float(prob_pick_full_gain)\n self.prob_pick_dens_ = float(prob_pick_dens)\n self.prob_pick_col_by_range_ = float(prob_pick_col_by_range)\n self.prob_pick_col_by_var_ = float(prob_pick_col_by_var)\n self.prob_pick_col_by_kurt_ = float(prob_pick_col_by_kurt)\n self.min_gain = min_gain\n self.missing_action_ = missing_action\n self.new_categ_action_ = new_categ_action\n self.categ_split_type_ = categ_split_type\n self.coefs = coefs\n self.depth_imp = depth_imp\n self.weigh_imp_rows = weigh_imp_rows\n self.scoring_metric = scoring_metric\n self.min_imp_obs = min_imp_obs\n self.random_seed = random_seed\n self.nthreads = nthreads\n\n self.fast_bratio = bool(fast_bratio)\n self.all_perm = bool(all_perm)\n self.recode_categ = bool(recode_categ)\n self.coef_by_prop = bool(coef_by_prop)\n self.weights_as_sample_prob = bool(weights_as_sample_prob)\n self.sample_with_replacement = bool(sample_with_replacement)\n self.penalize_range = bool(penalize_range)\n self.standardize_data = bool(standardize_data)\n self.weigh_by_kurtosis = bool(weigh_by_kurtosis)\n self.assume_full_distr = bool(assume_full_distr)\n self.build_imputer = bool(build_imputer)\n self.use_long_double = bool(use_long_double)\n\n self._reset_obj()\n\n def _reset_obj(self):\n self.cols_numeric_ = np.array([])\n self.cols_categ_ = 
np.array([])\n self._cat_mapping = list()\n self._cat_max_lev = np.array([])\n self._ncols_numeric = 0\n self._ncols_categ = 0\n self.is_fitted_ = False\n self._ntrees = 0\n self._cpp_obj = isoforest_cpp_obj()\n self._is_extended_ = self.ndim > 1\n\n def copy(self):\n \"\"\"\n Get a deep copy of this object\n\n Returns\n -------\n copied : obj\n A deep copy of this object\n \"\"\"\n return deepcopy(self)\n\n def get_params(self, deep=True):\n \"\"\"\n Get parameters for this estimator.\n\n Kept for compatibility with scikit-learn.\n\n Parameters\n ----------\n deep : bool\n Ignored.\n\n Returns\n -------\n params : dict\n Parameter names mapped to their values.\n \"\"\"\n import inspect\n return {param.name:getattr(self, param.name) for param in inspect.signature(self.__init__).parameters.values()}\n\n def set_params(self, **params):\n \"\"\"\n Set the parameters of this estimator.\n\n Kept for compatibility with scikit-learn.\n\n Note\n ----\n Setting any parameter other than the number of threads will reset the model\n object to a blank state - that is, if it was fitted to some data, the fitted\n model will be lost, and it will need to be refitted before being able to\n make predictions.\n \n Parameters\n ----------\n **params : dict\n Estimator parameters.\n\n Returns\n -------\n self : estimator instance\n Estimator instance.\n \"\"\"\n if not (\n len(params) == 1 and\n (\"nthreads\" in params or \"n_jobs\" in params)\n ):\n self.is_fitted_ = False\n valid_params = self.get_params(deep=False)\n for k,v in params.items():\n if k not in valid_params:\n raise ValueError(\"Invalid parameter: \", k)\n setattr(self, k, v)\n return self\n\n def __str__(self):\n msg = \"\"\n if self._is_extended_:\n msg += \"Extended \"\n msg += \"Isolation Forest model\"\n if (self.prob_pick_avg_gain_ + self.prob_pick_pooled_gain_ + self.prob_pick_full_gain_ + self.prob_pick_dens_) > 0:\n msg += \" (using guided splits)\"\n msg += \"\\n\"\n ndim = self.ndim_ if hasattr(self, \"ndim_\") else self.ndim\n if ndim > 1:\n msg += \"Splitting by %d variables at a time\\n\" % ndim\n if self.is_fitted_:\n msg += \"Consisting of %d trees\\n\" % self._ntrees\n if self._ncols_numeric > 0:\n msg += \"Numeric columns: %d\\n\" % self._ncols_numeric\n if self._ncols_categ:\n msg += \"Categorical columns: %d\\n\" % self._ncols_categ\n if self.has_indexer_:\n has_distances = self._cpp_obj.has_indexer_with_distances()\n has_references = self._cpp_obj.has_reference_points()\n msg += \"(Has node indexer%s%s%s built-in)\\n\" % (\n \" with distances\" if has_distances else \"\",\n \" and\" if (has_distances and has_references) else \"\",\n \" with reference points\" if has_references else \"\"\n )\n return msg\n\n def __repr__(self):\n return self.__str__()\n\n def _get_model_obj(self):\n return self._cpp_obj.get_cpp_obj(self._is_extended_)\n\n def _get_imputer_obj(self):\n return self._cpp_obj.get_imputer()\n\n def _check_can_use_imputer(self, X_cat):\n categ_split_type = self.categ_split_type\n ndim = self.ndim_ if hasattr(self, \"ndim_\") else self.ndim\n if categ_split_type == \"auto\":\n if ndim == 1:\n categ_split_type = \"single_categ\"\n else:\n categ_split_type = \"subset\"\n if (self.build_imputer) and (ndim == 1) and (X_cat is not None) and (X_cat.shape[1]):\n if (categ_split_type != \"single_categ\") and (self.new_categ_action_ == \"weighted\"):\n raise ValueError(\"Cannot build imputer with 'ndim=1' + 'new_categ_action=weighted'.\")\n if self.missing_action_ == \"divide\":\n raise ValueError(\"Cannot build imputer 
with 'ndim=1' + 'missing_action=divide'.\")\n\n def fit(self, X, y = None, sample_weights = None, column_weights = None, categ_cols = None):\n \"\"\"\n Fit isolation forest model to data\n\n Parameters\n ----------\n X : array or array-like (n_samples, n_features)\n Data to which to fit the model. Can pass a NumPy array, Pandas DataFrame, or SciPy sparse CSC matrix.\n If passing a DataFrame, will assume that columns are:\n \n - Numeric, if their dtype is a subtype of NumPy's 'number' or 'datetime64'.\n \n - Categorical, if their dtype is 'object', 'Categorical', or 'bool'. Note that,\n if `Categorical` dtypes are ordered, the order will be ignored here.\n \n Other dtypes are not supported.\n\n Note that, if passing NumPy arrays, they are used in column-major order (a.k.a. \"Fortran arrays\"),\n and if they are not already in column-major format, will need to create a copy of the data.\n y : None\n Not used. Kept as argument for compatibility with Scikit-Learn pipelining.\n sample_weights : None or array(n_samples,)\n Sample observation weights for each row of 'X', with higher weights indicating either higher sampling\n probability (i.e. the observation has a larger effect on the fitted model, if using sub-samples), or\n distribution density (i.e. if the weight is two, it has the same effect of including the same data\n point twice), according to parameter 'weights_as_sample_prob' in the model constructor method.\n column_weights : None or array(n_features,)\n Sampling weights for each column in 'X'. Ignored when picking columns by deterministic criterion.\n If passing None, each column will have a uniform weight. If used along with kurtosis weights, the\n effect is multiplicative.\n categ_cols : None or array-like\n Columns that hold categorical features, when the data is passed as an array or matrix.\n Categorical columns should contain only integer values with a continuous numeration starting at zero,\n with negative values and NaN taken as missing,\n and the array or list passed here should correspond to the column numbers, with numeration starting\n at zero. 
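(For example, a categorical column with three levels should contain only the values 0, 1, and 2, with negative numbers or NaN marking missing entries.) 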
The maximum categorical value should not exceed 'INT_MAX' (typically :math:`2^{31}-1`).\n This might be passed either at construction time or when calling ``fit`` or variations of ``fit``.\n \n This is ignored when the input is passed as a ``DataFrame`` as then it will consider columns as\n categorical depending on their dtype.\n\n Returns\n -------\n self : obj\n This object.\n \"\"\"\n self._init(categ_cols)\n nthreads_use = _process_nthreads(self.nthreads, True)\n if (\n self.sample_size is None\n and (sample_weights is not None)\n and (self.weights_as_sample_prob)\n ):\n raise ValueError(\"Sampling weights are only supported when using sub-samples for each tree.\")\n self._reset_obj()\n X_num, X_cat, ncat, sample_weights, column_weights, nrows = self._process_data(X, sample_weights, column_weights)\n\n if X_cat is not None:\n if self.prob_pick_col_by_range_:\n raise ValueError(\"'prob_pick_col_by_range' is incompatible with categorical data.\")\n if self.prob_pick_full_gain:\n raise ValueError(\"'prob_pick_full_gain' is incompatible with categorical data.\")\n\n self._check_can_use_imputer(X_cat)\n\n if self.sample_size is None:\n sample_size = nrows\n elif self.sample_size == \"auto\":\n sample_size = min(nrows, 10000)\n if (sample_weights is not None) and (self.weights_as_sample_prob):\n raise ValueError(\"Sampling weights are only supported when using sub-samples for each tree.\")\n elif self.sample_size <= 1:\n sample_size = int(np.ceil(self.sample_size * nrows))\n if sample_size < 2:\n raise ValueError(\"Sampling proportion amounts to a single row or less.\")\n else:\n sample_size = self.sample_size\n if sample_size > nrows:\n sample_size = nrows\n if self.max_depth == \"auto\":\n max_depth = 0\n limit_depth = True\n elif self.max_depth is None:\n max_depth = nrows - 1\n limit_depth = False\n else:\n max_depth = self.max_depth\n limit_depth = False\n if max_depth >= sample_size:\n max_depth = 0\n limit_depth = False\n\n if self.ncols_per_tree is None:\n ncols_per_tree = 0\n elif self.ncols_per_tree <= 1:\n ncols_tot = 0\n if X_num is not None:\n ncols_tot += X_num.shape[1]\n if X_cat is not None:\n ncols_tot += X_cat.shape[1]\n ncols_per_tree = int(np.ceil(self.ncols_per_tree * ncols_tot))\n else:\n ncols_per_tree = self.ncols_per_tree\n\n if (\n self.prob_pick_pooled_gain_ or\n self.prob_pick_avg_gain_ or\n self.prob_pick_full_gain_ or\n self.prob_pick_dens_\n ) and self.ndim_ == 1:\n ncols_tot = (X_num.shape[1] if X_num is not None else 0) + (X_cat.shape[1] if X_cat is not None else 0)\n if self.ntry > ncols_tot:\n warnings.warn(\"Passed 'ntry' larger than number of columns, will decrease it.\")\n\n if isinstance(self.random_state, np.random.RandomState):\n seed = self.random_state.randint(np.iinfo(np.int32).max)\n else:\n seed = self.random_seed\n\n self._cpp_obj.fit_model(_get_num_dtype(X_num, sample_weights, column_weights),\n _get_int_dtype(X_num),\n X_num, X_cat, ncat, sample_weights, column_weights,\n ctypes.c_size_t(nrows).value,\n ctypes.c_size_t(self._ncols_numeric).value,\n ctypes.c_size_t(self._ncols_categ).value,\n ctypes.c_size_t(self.ndim_).value,\n ctypes.c_size_t(self.ntry).value,\n self.coefs,\n ctypes.c_bool(self.coef_by_prop).value,\n ctypes.c_bool(self.sample_with_replacement).value,\n ctypes.c_bool(self.weights_as_sample_prob).value,\n ctypes.c_size_t(sample_size).value,\n ctypes.c_size_t(self.ntrees).value,\n ctypes.c_size_t(max_depth).value,\n ctypes.c_size_t(ncols_per_tree).value,\n ctypes.c_bool(limit_depth).value,\n 
ctypes.c_bool(self.penalize_range).value,\n ctypes.c_bool(self.standardize_data).value,\n self.scoring_metric,\n ctypes.c_bool(self.fast_bratio).value,\n ctypes.c_bool(False).value,\n ctypes.c_bool(False).value,\n ctypes.c_bool(False).value,\n ctypes.c_bool(False).value,\n ctypes.c_bool(False).value,\n ctypes.c_bool(self.weigh_by_kurtosis).value,\n ctypes.c_double(self.prob_pick_pooled_gain_).value,\n ctypes.c_double(self.prob_pick_avg_gain_).value,\n ctypes.c_double(self.prob_pick_full_gain_).value,\n ctypes.c_double(self.prob_pick_dens_).value,\n ctypes.c_double(self.prob_pick_col_by_range_).value,\n ctypes.c_double(self.prob_pick_col_by_var_).value,\n ctypes.c_double(self.prob_pick_col_by_kurt_).value,\n ctypes.c_double(self.min_gain).value,\n self.missing_action_,\n self.categ_split_type_,\n self.new_categ_action_,\n ctypes.c_bool(self.build_imputer).value,\n ctypes.c_size_t(self.min_imp_obs).value,\n self.depth_imp,\n self.weigh_imp_rows,\n ctypes.c_bool(self.build_imputer).value,\n ctypes.c_bool(False).value,\n ctypes.c_uint64(seed).value,\n ctypes.c_bool(self.use_long_double).value,\n ctypes.c_int(nthreads_use).value)\n self.is_fitted_ = True\n self._ntrees = self.ntrees\n return self\n\n def fit_predict(self, X, column_weights = None, output_outlierness = \"score\",\n output_distance = None, square_mat = False, output_imputed = False,\n categ_cols = None):\n \"\"\"\n Fit the model in-place and produce isolation or separation depths along the way\n \n See the documentation of other methods ('init', 'fit', 'predict', 'predict_distance')\n for details.\n\n Note\n ----\n The data must NOT contain any duplicate rows.\n\n Note\n ----\n This function will be faster at predicting average depths than calling 'fit' + 'predict'\n separately when using full row samples.\n\n Note\n ----\n If using 'penalize_range' = 'True', the resulting scores/depths from this function might differ a bit\n from those of 'fit' + 'predict' ran separately.\n\n Note\n ----\n Sample weights are not supported for this method.\n\n Note\n ----\n When using multiple threads, there can be small differences in the predicted scores or\n average depth or separation/distance between runs due to roundoff error.\n\n Parameters\n ----------\n X : array or array-like (n_samples, n_features)\n Data to which to fit the model. Can pass a NumPy array, Pandas DataFrame, or SciPy sparse CSC matrix.\n If passing a DataFrame, will assume that columns are:\n \n - Numeric, if their dtype is a subtype of NumPy's 'number' or 'datetime64'.\n \n - Categorical, if their dtype is 'object', 'Categorical', or 'bool'. Note that,\n if `Categorical` dtypes are ordered, the order will be ignored here.\n \n Other dtypes are not supported.\n column_weights : None or array(n_features,)\n Sampling weights for each column in 'X'. Ignored when picking columns by deterministic criterion.\n If passing None, each column will have a uniform weight. If used along with kurtosis weights, the\n effect is multiplicative.\n Note that, if passing a DataFrame with both numeric and categorical columns, the column names must\n not be repeated, otherwise the column weights passed here will not end up matching.\n output_outlierness : None or str in [\"score\", \"avg_depth\"]\n Desired type of outlierness output. 
If passing \"score\", will output standardized outlier score.\n If passing \"avg_depth\" will output average isolation depth without standardizing.\n If passing 'None', will skip outlierness calculations.\n output_distance : None or str in [\"dist\", \"avg_sep\"]\n Type of distance output to produce. If passing \"dist\", will standardize the average separation\n depths. If passing \"avg_sep\", will output the average separation depth without standardizing it\n (note that lower separation depth means furthest distance). If passing 'None', will skip distance calculations.\n\n Note that it might be much faster to calculate distances through a fitted object with\n ``build_indexer`` instead or calling this method.\n square_mat : bool\n Whether to produce a full square matrix with the distances. If passing 'False', will output\n only the upper triangular part as a 1-d array in which entry (i,j) with 0 <= i < j < n is located at\n position p(i,j) = (i * (n - (i+1)/2) + j - i - 1).\n Ignored when passing 'output_distance' = 'None'.\n output_imputed : bool\n Whether to output the data with imputed missing values. Model object must have been initialized\n with 'build_imputer' = 'True'.\n categ_cols : None or array-like\n Columns that hold categorical features, when the data is passed as an array or matrix.\n Categorical columns should contain only integer values with a continuous numeration starting at zero,\n with negative values and NaN taken as missing,\n and the array or list passed here should correspond to the column numbers, with numeration starting\n at zero. The maximum categorical value should not exceed 'INT_MAX' (typically :math:`2^{31}-1`).\n This might be passed either at construction time or when calling ``fit`` or variations of ``fit``.\n \n This is ignored when the input is passed as a ``DataFrame`` as then it will consider columns as\n categorical depending on their dtype.\n\n Returns\n -------\n output : array(n_samples,), or dict\n Requested outputs about isolation depth (outlierness), pairwise separation depth (distance), and/or\n imputed missing values. 
If passing either 'output_distance' or 'output_imputed', will return a dictionary\n with keys \"pred\" (array(n_samples,)), \"dist\" (array(n_samples * (n_samples - 1) / 2,) or array(n_samples, n_samples)),\n \"imputed\" (array-like(n_samples, n_columns)), according to whether each output type is present.\n \"\"\"\n self._init(categ_cols)\n nthreads_use = _process_nthreads(self.nthreads, True)\n if (\n (self.sample_size is not None) and\n (self.sample_size != \"auto\") and\n (self.sample_size != 1) and\n (self.sample_size != nrows)\n ):\n raise ValueError(\"Cannot use 'fit_predict' when the sample size is limited.\")\n if self.sample_with_replacement:\n raise ValueError(\"Cannot use 'fit_predict' or 'fit_transform' when sampling with replacement.\")\n\n if (output_outlierness is None) and (output_distance is None):\n raise ValueError(\"Must pass at least one of 'output_outlierness' or 'output_distance'.\")\n\n if output_outlierness is not None:\n assert output_outlierness in [\"score\", \"avg_depth\"]\n\n if output_distance is not None:\n assert output_distance in [\"dist\", \"avg_sep\"]\n\n if output_imputed:\n if self.missing_action == \"fail\":\n raise ValueError(\"Cannot impute missing values when using 'missing_action' = 'fail'.\")\n if not self.build_imputer:\n msg = \"Trying to impute missing values from object \"\n msg += \"that was initialized with 'build_imputer' = 'False' \"\n msg += \"- will force 'build_imputer' to 'True'.\"\n warnings.warn(msg)\n self.build_imputer = True\n\n self._reset_obj()\n X_num, X_cat, ncat, sample_weights, column_weights, nrows = self._process_data(X, None, column_weights)\n\n if (X_cat is not None) and (self.prob_pick_col_by_range_):\n raise ValueError(\"'prob_pick_col_by_range' is incompatible with categorical data.\")\n\n self._check_can_use_imputer(X_cat)\n\n if (output_imputed) and (issparse(X_num)):\n msg = \"Imputing missing values from CSC matrix on-the-fly can be very slow, \"\n msg += \"it's recommended if possible to fit the model first and then pass the \"\n msg += \"same matrix as CSR to 'transform'.\"\n warnings.warn(msg)\n\n if self.max_depth == \"auto\":\n max_depth = 0\n limit_depth = True\n elif self.max_depth is None:\n max_depth = nrows - 1\n else:\n max_depth = self.max_depth\n limit_depth = False\n if max_depth >= nrows:\n max_depth = 0\n limit_depth = False\n\n if self.ncols_per_tree is None:\n ncols_per_tree = 0\n elif self.ncols_per_tree <= 1:\n ncols_tot = 0\n if X_num is not None:\n ncols_tot += X_num.shape[1]\n if X_cat is not None:\n ncols_tot += X_cat.shape[1]\n ncols_per_tree = int(np.ceil(self.ncols_per_tree * ncols_tot))\n else:\n ncols_per_tree = self.ncols_per_tree\n\n if (\n self.prob_pick_pooled_gain_ or\n self.prob_pick_avg_gain_ or\n self.prob_pick_full_gain_ or\n self.prob_pick_dens_\n ) and self.ndim_ == 1:\n ncols_tot = (X_num.shape[1] if X_num is not None else 0) + (X_cat.shape[1] if X_cat is not None else 0)\n if self.ntry > ncols_tot:\n warnings.warn(\"Passed 'ntry' larger than number of columns, will decrease it.\")\n\n if isinstance(self.random_state, np.random.RandomState):\n seed = self.random_state.randint(np.iinfo(np.int32).max)\n else:\n seed = self.random_seed\n\n depths, tmat, dmat, X_num, X_cat = self._cpp_obj.fit_model(_get_num_dtype(X_num, None, column_weights),\n _get_int_dtype(X_num),\n X_num, X_cat, ncat, None, column_weights,\n ctypes.c_size_t(nrows).value,\n ctypes.c_size_t(self._ncols_numeric).value,\n ctypes.c_size_t(self._ncols_categ).value,\n ctypes.c_size_t(self.ndim_).value,\n 
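# Note: this is the same backend call as in 'fit', except that the full data is used as the sample ('nrows' is passed in the sample-size position) and the requested depth/distance/imputation outputs are returned directly.\n                                                                    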
ctypes.c_size_t(self.ntry).value,\n self.coefs,\n ctypes.c_bool(self.coef_by_prop).value,\n ctypes.c_bool(self.sample_with_replacement).value,\n ctypes.c_bool(self.weights_as_sample_prob).value,\n ctypes.c_size_t(nrows).value,\n ctypes.c_size_t(self.ntrees).value,\n ctypes.c_size_t(max_depth).value,\n ctypes.c_size_t(ncols_per_tree).value,\n ctypes.c_bool(limit_depth).value,\n ctypes.c_bool(self.penalize_range).value,\n ctypes.c_bool(self.standardize_data).value,\n self.scoring_metric,\n ctypes.c_bool(self.fast_bratio).value,\n ctypes.c_bool(output_distance is not None).value,\n ctypes.c_bool(output_distance == \"dist\").value,\n ctypes.c_bool(square_mat).value,\n ctypes.c_bool(output_outlierness is not None).value,\n ctypes.c_bool(output_outlierness == \"score\").value,\n ctypes.c_bool(self.weigh_by_kurtosis).value,\n ctypes.c_double(self.prob_pick_pooled_gain_).value,\n ctypes.c_double(self.prob_pick_avg_gain_).value,\n ctypes.c_double(self.prob_pick_full_gain_).value,\n ctypes.c_double(self.prob_pick_dens_).value,\n ctypes.c_double(self.prob_pick_col_by_range_).value,\n ctypes.c_double(self.prob_pick_col_by_var_).value,\n ctypes.c_double(self.prob_pick_col_by_kurt_).value,\n ctypes.c_double(self.min_gain).value,\n self.missing_action_,\n self.categ_split_type_,\n self.new_categ_action_,\n ctypes.c_bool(self.build_imputer).value,\n ctypes.c_size_t(self.min_imp_obs).value,\n self.depth_imp,\n self.weigh_imp_rows,\n ctypes.c_bool(output_imputed).value,\n ctypes.c_bool(self.all_perm).value,\n ctypes.c_uint64(seed).value,\n ctypes.c_bool(self.use_long_double).value,\n ctypes.c_int(nthreads_use).value)\n self.is_fitted_ = True\n self._ntrees = self.ntrees\n\n if (not output_distance) and (not output_imputed):\n return depths\n else:\n outp = {\"pred\" : depths}\n if output_distance:\n if square_mat:\n outp[\"dist\"] = dmat\n else:\n outp[\"dist\"] = tmat\n if output_imputed:\n outp[\"imputed\"] = self._rearrange_imputed(X, X_num, X_cat)\n return outp\n\n def _process_data(self, X, sample_weights, column_weights):\n ### TODO: this needs a refactoring after introducing 'categ_cols'\n self.ndim_ = self.ndim\n\n if X.__class__.__name__ == \"DataFrame\":\n\n ### TODO: this should also have a version with underscores\n if self.categ_cols_ is not None:\n warnings.warn(\"'categ_cols' is ignored when passing a DataFrame as input.\")\n self.categ_cols_ = None\n\n ### https://stackoverflow.com/questions/25039626/how-do-i-find-numeric-columns-in-pandas\n X_num = X.select_dtypes(include = [np.number, np.datetime64]).to_numpy()\n if X_num.dtype not in [ctypes.c_double, ctypes.c_float]:\n X_num = X_num.astype(ctypes.c_double)\n if not _is_col_major(X_num):\n X_num = np.asfortranarray(X_num)\n X_cat = X.select_dtypes(include = [pd.CategoricalDtype, \"object\", \"bool\"])\n if (X_num.shape[1] + X_cat.shape[1]) == 0:\n raise ValueError(\"Input data has no columns of numeric or categorical type.\")\n elif (X_num.shape[1] + X_cat.shape[1]) < X.shape[1]:\n cols_num = np.array(X.select_dtypes(include = [np.number, np.datetime64]).columns.values)\n cols_cat = np.array(X_cat.columns.values)\n msg = \"Only numeric and categorical columns are supported.\"\n msg += \" Got passed the following types: [\"\n msg += \", \".join([str(X[cl].dtype) for cl in X.columns.values if cl not in cols_num and cl not in cols_cat][:3])\n msg += \"]\\n(Sample problem columns: [\"\n msg += \", \".join([str(cl) for cl in X.columns.values if cl not in cols_num and cl not in cols_cat][:3])\n msg += \"])\"\n raise ValueError(msg)\n\n 
self.n_features_in_ = X.shape[1]\n self.feature_names_in_ = np.array(X.columns.values)\n\n self._ncols_numeric = X_num.shape[1]\n self._ncols_categ = X_cat.shape[1]\n self.cols_numeric_ = np.array(X.select_dtypes(include = [np.number, np.datetime64]).columns.values)\n self.cols_categ_ = np.array(X.select_dtypes(include = [pd.CategoricalDtype, \"object\", \"bool\"]).columns.values)\n if not self._ncols_numeric:\n X_num = None\n else:\n nrows = X_num.shape[0]\n\n if not self._ncols_categ:\n X_cat = None\n else:\n nrows = X_cat.shape[0]\n\n has_ordered = False\n if X_cat is not None:\n self._cat_mapping = [None for cl in range(X_cat.shape[1])]\n for cl in range(X_cat.shape[1]):\n if (X_cat[X_cat.columns[cl]].dtype.name == \"category\") and (X_cat[X_cat.columns[cl]].dtype.ordered):\n has_ordered = True\n if (not self.recode_categ) and (X_cat[X_cat.columns[cl]].dtype.name == \"category\"):\n self._cat_mapping[cl] = np.array(X_cat[X_cat.columns[cl]].cat.categories)\n X_cat = X_cat.assign(**{X_cat.columns[cl] : X_cat[X_cat.columns[cl]].cat.codes})\n else:\n cl, self._cat_mapping[cl] = pd.factorize(X_cat[X_cat.columns[cl]])\n X_cat = X_cat.assign(**{X_cat.columns[cl] : cl})\n if (self.all_perm\n and (self.ndim_ == 1)\n and (self.prob_pick_pooled_gain_)\n ):\n if np.math.factorial(self._cat_mapping[cl].shape[0]) > np.iinfo(ctypes.c_size_t).max:\n msg = \"Number of permutations for categorical variables is larger than \"\n msg += \"maximum representable integer. Try using 'all_perm=False'.\"\n raise ValueError(msg)\n # https://github.com/pandas-dev/pandas/issues/30618\n if self._cat_mapping[cl].__class__.__name__ == \"CategoricalIndex\":\n self._cat_mapping[cl] = self._cat_mapping[cl].to_numpy()\n X_cat = X_cat.to_numpy()\n if X_cat.dtype != ctypes.c_int:\n X_cat = X_cat.astype(ctypes.c_int)\n if not _is_col_major(X_cat):\n X_cat = np.asfortranarray(X_cat)\n if has_ordered:\n warnings.warn(\"Data contains ordered categoricals. 
These are treated as unordered.\")\n\n else:\n if len(X.shape) != 2:\n raise ValueError(\"Input data must be two-dimensional.\")\n\n self.n_features_in_ = X.shape[1]\n\n X_cat = None\n if self.categ_cols_ is not None:\n if np.max(self.categ_cols_) >= X.shape[1]:\n raise ValueError(\"'categ_cols' contains indices higher than the number of columns in 'X'.\")\n self.cols_numeric_ = np.setdiff1d(np.arange(X.shape[1]), self.categ_cols_)\n if issparse(X) and not isspmatrix_csc(X):\n X = csc_matrix(X)\n X_cat = X[:, self.categ_cols_]\n X = X[:, self.cols_numeric_]\n\n if X.shape[1]:\n if issparse(X):\n avoid_sort = False\n if not isspmatrix_csc(X):\n warnings.warn(\"Sparse matrices are only supported in CSC format, will be converted.\")\n X = csc_matrix(X)\n avoid_sort = True\n if X.nnz == 0:\n raise ValueError(\"'X' has no non-zero entries\")\n\n if ((X.indptr.dtype not in [ctypes.c_int, np.int64, ctypes.c_size_t]) or\n (X.indices.dtype not in [ctypes.c_int, np.int64, ctypes.c_size_t]) or\n (X.indptr.dtype != X.indices.dtype) or\n (X.data.dtype not in [ctypes.c_double, ctypes.c_float])\n ):\n X = X.copy()\n if X.data.dtype not in [ctypes.c_double, ctypes.c_float]:\n X.data = X.data.astype(ctypes.c_double)\n if (X.indptr.dtype != X.indices.dtype) or (X.indices.dtype not in [ctypes.c_int, np.int64, ctypes.c_size_t]):\n X.indices = X.indices.astype(ctypes.c_size_t)\n if (X.indptr.dtype != X.indices.dtype) or (X.indptr.dtype not in [ctypes.c_int, np.int64, ctypes.c_size_t]):\n X.indptr = X.indptr.astype(ctypes.c_size_t)\n if not avoid_sort:\n _sort_csc_indices(X)\n \n else:\n if (X.__class__.__name__ == \"ndarray\") and (X.dtype not in [ctypes.c_double, ctypes.c_float]):\n X = X.astype(ctypes.c_double)\n if (X.__class__.__name__ != \"ndarray\") or (not _is_col_major(X)):\n X = np.asfortranarray(X)\n if X.dtype not in [ctypes.c_double, ctypes.c_float]:\n X = X.astype(ctypes.c_double)\n\n self._ncols_numeric = X.shape[1]\n self._ncols_categ = 0 if (X_cat is None) else X_cat.shape[1]\n if self.categ_cols_ is None:\n self.cols_numeric_ = np.array([])\n self.cols_categ_ = np.array([])\n self._cat_mapping = list()\n\n if (self._ncols_numeric + self._ncols_categ) == 0:\n raise ValueError(\"'X' has zero columns.\")\n\n if X.shape[1]:\n X_num = X\n nrows = X_num.shape[0]\n else:\n X_num = None\n \n if X_cat is not None:\n if issparse(X_cat):\n X_cat = X_cat.toarray()\n if np.any(np.isnan(X_cat)):\n X_cat = X_cat.copy()\n X_cat[np.isnan(X_cat)] = -1\n if X_cat.dtype != ctypes.c_int:\n X_cat = X_cat.astype(ctypes.c_int)\n if not _is_col_major(X_cat):\n X_cat = np.asfortranarray(X_cat)\n self._cat_max_lev = np.max(X_cat, axis=0)\n if np.any(self._cat_max_lev < 0):\n warnings.warn(\"Some categorical columns contain only missing values.\")\n nrows = X_cat.shape[0]\n\n if nrows == 0:\n raise ValueError(\"Input data has zero rows.\")\n elif nrows < 3:\n raise ValueError(\"Input data must have at least 3 rows.\")\n elif (self.sample_size is not None) and (self.sample_size != \"auto\"):\n if self.sample_size > nrows:\n warnings.warn(\"Input data has fewer rows than sample_size, will forego sub-sampling.\")\n\n if X_cat is not None:\n if self.categ_cols_ is None:\n ncat = np.array([self._cat_mapping[cl].shape[0] for cl in range(X_cat.shape[1])], dtype = ctypes.c_int)\n else:\n if self._cat_max_lev is None:\n self._cat_max_lev = []\n if not isinstance(self._cat_max_lev, np.ndarray):\n self._cat_max_lev = np.array(self._cat_max_lev)\n ncat = (self._cat_max_lev + 1).clip(0)\n if ncat.dtype != ctypes.c_int:\n ncat = 
ncat.astype(ctypes.c_int)\n else:\n ncat = None\n\n if sample_weights is not None:\n sample_weights = np.array(sample_weights).reshape(-1)\n if (X_num is not None) and (X_num.dtype != sample_weights.dtype):\n sample_weights = sample_weights.astype(X_num.dtype)\n if sample_weights.dtype not in [ctypes.c_double, ctypes.c_float]:\n sample_weights = sample_weights.astype(ctypes.c_double)\n if sample_weights.shape[0] != nrows:\n raise ValueError(\"'sample_weights' has different number of rows than 'X'.\")\n\n ncols = 0\n if X_num is not None:\n ncols += X_num.shape[1]\n if X_cat is not None:\n ncols += X_cat.shape[1]\n\n if column_weights is not None:\n column_weights = np.array(column_weights).reshape(-1)\n if (X_num is not None) and (X_num.dtype != column_weights.dtype):\n column_weights = column_weights.astype(X_num.dtype)\n if column_weights.dtype not in [ctypes.c_double, ctypes.c_float]:\n column_weights = column_weights.astype(ctypes.c_double)\n if ncols != column_weights.shape[0]:\n raise ValueError(\"'column_weights' has %d entries, but data has %d columns.\" % (column_weights.shape[0], ncols))\n if (X_num is not None) and (X_cat is not None):\n column_weights = np.r_[column_weights[X.columns.values == self.cols_numeric_],\n column_weights[X.columns.values == self.cols_categ_]]\n\n if (sample_weights is not None) and (column_weights is not None) and (sample_weights.dtype != column_weights.dtype):\n sample_weights = sample_weights.astype(ctypes.c_double)\n column_weights = column_weights.astype(ctypes.c_double)\n\n if self.ndim_ > 1:\n if self.ndim_ > ncols:\n msg = \"Model was meant to take %d variables for each split, but data has %d columns.\"\n msg += \" Will decrease number of splitting variables to match number of columns.\"\n msg = msg % (self.ndim_, ncols)\n warnings.warn(msg)\n self.ndim_ = ncols\n if self.ndim_ < 2:\n self._is_extended_ = False\n if self.missing_action == \"auto\":\n self.missing_action_ = \"divide\"\n if self.new_categ_action == \"auto\":\n self.new_categ_action_ = \"weighted\"\n\n X_num = _copy_if_subview(X_num, False)\n X_cat = _copy_if_subview(X_cat, False)\n\n return X_num, X_cat, ncat, sample_weights, column_weights, nrows\n\n def _process_data_new(self, X, allow_csr = True, allow_csc = True, prefer_row_major = False,\n keep_new_cat_levels = False):\n if X.__class__.__name__ == \"DataFrame\":\n if ((self.cols_numeric_.shape[0] + self.cols_categ_.shape[0]) > 0) and (self.categ_cols_ is None):\n if self.categ_cols_ is None:\n missing_cols = np.setdiff1d(np.r_[self.cols_numeric_, self.cols_categ_], np.array(X.columns.values))\n if missing_cols.shape[0] > 0:\n raise ValueError(\"Input data is missing %d columns - example: [%s]\" % (missing_cols.shape[0], \", \".join(missing_cols[:3])))\n else:\n if X.shape[1] < (self.cols_numeric_.shape[0] + self.cols_categ_.shape[0]):\n raise ValueError(\"Error: expected input with %d columns - got: %d.\" %\n ((self.cols_numeric_.shape[0] + self.cols_categ_.shape[0]), X.shape[1]))\n\n if self._ncols_numeric > 0:\n if self.categ_cols_ is None:\n X_num = X[self.cols_numeric_].to_numpy()\n else:\n X_num = X.iloc[:, self.cols_numeric_].to_numpy()\n \n if X_num.dtype not in [ctypes.c_double, ctypes.c_float]:\n X_num = X_num.astype(ctypes.c_double)\n if (not prefer_row_major) and (not _is_col_major(X_num)):\n X_num = np.asfortranarray(X_num)\n nrows = X_num.shape[0]\n else:\n X_num = None\n\n if self._ncols_categ > 0:\n if self.categ_cols_ is None:\n X_cat = X[self.cols_categ_]\n\n if (not keep_new_cat_levels) and \\\n (\n 
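# When unseen category levels are not being kept and the model's configuration can absorb new categories on its own (imputation, or weighted division), encode with the stored mappings as-is; otherwise any new levels are appended to the mappings in the 'else' branch below.\n                    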
(self.new_categ_action_ == \"impute\" and self.missing_action_ == \"impute\")\n or\n (self.new_categ_action_ == \"weighted\" and\n self.categ_split_type_ != \"single_categ\"\n and self.missing_action_ == \"divide\")\n ):\n for cl in range(self._ncols_categ):\n X_cat = X_cat.assign(**{\n self.cols_categ_[cl] : _encode_categorical(X_cat[self.cols_categ_[cl]],\n self._cat_mapping[cl])\n })\n else:\n for cl in range(self._ncols_categ):\n X_cat = X_cat.assign(**{\n self.cols_categ_[cl] : pd.Categorical(X_cat[self.cols_categ_[cl]])\n })\n new_levs = np.setdiff1d(X_cat[self.cols_categ_[cl]].cat.categories, self._cat_mapping[cl])\n if new_levs.shape[0]:\n self._cat_mapping[cl] = np.r_[self._cat_mapping[cl], new_levs]\n X_cat = X_cat.assign(**{\n self.cols_categ_[cl] : _encode_categorical(X_cat[self.cols_categ_[cl]],\n self._cat_mapping[cl])\n })\n\n else:\n X_cat = X.iloc[:, self.categ_cols_]\n \n X_cat = X_cat.to_numpy()\n if X_cat.dtype != ctypes.c_int:\n X_cat = X_cat.astype(ctypes.c_int)\n if (not prefer_row_major) and (not _is_col_major(X_cat)):\n X_cat = np.asfortranarray(X_cat)\n nrows = X_cat.shape[0]\n else:\n X_cat = None\n\n elif self._ncols_categ == 0:\n if X.shape[1] < self._ncols_numeric:\n raise ValueError(\"Input has different number of columns than data to which model was fit.\")\n X_num = X.to_numpy()\n if X_num.dtype not in [ctypes.c_double, ctypes.c_float]:\n X_num = X_num.astype(ctypes.c_double)\n if (not prefer_row_major) and (not _is_col_major(X_num)):\n X_num = np.asfortranarray(X_num)\n X_cat = None\n nrows = X_num.shape[0]\n elif self._ncols_numeric == 0:\n if X.shape[1] < self._ncols_categ:\n raise ValueError(\"Input has different number of columns than data to which model was fit.\")\n X_cat = X.to_numpy()[:, :self._ncols_categ]\n if X_cat.dtype != ctypes.c_int:\n X_cat = X_cat.astype(ctypes.c_int)\n if (not prefer_row_major) and (not _is_col_major(X_cat)):\n X_cat = np.asfortranarray(X_cat)\n X_num = None\n nrows = X_cat.shape[0]\n else:\n nrows = X.shape[0]\n X_num = X.iloc[:, self.cols_numeric_].to_numpy()\n X_cat = X.iloc[:, self.categ_cols_].to_numpy()\n if X_num.dtype not in [ctypes.c_double, ctypes.c_float]:\n X_num = X_num.astype(ctypes.c_double)\n if (not prefer_row_major) and (not _is_col_major(X_num)):\n X_num = np.asfortranarray(X_num)\n if X_cat.dtype != ctypes.c_int:\n X_cat = X_cat.astype(ctypes.c_int)\n if (not prefer_row_major) and (not _is_col_major(X_cat)):\n X_cat = np.asfortranarray(X_cat)\n\n if (X_num is not None) and (X_cat is not None) and (_is_col_major(X_num) != _is_col_major(X_cat)):\n if prefer_row_major:\n X_num = np.ascontiguousarray(X_num)\n X_cat = np.ascontiguousarray(X_cat)\n else:\n X_num = np.asfortranarray(X_num)\n X_cat = np.asfortranarray(X_cat)\n\n else:\n if (self._ncols_categ > 0) and (self.categ_cols_ is None):\n raise ValueError(\"Model was fit to DataFrame with categorical columns, but new input is a numeric array/matrix.\")\n if len(X.shape) != 2:\n raise ValueError(\"Input data must be two-dimensional.\")\n if (self.categ_cols_ is None) and (X.shape[1] < self._ncols_numeric):\n raise ValueError(\"Input has different number of columns than data to which model was fit.\")\n \n if self.categ_cols_ is None:\n X_cat = None\n else:\n if issparse(X) and (not isspmatrix_csc(X)) and (not isspmatrix_csr(X)):\n X = csc_matrix(X)\n X_cat = X[:, self.categ_cols_]\n if issparse(X_cat):\n X_cat = X_cat.toarray()\n X = X[:, self.cols_numeric_]\n\n X_num = None\n if X.shape[1]:\n if issparse(X):\n avoid_sort = False\n if isspmatrix_csr(X) 
and not allow_csr:\n warnings.warn(\"Cannot predict from CSR sparse matrix, will convert to CSC.\")\n X = csc_matrix(X)\n avoid_sort = True\n elif isspmatrix_csc(X) and not allow_csc:\n warnings.warn(\"Method supports sparse matrices only in CSR format, will convert sparse format.\")\n X = csr_matrix(X)\n avoid_sort = True\n elif (not isspmatrix_csc(X)) and (not isspmatrix_csr(X)):\n msg = \"Sparse matrix inputs only supported as \"\n if allow_csc:\n msg += \"CSC\"\n if allow_csr:\n msg += \" or CSR\"\n else:\n msg += \"CSR\"\n msg += \" format, will convert to \"\n if allow_csc:\n msg += \"CSC.\"\n warnings.warn(msg)\n X = csc_matrix(X)\n else:\n msg += \"CSR.\"\n warnings.warn(msg)\n X = csr_matrix(X)\n avoid_sort = True\n\n if ((X.indptr.dtype not in [ctypes.c_int, np.int64, ctypes.c_size_t]) or\n (X.indices.dtype not in [ctypes.c_int, np.int64, ctypes.c_size_t]) or\n (X.indptr.dtype != X.indices.dtype) or\n (X.data.dtype not in [ctypes.c_double, ctypes.c_float])\n ):\n X = X.copy()\n if X.data.dtype not in [ctypes.c_double, ctypes.c_float]:\n X.data = X.data.astype(ctypes.c_double)\n if (X.indptr.dtype != X.indices.dtype) or (X.indices.dtype not in [ctypes.c_int, np.int64, ctypes.c_size_t]):\n X.indices = X.indices.astype(ctypes.c_size_t)\n if (X.indptr.dtype != X.indices.dtype) or (X.indptr.dtype not in [ctypes.c_int, np.int64, ctypes.c_size_t]):\n X.indptr = X.indptr.astype(ctypes.c_size_t)\n if not avoid_sort:\n _sort_csc_indices(X)\n X_num = X\n \n else:\n if not isinstance(X, np.ndarray):\n if prefer_row_major:\n X = np.array(X)\n else:\n X = np.asfortranarray(X)\n if X.dtype not in [ctypes.c_double, ctypes.c_float]:\n X = X.astype(ctypes.c_double)\n if (not prefer_row_major) and (not _is_col_major(X)):\n X = np.asfortranarray(X)\n X_num = X\n nrows = X_num.shape[0]\n\n if X_cat is not None:\n nrows = X_cat.shape[0]\n if np.any(np.isnan(X_cat)):\n X_cat = X_cat.copy()\n X_cat[np.isnan(X_cat)] = -1\n\n if (X_num is not None) and (isspmatrix_csc(X_num)):\n prefer_row_major = False\n\n\n if (self.categ_cols_ is not None) and np.any(X_cat > self._cat_max_lev.reshape((1,-1))):\n X_cat[X_cat > self._cat_max_lev] = -1\n if X_cat.dtype != ctypes.c_int:\n X_cat = X_cat.astype(ctypes.c_int)\n if (not prefer_row_major) and (not _is_col_major(X_cat)):\n X_cat = np.asfortranarray(X_cat)\n\n X_num = _copy_if_subview(X_num, prefer_row_major)\n X_cat = _copy_if_subview(X_cat, prefer_row_major)\n\n if (X_num is not None) and (isspmatrix_csc(X_num)) and (X_cat is not None) and (not _is_col_major(X_cat)):\n X_cat = np.asfortranarray(X_cat)\n if (nrows > 1) and (X_cat is not None) and (X_num is not None) and (not isspmatrix_csc(X_num)):\n if prefer_row_major:\n if _is_row_major(X_num) != _is_row_major(X_cat):\n X_num = np.ascontiguousarray(X_num)\n X_cat = np.ascontiguousarray(X_cat)\n else:\n if _is_col_major(X_num) != _is_col_major(X_cat):\n X_num = np.asfortranarray(X_num)\n X_cat = np.asfortranarray(X_cat)\n\n return X_num, X_cat, nrows\n\n def _rearrange_imputed(self, orig, X_num, X_cat):\n if orig.__class__.__name__ == \"DataFrame\":\n ncols_imputed = 0\n if X_num is not None:\n if (self.cols_numeric_ is not None) and (self.cols_numeric_.shape[0]):\n df_num = pd.DataFrame(X_num, columns = self.cols_numeric_ if (self.categ_cols_ is None) else orig.columns.values[self.cols_numeric_])\n else:\n df_num = pd.DataFrame(X_num)\n ncols_imputed += df_num.shape[1]\n if X_cat is not None:\n if self.categ_cols_ is None:\n df_cat = pd.DataFrame(X_cat, columns = self.cols_categ_)\n for cl in 
range(self.cols_categ_.shape[0]):\n df_cat[self.cols_categ_[cl]] = pd.Categorical.from_codes(df_cat[self.cols_categ_[cl]], self._cat_mapping[cl])\n else:\n df_cat = pd.DataFrame(X_cat, columns = orig.columns.values[self.categ_cols_])\n ncols_imputed += df_cat.shape[1]\n \n if orig.columns.values.shape[0] != ncols_imputed:\n if self.categ_cols_ is None:\n cols_new = np.setdiff1d(orig.columns.values, np.r_[self.cols_numeric_, self.cols_categ_])\n else:\n cols_new = orig.columns[(self._ncols_numeric + self._ncols_categ):]\n if (X_num is not None) and (X_cat is None):\n out = pd.concat([df_num, orig[cols_new]], axis = 1)\n elif (X_num is None) and (X_cat is not None):\n out = pd.concat([df_cat, orig[cols_new]], axis = 1)\n else:\n out = pd.concat([df_num, df_cat, orig[cols_new]], axis = 1)\n out = out[orig.columns.values]\n return out\n\n if (X_num is not None) and (X_cat is None):\n return df_num[orig.columns.values]\n elif (X_num is None) and (X_cat is not None):\n return df_cat[orig.columns.values]\n else:\n df = pd.concat([df_num, df_cat], axis = 1)\n df = df[orig.columns.values]\n return df\n\n else: ### not DataFrame\n\n if issparse(orig):\n outp = orig.copy()\n if (self.categ_cols_ is None) and (orig.shape[1] == self._ncols_numeric):\n outp.data[:] = X_num.data\n elif self.categ_cols_ is None:\n if isspmatrix_csr(orig):\n _reconstruct_csr_sliced(\n outp.data,\n outp.indptr,\n X_num.data if (X_num is not None) else np.empty(0, dtype=outp.data.dtype),\n X_num.indptr if (X_num is not None) else np.zeros(1, dtype=outp.indptr.dtype),\n outp.shape[0]\n )\n else:\n outp[:, :self._ncols_numeric] = X_num\n else:\n if isspmatrix_csr(orig):\n _reconstruct_csr_with_categ(\n outp.data,\n outp.indices,\n outp.indptr,\n X_num.data if (X_num is not None) else np.empty(0, dtype=outp.data.dtype),\n X_num.indices if (X_num is not None) else np.empty(0, dtype=outp.indices.dtype),\n X_num.indptr if (X_num is not None) else np.zeros(1, dtype=outp.indptr.dtype),\n X_cat,\n self.cols_numeric_.astype(ctypes.c_size_t) if (self.cols_numeric_ is not None) else np.empty(0, dtype=ctypes.c_size_t),\n self.categ_cols_.astype(ctypes.c_size_t),\n outp.shape[0], outp.shape[1],\n _is_col_major(X_cat),\n )\n else:\n if np.any(X_cat < 0):\n X_cat = X_cat.astype(\"float\")\n X_cat[X_cat < 0] = np.nan\n outp[:, self.categ_cols_] = X_cat\n if X_num is not None:\n outp[:, self.cols_numeric_] = X_num\n return outp\n \n else:\n if (self.categ_cols_ is None) and (orig.shape[1] == self._ncols_numeric):\n return X_num\n elif self.categ_cols_ is None:\n outp = orig.copy()\n outp[:, :self._ncols_numeric] = X_num[:, :self._ncols_numeric]\n else:\n outp = orig.copy()\n if np.any(X_cat < 0):\n X_cat = X_cat.astype(\"float\")\n X_cat[X_cat < 0] = np.nan\n outp[:, self.categ_cols_] = X_cat\n if X_num is not None:\n outp[:, self.cols_numeric_] = X_num[:, :self._ncols_numeric]\n return outp\n\n\n def predict(self, X, output = \"score\"):\n \"\"\"\n Predict outlierness based on average isolation depth or density\n\n Calculates the approximate depth that it takes to isolate an observation according to the\n fitted model splits, or the average density of the branches in which observations fall.\n Can output either the average depth/density, or a standardized outlier score\n based on whether it takes more or fewer splits than average to isolate observations. 
In the\n standardized outlier score for density-based metrics, values closer to 1 indicate more outlierness,\n while values closer to 0.5 indicate average outlierness, and close to 0 more averageness\n (harder to isolate).\n When using ``scoring_metric=\"density\"``, the standardized outlier scores are instead unbounded,\n with larger values indicating more outlierness and a natural threshold of zero for determining\n inliers and outliers.\n\n Note\n ----\n For multi-threaded predictions on many rows, it is recommended to set the number of threads\n to the number of physical cores of the CPU rather than the number of logical cores, as it\n will typically have better performance that way. Assuming a typical x86-64 desktop CPU,\n this typically involves dividing the number of threads by 2 - for example:\n \n ``import multiprocessing;model.set_params(nthreads=multiprocessing.cpu_count()/2)``\n\n Note\n ----\n Depending on the model parameters, it might be possible to convert the models to 'treelite' format\n for faster predictions or for easier model serving. See method ``to_treelite`` for details.\n\n Note\n ----\n If the model was built with 'nthreads>1', this prediction function will\n use OpenMP for parallelization. In a linux setup, one usually has GNU's \"gomp\" as OpenMP as backend, which\n will hang when used in a forked process - for example, if one tries to call this prediction function from\n 'flask'+'gunicorn', which uses process forking for parallelization, it will cause the whole application to freeze;\n and if using kubernetes on top of a different backend such as 'falcon', might cause it to run slower than\n needed or to hang too. A potential fix in these cases is to set the number of threads to 1 in the object\n (e.g. 'model.nthreads = 1'), or to use a different version of this library compiled without OpenMP\n (requires manually altering the 'setup.py' file), or to use a non-GNU OpenMP backend. This should not\n be an issue when using this library normally in e.g. a jupyter notebook.\n\n Note\n ----\n For model serving purposes, in order to have a smaller and leaner library, it is recommended to\n compile this library without support for 'long double' type, which can be done by setting up an\n environment variable \"NO_LONG_DOUBLE\" before installation of this package (see the GitHub page\n of this library for more details).\n \n Note\n ----\n The more threads that are set for the model, the higher the memory requirements will be as each\n thread will allocate an array with one entry per row.\n \n Note\n ----\n In order to save memory when fitting and serializing models, the functionality for outputting\n terminal node number will generate index mappings on the fly for all tree nodes, even if passing only\n 1 row, so it's only recommended for batch predictions. 
If this type of prediction is desired, it can\n be sped up by building an index of terminal nodes through ``build_indexer``.\n\n Note\n ----\n The outlier scores/depth predict functionality is optimized for making predictions on one or a\n few rows at a time - for making large batches of predictions, it might be faster to use the\n 'fit_predict' functionality.\n\n Note\n ----\n If using non-random splits (parameters ``prob_pick_avg_gain``, ``prob_pick_pooled_gain``, ``prob_pick_full_gain``, ``prob_pick_dens``)\n and/or range penalizations (which are off by default), the distribution of scores might\n not be centered around 0.5.\n\n Note\n ----\n When making predictions on CSC matrices with many rows using multiple threads, there\n can be small differences between runs due to roundoff error.\n\n Parameters\n ----------\n X : array or array-like (n_samples, n_features)\n Observations for which to predict outlierness or average isolation depth. Can pass\n a NumPy array, Pandas DataFrame, or SciPy sparse CSC or CSR matrix.\n\n If 'X' is sparse and one wants to obtain the outlier score or average depth or tree\n numbers, it's highly recommended to pass it in CSC format as it will be much faster\n when the number of trees or rows is large.\n\n While the 'X' used by ``fit`` always needs to be in column-major order, predictions\n can be done on data that is in either row-major or column-major orders, with row-major\n being faster for dense data.\n output : str, one of \"score\", \"avg_depth\", \"tree_num\", \"tree_depths\"\n Desired type of output. Options are:\n\n ``\"score\"``:\n Will output standardized outlier scores. For all scoring metrics, higher values\n indicate more outlierness.\n\n ``\"avg_depth\"``:\n Will output unstandardized average isolation depths. For ``scoring_metric=\"density\"``,\n will output the geometric mean instead. 
See the documentation for ``scoring_metric``,\n for more details about the calculation for other metrics.\n For all scoring metrics, higher values indicate less outlierness.\n\n ``\"tree_num\"``:\n Will output the index of the terminal node under each tree in the model.\n If this calculation is going to be perform frequently, it's recommended to\n build node indices through ``build_indexer``.\n\n ``\"tree_depths\"``:\n Will output non-standardized per-tree isolation depths or densities or log-densities\n (note that they will not include range penalties from ``penalize_range=True``).\n See the documentation for ``scoring_metric`` for details about the calculation\n for each metrics.\n\n Returns\n -------\n score : array(n_samples,) or array(n_samples, n_trees)\n Requested output type for each row accoring to parameter 'output' (outlier scores,\n average isolation depth, terminal node indices, or per-tree isolation depths).\n \"\"\"\n assert self.is_fitted_\n assert output in [\"score\", \"avg_depth\", \"tree_num\", \"tree_depths\"]\n nthreads_use = _process_nthreads(self.nthreads)\n X_num, X_cat, nrows = self._process_data_new(X, prefer_row_major = True, keep_new_cat_levels = False)\n if (output in [\"tree_num\", \"tree_depths\"]) and (self.ndim_ == 1):\n if self.missing_action_ == \"divide\":\n raise ValueError(\"Cannot output tree numbers/depths when using 'missing_action' = 'divide'.\")\n if (self._ncols_categ > 0) and (self.new_categ_action_ == \"weighted\") and (self.categ_split_type_ != \"single_categ\"):\n raise ValueError(\"Cannot output tree numbers/depths when using 'new_categ_action' = 'weighted'.\")\n if (nrows == 1) and (output == \"tree_num\") and (not self.has_indexer_):\n warnings.warn(\"Predicting tree number is slow, not recommended to do for 1 row at a time without indexer.\")\n\n depths, tree_num, tree_depths = self._cpp_obj.predict(\n _get_num_dtype(X_num, None, None), _get_int_dtype(X_num),\n X_num, X_cat, self._is_extended_,\n ctypes.c_size_t(nrows).value,\n ctypes.c_int(nthreads_use).value,\n ctypes.c_bool(output == \"score\").value,\n ctypes.c_bool(output == \"tree_num\").value,\n ctypes.c_bool(output == \"tree_depths\").value\n )\n\n if output in [\"score\", \"avg_depth\"]:\n return depths\n elif output == \"tree_depths\":\n return tree_depths\n else:\n return tree_num\n\n def decision_function(self, X):\n \"\"\"\n Wrapper for 'predict' with 'output=score'\n\n This function is kept for compatibility with Scikit-Learn.\n\n Parameters\n ----------\n X : array or array-like (n_samples, n_features)\n Observations for which to predict outlierness or average isolation depth. Can pass\n a NumPy array, Pandas DataFrame, or SciPy sparse CSC or CSR matrix.\n\n Returns\n -------\n score : array(n_samples,)\n Outlier scores for the rows in 'X' (the higher, the most anomalous).\n \"\"\"\n return self.predict(X, output=\"score\")\n\n def predict_distance(self, X, output = \"dist\", square_mat = True, X_ref = None, use_reference_points = True):\n \"\"\"\n Predict approximate distances or isolation kernels/proximities between points\n\n Predict approximate pairwise distances between points, or individual distances between\n two sets of points based on how many splits it takes to separate them, or isolation\n kernels (a.k.a. 
proximity matrix, which for example can be used for a generalized least-squares\n regressions as a rough estimate of residual correlations) from the model based on the number\n of trees in which two observations end up in the same terminal node.\n Can output either the average number\n of paths/steps it takes to separate two observations,\n or a standardized metric (in the same way as the outlier score) in which values closer\n to zero indicate nearer points, closer to one further away points, and closer to 0.5\n average distance, or a kernel/proximity metric, either standardized (values between zero and one)\n or raw (values ranging from zero to the number of trees in the model).\n\n Note\n ----\n The more threads that are set for the model, the higher the memory requirement will be as each\n thread will allocate an array with one entry per combination (with an exception being\n calculation of distances to reference points, which do not do this).\n\n Note\n ----\n Separation depths are very slow to calculate. By default, it will do it through a procedure\n that counts steps as observations are passed down the trees, which is especially slow and\n not recommended for more than a few thousand observations. If this function is going to be\n called repeatedly and/or it is going to be called for a large number of rows, it's highly\n recommended to build node distance indexes beforehand through ``build_indexer`` with\n option ``with_distances=True``, as then the computation will be done based on terminal node\n indices instead, which is a much faster procedure. If the calculations are all going to be performed\n with respect to a fixed set of points, it's highly recommended to set those points as references\n through ``set_reference_points``.\n\n Note\n ----\n If using ``assume_full_distr=False`` (not recommended to use such option), predictions with\n and without an indexer will differ slightly due to differences in what they count towards\n \"additional\" observations in the calculation.\n\n Parameters\n ----------\n X : array or array-like (n_samples, n_features)\n Observations for which to calculate approximate pairwise distances or kernels,\n or first group for distances/kernels between sets of points. Can pass\n a NumPy array, Pandas DataFrame, or SciPy sparse CSC matrix.\n output : str, one of \"dist\", \"avg_sep\", \"kernel\", \"kernel_raw\"\n Type of output to produce. If passing \"dist\", will standardize the average separation\n depths. If passing \"avg_sep\", will output the average separation depth without standardizing it\n (note that lower separation depth means furthest distance).\n If passing \"kernel\", will output the fraction of the trees in which two observations end up\n in the same terminal node. 
If passing \"kernel_raw\", will output the number (not fraction) of\n trees in which two observations end up in the same terminal node.\n\n Note that for \"kernel\" and \"kernel_raw\", having an indexer without reference points will not\n speed up calculations, and if such calculations are going to be done frequently, it is highly\n recommended to set reference points in the model object.\n square_mat : bool\n Whether to produce a full square matrix with the pairwise distances or kernels.\n If passing 'False', will output\n only the upper triangular part as a 1-d array in which entry (i,j) with 0 <= i < j < n is located at\n position p(i,j) = (i * (n - (i+1)/2) + j - i - 1).\n \n Ignored when passing ``X_ref`` or ``use_reference_points=True`` plus having reference points.\n X_ref : array or array-like (n_ref, n_features)\n Second group of observations. If passing it, will calculate distances/kernels between each point in\n ``X`` and each point in ``X_ref``. If passing ``None`` (the default), will calculate\n pairwise distances/kernels between the points in ``X``.\n Must be of the same type as ``X`` (e.g. array, DataFrame, CSC).\n\n Note that, if ``X_ref`` is passed and the model object has an indexer with reference points\n added (through ``set_reference_points``), those reference points will be ignored for the\n calculation.\n use_reference_points : bool\n When the model object has an indexer with reference points (which can be added through\n ``set_reference_points``), whether to calculate the distances/kernels from ``X`` to those reference\n points instead of the pairwise distances/kernels between points in ``X``.\n\n This is ignored when passing ``X_ref`` or when the model object does not contain an indexer\n or the indexer does not contain reference points.\n\n Returns\n -------\n dist : array(n_samples * (n_samples - 1) / 2,) or array(n_samples, n_samples) or array(n_samples, n_ref)\n Approximate distances or average separation depth or kernels/proximities between points,\n according to parameter 'output'. Shape and size depends on parameters ``square_mat``,\n ``use_reference_points``, and whether ``X_ref`` is passed.\n \"\"\"\n assert self.is_fitted_\n assert output in [\"dist\", \"avg_sep\", \"kernel\", \"kernel_raw\"]\n nthreads_use = _process_nthreads(self.nthreads)\n\n if X_ref is not None:\n if X.__class__ != X_ref.__class__:\n raise ValueError(\"'X' and 'X_ref' must be of the same class.\")\n nobs_group1 = X.shape[0]\n if X.__class__.__name__ == \"DataFrame\":\n X = X.append(X_ref, ignore_index = True)\n elif issparse(X):\n X = sp_vstack([X, X_ref])\n else:\n X = np.vstack([X, X_ref])\n else:\n nobs_group1 = 0\n use_reference_points = bool(use_reference_points)\n if use_reference_points and self._cpp_obj.has_reference_points():\n if (not output in [\"kernel\", \"kernel_raw\"]) and (not self._cpp_obj.has_indexer_with_distances()):\n raise ValueError(\"Model indexer was built without distances. 
Cannot calculate distances to reference points.\")\n \n\n can_take_row_major = self._cpp_obj.has_indexer() and self._cpp_obj.has_indexer_with_distances() and not issparse(X)\n X_num, X_cat, nrows = self._process_data_new(X, allow_csr = False, prefer_row_major = can_take_row_major, keep_new_cat_levels = False)\n if nrows == 1 and not (use_reference_points and self._cpp_obj.has_reference_points()):\n raise ValueError(\"Cannot calculate pairwise distances for only 1 row.\")\n\n tmat, dmat, rmat = self._cpp_obj.dist(_get_num_dtype(X_num, None, None), _get_int_dtype(X_num),\n X_num, X_cat, self._is_extended_,\n ctypes.c_size_t(nrows).value,\n ctypes.c_bool(self.use_long_double).value,\n ctypes.c_int(nthreads_use).value,\n ctypes.c_bool(self.assume_full_distr).value,\n ctypes.c_bool(output in [\"dist\", \"kernel\"]).value,\n ctypes.c_bool(square_mat).value,\n ctypes.c_size_t(nobs_group1).value,\n ctypes.c_bool(use_reference_points).value,\n ctypes.c_bool(output in [\"kernel\", \"kernel_raw\"]).value)\n \n if (X_ref is not None) or (use_reference_points and rmat.shape[1]):\n return rmat\n elif square_mat:\n return dmat\n else:\n return tmat\n\n def predict_kernel(self, X, square_mat = True, X_ref = None, use_reference_points = True):\n \"\"\"\n Predict isolation kernel between points\n\n This is a shorthand for ``predict_distance`` with ``output=\"kernel\"``.\n\n Parameters\n ----------\n X : array or array-like (n_samples, n_features)\n Observations for which to calculate approximate pairwise kernels/proximities,\n or first group for kernels between sets of points. Can pass\n a NumPy array, Pandas DataFrame, or SciPy sparse CSC matrix.\n square_mat : bool\n Whether to produce a full square matrix with the pairwise kernels. If passing 'False', will output\n only the upper triangular part as a 1-d array in which entry (i,j) with 0 <= i < j < n is located at\n position p(i,j) = (i * (n - (i+1)/2) + j - i - 1).\n Ignored when passing ``X_ref``.\n X_ref : array or array-like (n_ref, n_features)\n Second group of observations. If passing it, will calculate kernels between each point in\n ``X`` and each point in ``X_ref``. If passing ``None`` (the default), will calculate\n pairwise kernels between the points in ``X``.\n Must be of the same type as ``X`` (e.g. array, DataFrame, CSC).\n\n Note that, if ``X_ref`` is passed and the model object has an indexer with reference points\n added (through ``set_reference_points``), those reference points will be ignored for the\n calculation.\n use_reference_points : bool\n When the model object has an indexer with reference points (which can be added through\n ``set_reference_points``), whether to calculate the kernels from ``X`` to those reference\n points instead of the pairwise kernels between points in ``X``.\n\n This is ignored when passing ``X_ref`` or when the model object does not contain an indexer\n or the indexer does not contain reference points.\n\n Returns\n -------\n dist : array(n_samples * (n_samples - 1) / 2,) or array(n_samples, n_samples) or array(n_samples, n_ref)\n Approximate kernels between points, according to\n parameter 'output'. 
Shape and size depends on parameter ``square_mat``,\n and whether ``X_ref`` is passed.\n \"\"\"\n return self.predict_distance(X, output = \"kernel\", square_mat = square_mat, X_ref = X_ref, use_reference_points = use_reference_points)\n\n def transform(self, X):\n \"\"\"\n Impute missing values in the data using isolation forest model\n\n Note\n ----\n In order to use this functionality, the model must have been built with imputation capabilities ('build_imputer' = 'True').\n\n Note\n ----\n Categorical columns, if imputed with a model fit to a DataFrame, will always come out\n with pandas categorical dtype.\n\n Note\n ----\n The input may contain new columns (i.e. not present when the model was fitted),\n which will be output as-is.\n\n Parameters\n ----------\n X : array or array-like (n_samples, n_features)\n Data for which missing values should be imputed. Can pass a NumPy array, Pandas DataFrame, or SciPy sparse CSR matrix.\n\n If the model was fit to a DataFrame with categorical columns, must also be a DataFrame.\n\n Returns\n -------\n X_imputed : array or array-like (n_samples, n_features)\n Object of the same type and dimensions as 'X', but with missing values already imputed. Categorical\n columns will be output as pandas's 'Categorical' regardless of their dtype in 'X'.\n \"\"\"\n assert self.is_fitted_\n if not self.build_imputer:\n raise ValueError(\"Cannot impute missing values with model that was built with 'build_imputer' = 'False'.\")\n if self.missing_action_ == \"fail\":\n raise ValueError(\"Cannot impute missing values when using 'missing_action' = 'fail'.\")\n nthreads_use = _process_nthreads(self.nthreads)\n\n X_num, X_cat, nrows = self._process_data_new(X, allow_csr = True, allow_csc = False, prefer_row_major = True, keep_new_cat_levels = False)\n if X.__class__.__name__ != \"DataFrame\":\n if X_num is not None:\n if X_num.shape[1] == self._ncols_numeric:\n X_num = X_num.copy()\n else:\n X_num = X_num[:, :self._ncols_numeric].copy()\n if X_cat is not None:\n X_cat = X_cat.copy()\n X_num, X_cat = self._cpp_obj.impute(_get_num_dtype(X_num, None, None), _get_int_dtype(X_num),\n X_num, X_cat,\n ctypes.c_bool(self._is_extended_).value,\n ctypes.c_size_t(nrows).value,\n ctypes.c_bool(self.use_long_double).value,\n ctypes.c_int(nthreads_use).value)\n return self._rearrange_imputed(X, X_num, X_cat)\n\n def fit_transform(self, X, y = None, column_weights = None, categ_cols = None):\n \"\"\"\n Scikit-Learn pipeline-compatible version of 'fit_predict'\n\n Will fit the model and output imputed missing values. Intended to be used as part of Scikit-learn\n pipelining. Note that this is just a wrapper over 'fit_predict' with parameter 'output_imputed' = 'True'.\n See the documentation of 'fit_predict' for details.\n\n Parameters\n ----------\n X : array or array-like (n_samples, n_features)\n Data to which to fit the model and whose missing values need to be imputed. Can pass a NumPy array, Pandas DataFrame, or SciPy sparse CSC matrix (see the documentation of ``fit`` for more details).\n\n If the model was fit to a DataFrame with categorical columns, must also be a DataFrame.\n y : None\n Not used. Kept for compatibility with Scikit-Learn.\n column_weights : None or array(n_features,)\n Sampling weights for each column in 'X'. Ignored when picking columns by deterministic criterion.\n If passing None, each column will have a uniform weight. 
If used along with kurtosis weights, the\n effect is multiplicative.\n Note that, if passing a DataFrame with both numeric and categorical columns, the column names must\n not be repeated, otherwise the column weights passed here will not end up matching.\n categ_cols : None or array-like\n Columns that hold categorical features, when the data is passed as an array or matrix.\n Categorical columns should contain only integer values with a continuous numeration starting at zero,\n with negative values and NaN taken as missing,\n and the array or list passed here should correspond to the column numbers, with numeration starting\n at zero. The maximum categorical value should not exceed 'INT_MAX' (typically :math:`2^{31}-1`).\n This might be passed either at construction time or when calling ``fit`` or variations of ``fit``.\n \n This is ignored when the input is passed as a ``DataFrame`` as then it will consider columns as\n categorical depending on their dtype.\n\n Returns\n -------\n imputed : array-like(n_samples, n_columns)\n Input data 'X' with missing values imputed according to the model.\n \"\"\"\n if (self.sample_size is None) or (self.sample_size == \"auto\"):\n outp = self.fit_predict(X = X, column_weights = column_weights, categ_cols = categ_cols, output_imputed = True)\n return outp[\"imputed\"]\n else:\n self.fit(X = X, column_weights = column_weights, categ_cols = categ_cols)\n return self.transform(X)\n\n def partial_fit(self, X, sample_weights = None, column_weights = None, X_ref = None):\n \"\"\"\n Add additional (single) tree to isolation forest model\n\n Adds a single tree fit to the full (non-subsampled) data passed here. Must\n have the same columns as previously-fitted data.\n\n Note\n ----\n If constructing trees with different sample sizes, the outlier scores with depth-based metrics\n will not be centered around 0.5 and might have a very skewed distribution. The standardizing\n constant for the scores will be taken according to the sample size passed in the construction\n argument (if that is ``None`` or ``\"auto\"``, will then set it as the sample size of the first tree).\n\n If trees are going to be fit to samples of different sizes, it's strongly recommended to use\n density-based scoring metrics instead.\n\n Note\n ----\n This function is not thread-safe - that is, it will produce problems if one tries to call\n this function on the same model object in parallel through e.g. ``joblib`` with a shared-memory\n backend (which is not the default for joblib).\n\n Parameters\n ----------\n X : array or array-like (n_samples, n_features)\n Data to which to fit the new tree. Can pass a NumPy array, Pandas DataFrame, or SciPy sparse CSC matrix.\n If passing a DataFrame, will assume that columns are:\n \n - Numeric, if their dtype is a subtype of NumPy's 'number' or 'datetime64'.\n \n - Categorical, if their dtype is 'object', 'Categorical', or 'bool'. Note that,\n if `Categorical` dtypes are ordered, the order will be ignored here.\n Categorical columns, if any, may have new categories.\n \n Other dtypes are not supported.\n\n If passing an array and the array is not in column-major format, will be forcibly converted\n to column-major, which implies an extra data copy.\n sample_weights : None or array(n_samples,)\n Sample observation weights for each row of 'X', with higher weights indicating\n distribution density (i.e. if the weight is two, it has the same effect of including the same data\n point twice). 
If not 'None', model must have been built with 'weights_as_sample_prob' = 'False'.\n column_weights : None or array(n_features,)\n Sampling weights for each column in 'X'. Ignored when picking columns by deterministic criterion.\n If passing None, each column will have a uniform weight. If used along with kurtosis weights, the\n effect is multiplicative.\n X_ref : array or array-like (n_references, n_features)\n Reference points for distance and/or kernel calculations, if these were previously added to\n the model object through ``set_reference_points``. Must correspond to the same points that\n were passed to the call to ``set_reference_points``.\n\n Might be passed in either row-major (preferred) or column-major order. If sparse, only CSC\n format is supported.\n\n This is ignored if the model has no stored reference points.\n\n Returns\n -------\n self : obj\n This object.\n \"\"\"\n if not self.is_fitted_:\n self._init()\n if (sample_weights is not None) and (self.weights_as_sample_prob):\n raise ValueError(\"Cannot use sampling weights with 'partial_fit'.\")\n\n if not self.is_fitted_:\n trees_restore = self.ntrees\n try:\n self.ntrees = 1\n self.fit(X = X, sample_weights = sample_weights, column_weights = column_weights)\n if X_ref is not None:\n self.set_reference_points(X_ref)\n finally:\n self.ntrees = trees_restore\n return self\n\n if self.is_fitted_:\n if (X_ref is None) and (self.has_indexer_) and (self._cpp_obj.has_reference_points()):\n msg = \"Must pass either pass 'X_ref' in order to maintain reference points in indexer,\"\n msg += \" or drop reference points through 'drop_reference_points'.\"\n raise ValueError(msg)\n if (X_ref is not None) and (not self.has_indexer_ or not self._cpp_obj.has_reference_points()):\n warnings.warn(\"Passed 'X_ref', but model object has no reference points. 
Will be ignored.\")\n X_ref = None\n\n \n X_num, X_cat, nrows = self._process_data_new(X, allow_csr = False, prefer_row_major = False, keep_new_cat_levels = True)\n if sample_weights is not None:\n sample_weights = np.array(sample_weights).reshape(-1)\n if (X_num is not None) and (X_num.dtype != sample_weights.dtype):\n sample_weights = sample_weights.astype(X_num.dtype)\n if sample_weights.dtype not in [ctypes.c_double, ctypes.c_float]:\n sample_weights = sample_weights.astype(ctypes.c_double)\n assert sample_weights.shape[0] == X.shape[0]\n if column_weights is not None:\n column_weights = np.array(column_weights).reshape(-1)\n if (X_num is not None) and (X_num.dtype != column_weights.dtype):\n column_weights = column_weights.astype(X_num.dtype)\n if column_weights.dtype not in [ctypes.c_double, ctypes.c_float]:\n column_weights = column_weights.astype(ctypes.c_double)\n assert column_weights.shape[0] == X.shape[1]\n if (sample_weights is not None) and (column_weights is not None) and (sample_weights.dtype != column_weights.dtype):\n sample_weights = sample_weights.astype(ctypes.c_double)\n column_weights = column_weights.astype(ctypes.c_double)\n ncat = None\n if self._ncols_categ > 0:\n ncat = np.array([arr.shape[0] for arr in self._cat_mapping]).astype(ctypes.c_int)\n if (ncat is None) and (X_cat is not None) and (X_cat.shape[1]):\n ncat = X_cat.max(axis=0).clip(0)\n if self.max_depth == \"auto\":\n max_depth = 0\n limit_depth = True\n elif self.max_depth is None:\n max_depth = nrows - 1\n else:\n max_depth = self.max_depth\n limit_depth = False\n\n if self.ncols_per_tree is None:\n ncols_per_tree = 0\n elif self.ncols_per_tree <= 1:\n ncols_tot = 0\n if X_num is not None:\n ncols_tot += X_num.shape[1]\n if X_cat is not None:\n ncols_tot += X_cat.shape[1]\n ncols_per_tree = int(np.ceil(self.ncols_per_tree * ncols_tot))\n else:\n ncols_per_tree = self.ncols_per_tree\n\n if (\n self.prob_pick_pooled_gain_ or\n self.prob_pick_avg_gain_ or\n self.prob_pick_full_gain_ or\n self.prob_pick_dens_\n ) and self.ndim_ == 1:\n ncols_tot = (X_num.shape[1] if X_num is not None else 0) + (X_cat.shape[1] if X_cat is not None else 0)\n if self.ntry > ncols_tot:\n warnings.warn(\"Passed 'ntry' larger than number of columns, will decrease it.\")\n\n if isinstance(self.random_state, np.random.RandomState):\n seed = self.random_state.randint(np.iinfo(np.int32).max)\n else:\n seed = self.random_seed\n seed += self._ntrees\n\n if X_ref is None:\n ref_X_num = None\n ref_X_cat = None\n else:\n ref_X_num, ref_X_cat, ref_nrows = self._process_data_new(X_ref, allow_csr = False, prefer_row_major = True, keep_new_cat_levels = True)\n expected_ref_nrows = self._cpp_obj.get_n_reference_points()\n if ref_nrows != expected_ref_nrows:\n raise ValueError(\"'X_ref' as %d rows, but previous reference data had %d rows.\"\n % (ref_nrows, expected_ref_nrows))\n if ref_X_num is not None:\n matching_num_dtype = _get_num_dtype(X_num, sample_weights, column_weights)\n if ref_X_num.data.dtype != matching_num_dtype.dtype:\n ref_X_num = ref_X_num.astype(matching_num_dtype.dtype)\n if issparse(ref_X_num):\n matching_int_dtype = _get_int_dtype(X_num)\n if ref_X_num.indptr.dtype != matching_int_dtype.dtype:\n ref_X_num = ref_X_num.copy()\n ref_X_num.indices = ref_X_num.indices.astype(matching_int_dtype.dtype)\n ref_X_num.indptr = ref_X_num.indptr.astype(matching_int_dtype.dtype)\n\n\n self._cpp_obj.fit_tree(_get_num_dtype(X_num, sample_weights, column_weights),\n _get_int_dtype(X_num),\n X_num, X_cat, ncat, sample_weights, 
column_weights,\n ctypes.c_size_t(nrows).value,\n ctypes.c_size_t(self._ncols_numeric).value,\n ctypes.c_size_t(self._ncols_categ).value,\n ctypes.c_size_t(self.ndim_).value,\n ctypes.c_size_t(self.ntry).value,\n self.coefs,\n ctypes.c_bool(self.coef_by_prop).value,\n ctypes.c_size_t(max_depth).value,\n ctypes.c_size_t(ncols_per_tree).value,\n ctypes.c_bool(limit_depth).value,\n ctypes.c_bool(self.penalize_range).value,\n ctypes.c_bool(self.standardize_data),\n ctypes.c_bool(self.fast_bratio).value,\n ctypes.c_bool(self.weigh_by_kurtosis).value,\n ctypes.c_double(self.prob_pick_pooled_gain_).value,\n ctypes.c_double(self.prob_pick_avg_gain_).value,\n ctypes.c_double(getattr(self, \"prob_pick_full_gain_\", 0.)).value,\n ctypes.c_double(getattr(self, \"prob_pick_gain_avg_\", 0.)).value,\n ctypes.c_double(getattr(self, \"prob_pick_col_by_range_\", 0.)).value,\n ctypes.c_double(getattr(self, \"prob_pick_col_by_var_\", 0.)).value,\n ctypes.c_double(getattr(self, \"prob_pick_col_by_kurt_\", 0.)).value,\n ctypes.c_double(self.min_gain).value,\n self.missing_action_,\n self.categ_split_type_,\n self.new_categ_action_,\n ctypes.c_bool(self.build_imputer).value,\n ctypes.c_size_t(self.min_imp_obs).value,\n self.depth_imp,\n self.weigh_imp_rows,\n ctypes.c_bool(self.all_perm).value,\n ref_X_num,\n ref_X_cat,\n ctypes.c_int(seed).value,\n ctypes.c_bool(self.use_long_double).value)\n self._ntrees += 1\n return self\n\n def get_num_nodes(self):\n \"\"\"\n Get number of nodes per tree\n\n Gets the number of nodes per tree, along with the number of terminal nodes.\n\n Returns\n -------\n nodes : tuple(array(n_trees,), array(n_trees,))\n A tuple in which the first element denotes the total number of nodes\n in each tree, and the second element denotes the number of terminal\n nodes. Both are returned as arrays having one entry per tree.\n \"\"\"\n assert self.is_fitted_\n nthreads = _process_nthreads(self.nthreads)\n n_nodes, n_terminal = self._cpp_obj.get_n_nodes(ctypes.c_bool(self._is_extended_).value,\n ctypes.c_int(nthreads).value)\n return n_nodes, n_terminal\n\n def append_trees(self, other):\n \"\"\"\n Appends isolation trees from another Isolation Forest model into this one\n\n This function is intended for merging models **that use the same hyperparameters** but\n were fitted to different subsets of data.\n\n In order for this to work, both models must have been fit to data in the same format - \n that is, same number of columns, same order of the columns, and same column types, although\n not necessarily same object classes (e.g. can mix ``np.array`` and ``scipy.sparse.csc_matrix``).\n\n If the data has categorical variables, the models should have been built with parameter\n ``recode_categ=False`` in the class constructor,\n and the categorical columns passed as type ``pd.Categorical`` with the same encoding -\n otherwise different models might be using different encodings for each categorical column,\n which will not be preserved as only the trees will be appended without any associated metadata.\n\n Note\n ----\n This function will not perform any checks on the inputs, and passing two incompatible\n models (e.g. fit to different numbers of columns) will result in wrong results and\n potentially crashing the Python process when using it.\n\n Note\n ----\n This function is not thread-safe - that is, it will produce problems if one tries to call\n this function on the same model object in parallel through e.g. 
``joblib`` with a shared-memory\n backend (which is not the default for joblib).\n\n Parameters\n ----------\n other : IsolationForest\n Another Isolation Forest model from which trees will be appended to this model.\n It will not be modified during the call to this function.\n\n Returns\n -------\n self : obj\n This object.\n \"\"\"\n assert self.is_fitted_\n assert other.is_fitted_\n assert isinstance(other, IsolationForest)\n\n if (self._is_extended_) != (other._is_extended_):\n raise ValueError(\"Cannot mix extended and regular isolation forest models (ndim=1).\")\n\n if self.cols_categ_.shape[0]:\n warnings.warn(\"Merging models with categorical features might give wrong results.\")\n\n self._cpp_obj.append_trees_from_other(other._cpp_obj, self._is_extended_)\n self._ntrees += other._ntrees\n\n return self\n\n def export_model(self, file, add_metada_file = False):\n \"\"\"\n Export Isolation Forest model\n\n Save Isolation Forest model to a serialized file along with its\n metadata, in order to be re-used in Python or in the R or the C++ versions of this package.\n \n This function is not suggested to be used for passing models to and from Python -\n in such case, one can use ``pickle`` instead, although the function\n still works correctly for serializing objects between Python processes.\n \n Note that, if the model was fitted to a ``DataFrame``, the column names must be\n something exportable as JSON, and must be something that R could\n use as column names (for example, using integers as column names is valid in pandas\n but not in R).\n \n Can optionally generate a JSON file with metadata such as the column names and the\n levels of categorical variables, which can be inspected visually in order to detect\n potential issues (e.g. character encoding) or to make sure that the columns are of\n the right types.\n\n The metadata file, if produced, will contain, among other things, the encoding that was used for\n categorical columns - this is under ``data_info.cat_levels``, as an array of arrays by column,\n with the first entry for each column corresponding to category 0, second to category 1,\n and so on (the C++ version takes them as integers). When passing ``categ_cols``, there\n will be no encoding but it will save the maximum category integer and the column\n numbers instead of names.\n \n The serialized file can be used in the C++ version by reading it as a binary file\n and de-serializing its contents using the C++ function 'deserialize_combined'\n (recommended to use 'inspect_serialized_object' beforehand).\n\n Be aware that this function will write raw bytes from memory as-is without compression,\n so the file sizes can end up being much larger than when using ``pickle``.\n \n The metadata is not used in the C++ version, but is necessary for the R and Python versions.\n\n Note\n ----\n While in earlier versions of this library this functionality used to be faster than\n ``pickle``, starting with version 0.3.0, this function and ``pickle`` should have\n similar timings and it's recommended to use ``pickle`` for serializing objects\n across Python processes.\n\n Note\n ----\n **Important:** The model treats boolean variables as categorical. Thus, if the model was fit\n to a ``DataFrame`` with boolean columns, when importing this model into C++, they need to be\n encoded in the same order - e.g. the model might encode ``True`` as zero and ``False``\n as one - you need to look at the metadata for this. 
Also, if using some of Pandas' own\n Boolean types, these might end up as non-boolean categorical, and if importing the model into R,\n you might need to pass values as e.g. ``\"True\"`` instead of ``TRUE`` (look at the ``.metadata``\n file to determine this).\n\n Note\n ----\n The files produced by this function will be compatible between:\n \n * Different operating systems.\n\n * Different compilers.\n\n * Different Python/R versions.\n\n * Systems with different 'size_t' width (e.g. 32-bit and 64-bit),\n as long as the file was produced on a system that was either 32-bit or 64-bit,\n and as long as each saved value fits within the range of the machine's 'size_t' type.\n\n * Systems with different 'int' width,\n as long as the file was produced on a system that was 16-bit, 32-bit, or 64-bit,\n and as long as each saved value fits within the range of the machine's int type.\n\n * Systems with different bit endianness (e.g. x86 and PPC64 in non-le mode).\n\n * Versions of this package from 0.3.0 onwards, **but only forwards compatible**\n (e.g. a model saved with versions 0.3.0 to 0.3.5 can be loaded under version\n 0.3.6, but not the other way around, and attempting to do so will cause crashes\n and memory curruptions without an informative error message). **This last point applies\n also to models saved through pickle**. Note that loading a\n model produced by an earlier version of the library might be slightly slower.\n\n But will not be compatible between:\n\n * Systems with different floating point numeric representations\n (e.g. standard IEEE754 vs. a base-10 system).\n\n * Versions of this package earlier than 0.3.0.\n\n This pretty much guarantees that a given file can be serialized and de-serialized\n in the same machine in which it was built, regardless of how the library was compiled.\n\n Reading a serialized model that was produced in a platform with different\n characteristics (e.g. 32-bit vs. 64-bit) will be much slower.\n\n Note\n ----\n On Windows, if compiling this library with a compiler other than MSVC or MINGW,\n there might be issues exporting models larger than 2GB.\n\n Parameters\n ----------\n file : str\n The output file path into which to export the model. Must be a file name, not a\n file handle.\n add_metada_file : bool\n Whether to generate a JSON file with metadata, which will have\n the same name as the model but will end in '.metadata'. This file is not used by the\n de-serialization function, it's only meant to be inspected manually, since such contents\n will already be written in the produced model file.\n\n Returns\n -------\n self : obj\n This object.\n \"\"\"\n assert self.is_fitted_\n file = os.path.expanduser(file)\n metadata = self._export_metadata()\n if add_metada_file:\n with open(file + \".metadata\", \"w\") as of:\n json.dump(metadata, of, indent=4)\n metadata = json.dumps(metadata)\n metadata = metadata.encode('utf-8')\n self._cpp_obj.serialize_obj(file, metadata, self.ndim_ > 1, has_imputer=self.build_imputer)\n return self\n\n @staticmethod\n def import_model(file):\n \"\"\"\n Load an Isolation Forest model exported from R or Python\n\n Loads a serialized Isolation Forest model as produced and exported\n by the function ``export_model`` or by the R version of this package.\n Note that the metadata must be something\n importable in Python - e.g. 
column names must be valid for Pandas.\n \n It's recommended to generate a '.metadata' file (passing ``add_metada_file=True``) and\n to visually inspect said file in any case.\n\n See the documentation for ``export_model`` for details about compatibility\n of the generated files across different machines and versions.\n\n Note\n ----\n This is a static class method - that is, it should be called like this:\n ``iso = IsolationForest.import_model(...)``\n (i.e. no parentheses after `IsolationForest`)\n\n Note\n ----\n While in earlier versions of this library this functionality used to be faster than\n ``pickle``, starting with version 0.3.0, this function and ``pickle`` should have\n similar timings and it's recommended to use ``pickle`` for serializing objects\n across Python processes.\n \n Parameters\n ----------\n file : str\n The input file path containing an exported model along with its metadata file.\n Must be a file name, not a file handle.\n\n Returns\n -------\n iso : IsolationForest\n An Isolation Forest model object reconstructed from the serialized file\n and ready to use.\n \"\"\"\n file = os.path.expanduser(file)\n obj = IsolationForest()\n metadata = obj._cpp_obj.deserialize_obj(file)\n metadata = json.loads(metadata)\n obj._take_metadata(metadata)\n return obj\n\n def generate_sql(self, enclose=\"doublequotes\", output_tree_num = False, tree = None,\n table_from = None, select_as = \"outlier_score\",\n column_names = None, column_names_categ = None):\n \"\"\"\n Generate SQL statements representing the model prediction function\n\n Generate SQL statements - either separately per tree (the default),\n for a single tree if needed (if passing ``tree``), or for all trees\n concatenated together (if passing ``table_from``). Can also be made\n to output terminal node numbers (numeration starting at zero).\n\n Note\n ----\n Making predictions through SQL is much less efficient than from the model\n itself, as each terminal node will have to check all of the conditions\n that lead to it instead of passing observations down a tree.\n\n Note\n ----\n If constructed with the default arguments, the model will not perform any\n sub-sampling, which can lead to very big trees. If it was fit to a large\n dataset, the generated SQL might consist of gigabytes of text, and might\n lay well beyond the character limit of commands accepted by SQL vendors.\n\n Note\n ----\n The generated SQL statements will not include range penalizations, thus\n predictions might differ from calls to ``predict`` when using\n ``penalize_range=True``.\n\n Note\n ----\n The generated SQL statements will only include handling of missing values\n when using ``missing_action=\"impute\"``. When using the single-variable\n model with categorical variables + subset splits, the rule buckets might be\n incomplete due to not including categories that were not present in a given\n node - this last point can be avoided by using ``new_categ_action=\"smallest\"``,\n ``new_categ_action=\"random\"``, or ``missing_action=\"impute\"`` (in the latter\n case will treat them as missing, but the ``predict`` function might treat\n them differently).\n\n Note\n ----\n The resulting statements will include all the tree conditions as-is,\n with no simplification. Thus, there might be lots of redundant conditions\n in a given terminal node (e.g. 
\"X > 2\" and \"X > 1\", the second of which is\n redundant).\n\n Note\n ----\n If using ``scoring_metric=\"density\"`` or ``scoring_metric=\"boxed_ratio\"`` plus\n ``output_tree_num=False``, the outputs will correspond to the logarithm of the\n density rather than the density.\n\n Parameters\n ----------\n enclose : str\n With which symbols to enclose the column names in the select statement\n so as to make them SQL compatible in case they include characters like dots.\n Options are:\n\n ``\"doublequotes\"``:\n Will enclose them as ``\"column_name\"`` - this will work for e.g. PostgreSQL.\n\n ``\"squarebraces\"``:\n Will enclose them as ``[column_name]`` - this will work for e.g. SQL Server.\n\n ``\"none\"``:\n Will output the column names as-is (e.g. ``column_name``)\n output_tree_num : bool\n Whether to make the statements return the terminal node number\n instead of the isolation depth. The numeration will start at zero.\n tree : int or None\n Tree for which to generate SQL statements. If passed, will generate\n the statements only for that single tree. If passing 'None', will\n generate statements for all trees in the model.\n table_from : str or None\n If passing this, will generate a single select statement for the\n outlier score from all trees, selecting the data from the table\n name passed here. In this case, will always output the outlier\n score, regardless of what is passed under ``output_tree_num``.\n select_as : str\n Alias to give to the generated outlier score in the select statement.\n Ignored when not passing ``table_from``.\n column_names : None or list[str]\n Column names to use for the **numeric** columns.\n If not passed and the model was fit to a ``DataFrame``, will use the column\n names from that ``DataFrame``, which can be found under ``self.cols_numeric_``.\n If not passing it and the model was fit to data in a format other than\n ``DataFrame``, the columns will be named \"column_N\" in the resulting\n SQL statement. Note that the names will be taken verbatim - this function will\n not do any checks for whether they constitute valid SQL or not, and will not\n escape characters such as double quotation marks.\n column_names_categ : None or list[str]\n Column names to use for the **categorical** columns.\n If not passed, will use the column names from the ``DataFrame`` to which the\n model was fit. 
These can be found under ``self.cols_categ_``.\n\n Returns\n -------\n sql : list[str] or str\n A list of SQL statements for each tree as strings, or the SQL statement\n for a single tree if passing 'tree', or a single select-from SQL statement\n with all the trees concatenated if passing ``table_from``.\n \"\"\"\n assert self.is_fitted_\n \n single_tree = False\n if tree is not None:\n if isinstance(tree, float):\n tree = int(tree)\n assert isinstance(tree, int)\n assert tree >= 0\n assert tree < self._ntrees\n single_tree = True\n else:\n tree = 0\n output_tree_num = bool(output_tree_num)\n\n if self._ncols_numeric:\n if column_names is not None:\n if len(column_names) != self._ncols_numeric:\n raise ValueError(\"'column_names' must have %d entries.\" % self._ncols_numeric)\n else:\n if self.cols_numeric_.shape[0]:\n column_names = self.cols_numeric_\n else:\n column_names = [\"column_\" + str(cl) for cl in range(self._ncols_numeric)]\n else:\n column_names = []\n\n if self.cols_categ_.shape[0]:\n if column_names_categ is not None:\n if len(column_names_categ) != self.cols_categ_.shape[0]:\n raise ValueError(\"'column_names_categ' must have %d entries.\" % self.cols_categ_.shape[0])\n else:\n column_names_categ = self.cols_categ_\n categ_levels = [[str(lev).encode() for lev in mp] for mp in self._cat_mapping]\n else:\n column_names_categ = []\n categ_levels = []\n\n assert enclose in [\"doublequotes\", \"squarebraces\", \"none\"]\n if enclose != \"none\":\n enclose_left = '\"' if (enclose == \"doublequotes\") else '['\n enclose_right = '\"' if (enclose == \"doublequotes\") else ']'\n column_names = [(enclose_left + cl + enclose_right).encode() for cl in column_names]\n column_names_categ = [(enclose_left + cl + enclose_right).encode() for cl in column_names_categ]\n\n nthreads_use = _process_nthreads(self.nthreads)\n\n out = [s.decode()\n for s in self._cpp_obj.generate_sql(self.ndim_ > 1,\n column_names, column_names_categ, categ_levels,\n output_tree_num, single_tree, tree, nthreads_use)]\n if single_tree:\n return out[0]\n return out\n\n def to_treelite(self, use_float32 = False):\n \"\"\"\n Convert model to 'treelite' format\n\n Converts an IsolationForest model to a 'treelite' object, which can be compiled into a small\n standalone runtime library for smaller models and usually faster predictions:\n\n https://treelite.readthedocs.io/en/latest/index.html\n\n\n A couple notes about this conversion:\n\n - It is only possible to convert to 'treelite' when using ``ndim=1`` (which is not the default).\n - The 'treelite' and 'treelite_runtime' libraries must be installed for this to work.\n - The options for handling missing values in 'treelite' are more limited.\n This function will always produce models that force ``missing_action=\"impute\"``, regardless\n of how the IsolationForest model itself handles them.\n - The options for handling unseen categories in categorical variables are also more\n limited in 'treelite'. 
It's not possible to convert models that use ``new_categ_action=\"weighted\"``,\n and categories that were not present within the training data (which are not meant to be passed to\n 'treelite') will always be sent to the right side of the split, which might produce different\n results from ``predict``.\n - Some features such as range penalizations will not be kept in the 'treelite' model.\n - While this library always uses C 'double' precision (typically 'float64') for model objects and\n prediction outputs, 'treelite' (a) can use 'float32' precision, (b) converts floating point numbers\n to a decimal representation and back to floating point; which combined can result in some precision\n loss which leads to producing slightly different predictions from the ``predict`` function in this\n package.\n - If the model was fit to a DataFrame having a mixture of numerical and categorical columns, the\n resulting 'treelite' object will be built assuming all the numerical columns come before the\n categorical columns, regardless of which order they originally had in the data that was passed to\n 'fit'. In such cases, it is possible to check the order of the columns under attributes\n ``self.cols_numeric_`` and ``self.cols_categ_``.\n - Categorical columns in 'treelite' are passed as integer values. if the model was fit to a DataFrame\n with categorical columns, the encoding that is used can be found under ``self._cat_mapping``.\n - The 'treelite' object returned by this function will not yet have been compiled. It's necessary to\n call ``compile`` and ``export_lib`` afterwards in order to be able to use it.\n\n Parameters\n ----------\n use_float32 : bool\n Whether to use 'float32' type for the model. This is typically faster but has less precision\n than the typical 'float64' (outside of this conversion, models from this library always use\n 'float64').\n\n Returns\n -------\n model : obj\n A 'treelite' model object.\n \"\"\"\n assert self.ndim_ == 1\n assert self.is_fitted_\n\n if (self._ncols_categ and\n self.categ_split_type_ != \"single_categ\" and\n self.new_categ_action_ not in [\"smallest\", \"random\"]\n ):\n raise ValueError(\"Cannot convert to 'treelite' with the current parameters for categorical columns.\")\n\n if self.missing_action_ != \"impute\":\n warnings.warn(\"'treelite' conversion will switch 'missing_action' to 'impute'.\")\n if self.penalize_range:\n warnings.warn(\"'penalize_range' is ignored (assumed 'False') for 'treelite' conversion.\")\n\n import treelite\n\n float_dtype = 'float32' if bool(use_float32) else 'float64'\n\n num_node_info = np.empty(6, dtype=ctypes.c_double)\n n_nodes = self.get_num_nodes()[0]\n\n if self.categ_cols_ is None:\n mapping_num_cols = np.arange(self._ncols_numeric)\n mapping_cat_cols = np.arange(self._ncols_numeric, self._ncols_numeric + self._ncols_categ)\n else:\n mapping_num_cols = np.setdiff1d(np.arange(self._ncols_numeric + self._ncols_categ),\n self.categ_cols_, assume_unique=True)\n mapping_cat_cols = np.array(self.categ_cols_).reshape(-1).astype(int)\n\n if self.scoring_metric in [\"depth\", \"adj_depth\", \"adj_density\"]:\n builder = treelite.ModelBuilder(\n num_feature = self._ncols_numeric + self._ncols_categ,\n average_tree_output = True,\n threshold_type = float_dtype,\n leaf_output_type = float_dtype,\n pred_transform = \"exponential_standard_ratio\",\n ratio_c = self._cpp_obj.get_expected_isolation_depth()\n )\n else:\n builder = treelite.ModelBuilder(\n num_feature = self._ncols_numeric + self._ncols_categ,\n 
average_tree_output = True,\n threshold_type = float_dtype,\n leaf_output_type = float_dtype\n )\n for tree_ix in range(self._ntrees):\n tree = treelite.ModelBuilder.Tree(threshold_type = float_dtype, leaf_output_type = float_dtype)\n for node_ix in range(n_nodes[tree_ix]):\n cat_left = self._cpp_obj.get_node(tree_ix, node_ix, num_node_info)\n \n if num_node_info[0] == 1:\n tree[node_ix].set_leaf_node(num_node_info[1], leaf_value_type = float_dtype)\n \n elif num_node_info[0] == 0:\n tree[node_ix].set_numerical_test_node(\n feature_id = mapping_num_cols[int(num_node_info[1])],\n opname = \"<=\",\n threshold = num_node_info[2],\n threshold_type = float_dtype,\n default_left = bool(num_node_info[3]),\n left_child_key = int(num_node_info[4]),\n right_child_key = int(num_node_info[5])\n )\n\n else:\n tree[node_ix].set_categorical_test_node(\n feature_id = mapping_cat_cols[int(num_node_info[1])],\n left_categories = cat_left,\n default_left = bool(num_node_info[3]),\n left_child_key = int(num_node_info[4]),\n right_child_key = int(num_node_info[5])\n )\n\n tree[0].set_root()\n builder.append(tree)\n model = builder.commit()\n return model\n\n def drop_imputer(self):\n \"\"\"\n Drops the imputer sub-object from this model object\n\n Drops the imputer sub-object from this model object, if it was fitted with data imputation\n capabilities. The imputer, if constructed, is likely to be a very heavy object which might\n not be needed for all purposes.\n\n Returns\n -------\n self : obj\n This object\n \"\"\"\n self._cpp_obj.drop_imputer()\n return self\n\n def drop_indexer(self):\n \"\"\"\n Drops the indexer sub-object from this model object\n\n Drops the indexer sub-object from this model object, if it was constructed.\n The indexer, if constructed, is likely to be a very heavy object which might\n not be needed for all purposes.\n\n Note that reference points as added through ``set_reference_points`` are\n associated with the indexer object and will also be dropped if any were added.\n\n Returns\n -------\n self : obj\n This object\n \"\"\"\n self._cpp_obj.drop_indexer()\n return self\n\n def drop_reference_points(self):\n \"\"\"\n Drops reference points from this model\n\n Drops any reference points used for distance and/or kernel calculations\n from the model object, if any were set through ``set_reference_points``.\n\n Returns\n -------\n self : obj\n This object\n \"\"\"\n self._cpp_obj.drop_reference_points()\n return self\n\n def build_indexer(self, with_distances = False):\n \"\"\"\n Build indexer for faster terminal node predictions and/or distance calculations\n\n Builds an index of terminal nodes for faster prediction of terminal node numbers\n (calling ``predict`` with ``output=\"tree_num\"``).\n\n Optionally, can also pre-calculate terminal node distances in order to speed up\n distance calculations (calling ``predict_distance``).\n\n Note\n ----\n This feature is not available for models that use ``missing_action=\"divide\"``\n or ``new_categ_action=\"weighted\"`` (which are the defaults when passing ``ndim=1``).\n\n Parameters\n ----------\n with_distances : bool\n Whether to also pre-calculate node distances in order to speed up ``predict_distance``.\n Note that this will consume a lot more memory and make the resulting object significantly\n heavier.\n\n Returns\n -------\n self : obj\n This object\n \"\"\"\n assert self.is_fitted_\n if self.missing_action_ == \"divide\":\n raise ValueError(\"Cannot build tree indexer when using missing_action='divide'.\")\n if 
self.new_categ_action_ == \"weighted\" and self.categ_split_type_ != \"single_categ\":\n if self._ncols_categ or self.cols_categ_.shape[0]:\n raise ValueError(\"Cannot build tree indexer when using new_categ_action='weighted'.\")\n self._cpp_obj.build_tree_indices(self._is_extended_, bool(with_distances), _process_nthreads(self.nthreads))\n return self\n\n @property\n def has_indexer_(self):\n return self._cpp_obj.has_indexer()\n\n @property\n def has_reference_points_(self):\n return self._cpp_obj.has_reference_points()\n\n def set_reference_points(self, X, with_distances=False):\n \"\"\"\n Set reference points to calculate distances or kernels with\n\n Sets some points as pre-defined landmarks with respect to which distances and/or\n isolation kernel values will be calculated for arbitrary new points in calls to\n ``predict_distance`` and/or ``predict_kernel``. If any points have already been set\n as references in the model object, they will be overwritten with the new points passed here.\n\n Note that points are added in terms of their terminal node indices, but the raw data about\n them is not kept - thus, calling ``partial_fit`` later on a model with reference points\n requires passing those reference points again to add their node indices to the new tree.\n\n Be aware that adding reference points requires building a tree indexer.\n\n Parameters\n ----------\n X : array or array-like (n_samples, n_features)\n Observations to set as references for future distance and/or isolation kernel calculations.\n Can pass a NumPy array, Pandas DataFrame, or SciPy sparse CSC matrix.\n with_distances : bool\n Whether to pre-calculate node distances (this is required to calculate distance\n from arbitrary points to the reference points).\n\n Note that reference points for distances can only be set when using `assume_full_distr=False`\n (which is the default).\n\n Returns\n -------\n self : obj\n This object\n \"\"\"\n assert self.is_fitted_\n with_distances = bool(with_distances)\n\n if with_distances and (not self.assume_full_distr):\n raise ValueError(\"Cannot set reference points for distance when using 'assume_full_distr=False'.\")\n\n if self.missing_action_ == \"divide\":\n raise ValueError(\"Cannot set reference points when using missing_action='divide'.\")\n if self.new_categ_action_ == \"weighted\" and self.categ_split_type_ != \"single_categ\":\n if self._ncols_categ or self.cols_categ_.shape[0]:\n raise ValueError(\"Cannot set reference points when using new_categ_action='weighted'.\")\n\n nthreads_use = _process_nthreads(self.nthreads)\n X_num, X_cat, nrows = self._process_data_new(X, prefer_row_major = True, keep_new_cat_levels = True, allow_csr = False)\n self._cpp_obj.set_reference_points(\n _get_num_dtype(X_num, None, None), _get_int_dtype(X_num),\n X_num, X_cat, self._is_extended_,\n ctypes.c_size_t(nrows).value,\n ctypes.c_int(nthreads_use).value,\n ctypes.c_bool(with_distances).value\n )\n return self\n\n\n def subset_trees(self, trees_take):\n \"\"\"\n Subset trees of a given model\n\n Creates a new model containing only selected trees of this\n model object.\n\n Parameters\n ----------\n trees_take : array_like(n,)\n Indices of the trees of this model to copy over to the new model.\n Must be integers with numeration starting at zero.\n\n Returns\n -------\n new_model : obj\n A new IsolationForest model object, containing only the subset of trees\n from this object that was specified under 'trees_take'.\n \"\"\"\n assert self.is_fitted_\n trees_take = 
np.array(trees_take).reshape(-1).astype(ctypes.c_size_t)\n if not trees_take.shape[0]:\n raise ValueError(\"'trees_take' is empty.\")\n if trees_take.max() >= self._ntrees:\n raise ValueError(\"Attempting to take tree indices that the model does not have.\")\n new_cpp_obj = self._cpp_obj.subset_model(trees_take, self._is_extended_, self.build_imputer)\n old_cpp_obj = self._cpp_obj\n try:\n self._cpp_obj = None\n new_obj = deepcopy(self)\n new_obj._cpp_obj = new_cpp_obj\n finally:\n self._cpp_obj = old_cpp_obj\n return new_obj\n\n ### https://github.com/numpy/numpy/issues/19069\n def _is_np_int(self, el):\n return (\n np.issubdtype(el.__class__, int) or\n np.issubdtype(el.__class__, np.integer) or\n np.issubdtype(el.__class__, np.int8) or\n np.issubdtype(el.__class__, np.int16) or\n np.issubdtype(el.__class__, np.int16) or\n np.issubdtype(el.__class__, np.int32) or\n np.issubdtype(el.__class__, np.int64) or\n np.issubdtype(el.__class__, np.uint8) or\n np.issubdtype(el.__class__, np.uint16) or\n np.issubdtype(el.__class__, np.uint16) or\n np.issubdtype(el.__class__, np.uint32) or\n np.issubdtype(el.__class__, np.uint64)\n )\n\n def _denumpify_list(self, lst):\n return [int(el) if self._is_np_int(el) else el for el in lst]\n\n def _export_metadata(self):\n if (self.max_depth is not None) and (self.max_depth != \"auto\"):\n self.max_depth = int(self.max_depth)\n\n data_info = {\n \"ncols_numeric\" : int(self._ncols_numeric), ## is in c++\n \"ncols_categ\" : int(self._ncols_categ), ## is in c++\n \"cols_numeric\" : list(self.cols_numeric_),\n \"cols_categ\" : list(self.cols_categ_),\n \"cat_levels\" : [list(m) for m in self._cat_mapping],\n \"categ_cols\" : [] if self.categ_cols_ is None else list(self.categ_cols_),\n \"categ_max\" : [] if self._cat_max_lev is None else list(self._cat_max_lev)\n }\n\n ### Beaware of np.int64, which looks like a Python integer but is not accepted by json\n data_info[\"cols_numeric\"] = self._denumpify_list(data_info[\"cols_numeric\"])\n data_info[\"cols_categ\"] = self._denumpify_list(data_info[\"cols_categ\"])\n data_info[\"categ_cols\"] = self._denumpify_list(data_info[\"categ_cols\"])\n data_info[\"categ_max\"] = self._denumpify_list(data_info[\"categ_max\"])\n if len(data_info[\"cat_levels\"]):\n data_info[\"cat_levels\"] = [self._denumpify_list(lst) for lst in data_info[\"cat_levels\"]]\n if len(data_info[\"categ_cols\"]):\n data_info[\"categ_cols\"] = self._denumpify_list(data_info[\"categ_cols\"])\n\n try:\n nthreads = _process_nthreads(self.nthreads)\n except:\n nthreads = 1\n model_info = {\n \"ndim\" : int(self.ndim_),\n \"nthreads\" : nthreads,\n \"use_long_double\" : bool(self.use_long_double),\n \"build_imputer\" : bool(self.build_imputer)\n }\n\n params = {\n \"sample_size\" : self.sample_size,\n \"ntrees\" : int(self._ntrees), ## is in c++\n \"ntry\" : int(self.ntry),\n \"max_depth\" : self.max_depth,\n \"ncols_per_tree\" : self.ncols_per_tree,\n \"prob_pick_avg_gain\" : float(self.prob_pick_avg_gain_),\n \"prob_pick_pooled_gain\" : float(self.prob_pick_pooled_gain_),\n \"prob_pick_full_gain\" : float(self.prob_pick_full_gain_),\n \"prob_pick_dens\" : float(self.prob_pick_dens_),\n \"prob_pick_col_by_range\" : float(self.prob_pick_col_by_range_),\n \"prob_pick_col_by_var\" : float(self.prob_pick_col_by_var_),\n \"prob_pick_col_by_kurt\" : float(self.prob_pick_col_by_kurt_),\n \"min_gain\" : float(self.min_gain),\n \"missing_action\" : self.missing_action_, ## is in c++\n \"new_categ_action\" : self.new_categ_action_, ## is in c++\n 
\"categ_split_type\" : self.categ_split_type_, ## is in c++\n \"coefs\" : self.coefs,\n \"depth_imp\" : self.depth_imp,\n \"weigh_imp_rows\" : self.weigh_imp_rows,\n \"min_imp_obs\" : int(self.min_imp_obs),\n \"random_seed\" : self.random_seed,\n \"all_perm\" : self.all_perm,\n \"coef_by_prop\" : self.coef_by_prop,\n \"weights_as_sample_prob\" : self.weights_as_sample_prob,\n \"sample_with_replacement\" : self.sample_with_replacement,\n \"penalize_range\" : self.penalize_range,\n \"standardize_data\" : self.standardize_data,\n \"scoring_metric\" : self.scoring_metric,\n \"fast_bratio\" : self.fast_bratio,\n \"weigh_by_kurtosis\" : self.weigh_by_kurtosis,\n \"assume_full_distr\" : self.assume_full_distr,\n }\n\n if params[\"max_depth\"] == \"auto\":\n params[\"max_depth\"] = 0\n\n return {\"data_info\" : data_info, \"model_info\" : model_info, \"params\" : params}\n\n def _take_metadata(self, metadata):\n self._ncols_numeric = metadata[\"data_info\"][\"ncols_numeric\"]\n self._ncols_categ = metadata[\"data_info\"][\"ncols_categ\"]\n self.cols_numeric_ = np.array(metadata[\"data_info\"][\"cols_numeric\"])\n self.cols_categ_ = np.array(metadata[\"data_info\"][\"cols_categ\"])\n self._cat_mapping = [np.array(lst) for lst in metadata[\"data_info\"][\"cat_levels\"]]\n self.categ_cols = np.array(metadata[\"data_info\"][\"categ_cols\"]).reshape(-1).astype(int) if len(metadata[\"data_info\"][\"categ_cols\"]) else None\n self.categ_cols_ = self.categ_cols\n self._cat_max_lev = np.array(metadata[\"data_info\"][\"categ_max\"]).reshape(-1).astype(int) if (self.categ_cols_ is not None) else []\n\n self.ndim = metadata[\"model_info\"][\"ndim\"]\n self.ndim_ = self.ndim\n self.nthreads = _process_nthreads(metadata[\"model_info\"][\"nthreads\"])\n self.build_imputer = metadata[\"model_info\"][\"build_imputer\"]\n try:\n self.use_long_double = metadata[\"model_info\"][\"use_long_double\"]\n except:\n self.use_long_double = False\n\n self.sample_size = metadata[\"params\"][\"sample_size\"]\n self.ntrees = metadata[\"params\"][\"ntrees\"]\n self._ntrees = self.ntrees\n self.ntry = metadata[\"params\"][\"ntry\"]\n self.max_depth = metadata[\"params\"][\"max_depth\"]\n self.ncols_per_tree = metadata[\"params\"][\"ncols_per_tree\"]\n self.prob_pick_avg_gain = metadata[\"params\"][\"prob_pick_avg_gain\"]\n self.prob_pick_pooled_gain = metadata[\"params\"][\"prob_pick_pooled_gain\"]\n try:\n self.prob_pick_full_gain = metadata[\"params\"][\"prob_pick_full_gain\"]\n except:\n self.prob_pick_full_gain = 0.0\n try:\n self.prob_pick_dens = metadata[\"params\"][\"prob_pick_dens\"]\n except:\n self.prob_pick_dens = 0.0\n try:\n self.prob_pick_col_by_range = metadata[\"params\"][\"prob_pick_col_by_range\"]\n except:\n self.prob_pick_col_by_range = 0.0\n try:\n self.prob_pick_col_by_var = metadata[\"params\"][\"prob_pick_col_by_var\"]\n except:\n self.prob_pick_col_by_var = 0.0\n try:\n self.prob_pick_col_by_kurt = metadata[\"params\"][\"prob_pick_col_by_kurt\"]\n except:\n self.prob_pick_col_by_kurt = 0.0\n self.prob_pick_avg_gain_ = self.prob_pick_avg_gain\n self.prob_pick_pooled_gain_ = self.prob_pick_pooled_gain\n self.prob_pick_full_gain_ = self.prob_pick_full_gain\n self.prob_pick_dens_ = self.prob_pick_dens\n self.prob_pick_col_by_range_ = self.prob_pick_col_by_range\n self.prob_pick_col_by_var_ = self.prob_pick_col_by_var\n self.prob_pick_col_by_kurt_ = self.self.prob_pick_col_by_kurt\n self.min_gain = metadata[\"params\"][\"min_gain\"]\n self.missing_action = metadata[\"params\"][\"missing_action\"]\n 
self.missing_action_ = self.missing_action\n self.new_categ_action = metadata[\"params\"][\"new_categ_action\"]\n self.new_categ_action_ = self.new_categ_action\n self.categ_split_type = metadata[\"params\"][\"categ_split_type\"]\n self.categ_split_type_ = self.categ_split_type\n self.coefs = metadata[\"params\"][\"coefs\"]\n self.depth_imp = metadata[\"params\"][\"depth_imp\"]\n self.weigh_imp_rows = metadata[\"params\"][\"weigh_imp_rows\"]\n self.min_imp_obs = metadata[\"params\"][\"min_imp_obs\"]\n self.random_seed = metadata[\"params\"][\"random_seed\"]\n self.all_perm = metadata[\"params\"][\"all_perm\"]\n self.coef_by_prop = metadata[\"params\"][\"coef_by_prop\"]\n self.weights_as_sample_prob = metadata[\"params\"][\"weights_as_sample_prob\"]\n self.sample_with_replacement = metadata[\"params\"][\"sample_with_replacement\"]\n self.penalize_range = metadata[\"params\"][\"penalize_range\"]\n try:\n self.standardize_data = metadata[\"params\"][\"standardize_data\"]\n except:\n self.standardize_data = True\n try:\n self.scoring_metric = metadata[\"params\"][\"scoring_metric\"]\n except:\n self.scoring_metric = \"depth\"\n try:\n self.fast_bratio = metadata[\"params\"][\"fast_bratio\"]\n except:\n self.fast_bratio = True\n self.weigh_by_kurtosis = metadata[\"params\"][\"weigh_by_kurtosis\"]\n self.assume_full_distr = metadata[\"params\"][\"assume_full_distr\"]\n\n if \"prob_split_avg_gain\" in metadata[\"params\"].keys():\n if metadata[\"params\"][\"prob_split_avg_gain\"] > 0:\n msg = \"'prob_split_avg_gain' has been deprecated in favor of 'prob_pick_avg_gain' + 'ntry'.\"\n if self.ndim_ > 1:\n msg += \" Be sure to change these parameters if refitting this model or adding trees.\"\n warnings.warn(msg)\n if \"prob_split_pooled_gain\" in metadata[\"params\"].keys():\n if metadata[\"params\"][\"prob_split_pooled_gain\"] > 0:\n msg = \"'prob_split_pooled_gain' has been deprecated in favor of 'prob_pick_pooled_gain' + 'ntry'.\"\n if self.ndim_ > 1:\n msg += \" Be sure to change these parameters if refitting this model or adding trees.\"\n warnings.warn(msg)\n\n self.is_fitted_ = True\n self._is_extended_ = self.ndim_ > 1\n return self\n\n def __is_fitted__(self):\n return self.is_fitted_\n"
] | [
[
"numpy.issubdtype",
"numpy.vstack",
"numpy.math.factorial",
"pandas.DataFrame",
"numpy.all",
"numpy.max",
"numpy.any",
"numpy.iinfo",
"scipy.sparse.vstack",
"scipy.sparse.isspmatrix_csc",
"scipy.sparse.issparse",
"numpy.arange",
"numpy.ceil",
"numpy.zeros",
"pandas.Categorical.from_codes",
"scipy.sparse.csc_matrix",
"pandas.concat",
"numpy.ascontiguousarray",
"pandas.Categorical",
"numpy.asfortranarray",
"numpy.isnan",
"pandas.factorize",
"scipy.sparse.csr_matrix",
"scipy.sparse.isspmatrix_csr",
"numpy.array",
"numpy.setdiff1d",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
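The isolation-forest code captured in the row above documents an incremental workflow: `partial_fit` grows one tree per call, `get_num_nodes` reports per-tree node counts, and `export_model` / `import_model` round-trip the model together with its JSON metadata. A minimal sketch of that workflow follows, assuming the class can be obtained from whatever package ships the captured module; the import path, constructor arguments, and file name are assumptions (not stated in this row), so the class object is passed in explicitly rather than imported.

# Illustrative sketch only: exercises partial_fit / get_num_nodes /
# export_model / import_model as documented in the code field above.
# The constructor arguments and the on-disk file name are assumptions.
import numpy as np

def grow_incrementally(IsolationForest, batches, model_path="iso_model.bin"):
    """Fit one tree per data batch, then serialize and reload the model."""
    model = IsolationForest(ndim=1)          # constructor args are an assumption
    for X in batches:
        model.partial_fit(X)                 # per the docstring: adds a single tree per call
    n_nodes, n_terminal = model.get_num_nodes()
    model.export_model(model_path, add_metada_file=True)   # parameter spelled as in the source
    return IsolationForest.import_model(model_path), n_nodes, n_terminal

# Example input: three batches of 100 rows x 5 numeric columns.
batches = [np.random.standard_normal((100, 5)) for _ in range(3)]

Passing the class in as an argument keeps the sketch free of an invented import path; in real use one would import it from the package that provides the captured module and then call grow_incrementally(IsolationForest, batches).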
derdav3/tf-sparql | [
"6d3fe6e3b6824a4cd5468a243829b71f5b0952f2"
] | [
"ml_models/nn/batch_norm.py"
] | [
"# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Batch normalization module for nn.\n\nThis contains the module BatchNorm, which performs batch normalization on\nits inputs. It has an optional post-normalization scale and offset, and it\nmaintains moving averages of the statistics for use at test time.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom six.moves import xrange\nimport tensorflow as tf\n\nfrom tensorflow.contrib.layers.python.layers import utils\nfrom tensorflow.python.training import moving_averages\nfrom nn import base\nfrom nn import util\n\n\nclass BatchNorm(base.AbstractModule):\n \"\"\"Batch normalization module, including optional affine transformation.\n\n This module maintains exponential moving averages of the mean and\n variance, used for calculating more accurate shifted statistics at training\n time and optionally used to normalize at test time.\n\n In order to update the moving averages, the user must run the\n ops in the tf.GraphKeys.UPDATE_OPS TensorFlow collection. For example:\n\n bn = BatchNorm()\n train_net = bn(train_inputs, is_training=True)\n test_net = bn(test_inputs, is_training=False, test_local_stats=False)\n\n ...\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = tf.group(train_op)\n\n Then, whenever `train_op` is run so also are the moving average update ops.\n\n At training time, batch statistics (mean, variance) are not shared between\n separate connections. The moving averages are shared between separate\n connections. At both training and test time, the optional affine\n transformations are shared between separate connections.\n\n Local batch statistics are used by default at test time, but the moving\n averages can be used by specifying a flag when connecting. One often wants\n to use local batch statistics at test time to track the progress while the\n model is trained as it would ensure that moving average updates do not affect\n the training curves. Once the training is finished, it's often advantageous\n to use moving average statistics, since it would make evaluation agnostic to\n the batch size, and might even lead to small improvements over the local\n batch statistics.\n \"\"\"\n\n GAMMA = \"gamma\"\n BETA = \"beta\"\n POSSIBLE_INITIALIZER_KEYS = {GAMMA, BETA}\n\n def __init__(self, reduction_indices=None, offset=True, scale=False,\n decay_rate=0.999, eps=1e-3, initializers=None,\n use_legacy_moving_second_moment=False,\n name=\"batch_norm\"):\n \"\"\"Constructs a BatchNorm module.\n\n By default reduces over all input tensor dimensions apart from the final\n dimension. 
This has the effect of treating pixels in 1D/2D/3D images as\n additional elements of the minibatch.\n\n If this is not the desired behaviour, the user can specify the tensor\n indices to reduce over with `reduction_indices`.\n\n Args:\n reduction_indices: Optional indices of dimensions to reduce over.\n offset: Optional boolean to specify whether or not to apply a trained\n component-wise bias after the batch normalization and scaling.\n scale: Optional boolean to specify whether or not to apply a trained\n component-wise scale after the batch normalization.\n decay_rate: Decay rate of the exponential moving averages of the mean\n and variance.\n eps: Small number to avoid dividing by zero when diving by the standard\n deviation.\n initializers: Optional dict containing ops to initialize the weights of\n the affine transform (`gamma` and `beta`).\n use_legacy_moving_second_moment: Keep a moving second moment, rather than\n the moving variance. This is deprecated, but is kept for backwards\n compatability with old checkpoints. By default `False`.\n name: Name of the module.\n\n Raises:\n base.Error: If initializers contains any keys other\n than `gamma` or `beta`.\n ValueError: If `use_legacy_moving_second_moment` is not `True`.\n \"\"\"\n super(BatchNorm, self).__init__(name)\n\n self._reduction_indices = reduction_indices\n self._offset = offset\n self._scale = scale\n self._decay_rate = decay_rate\n self._eps = eps\n self._use_legacy_moving_second_moment = use_legacy_moving_second_moment\n\n self._initializers = util.check_initializers(\n initializers, self.POSSIBLE_INITIALIZER_KEYS)\n\n def _set_default_initializer(self, var_name):\n \"\"\"Sets up a default initializer for a variable if one doesn't exist.\n\n For the offset (beta), a zeros initializer is used by default.\n For the scale (gamma), a ones initializer is used by default.\n\n Args:\n var_name: name of variable as a string.\n \"\"\"\n if var_name not in self._initializers:\n if var_name == self.GAMMA:\n self._initializers[self.GAMMA] = tf.ones_initializer()\n elif var_name == self.BETA:\n self._initializers[self.BETA] = tf.zeros_initializer()\n\n def _build_statistics_variance(self, input_batch,\n reduction_indices, use_batch_stats):\n \"\"\"Builds the statistics part of the graph when using moving variance.\n\n Args:\n input_batch: Input batch Tensor.\n reduction_indices: Indices of `input_batch` to reduce over.\n use_batch_stats: Boolean to indicate if batch statistics should be\n calculated, otherwise moving averages are returned.\n\n Returns:\n Tuple of (mean, variance).\n \"\"\"\n # Set up our moving statistics. 
When connecting in parallel, this is shared.\n self._moving_mean = tf.get_variable(\n \"moving_mean\",\n shape=self._mean_shape,\n collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,\n tf.GraphKeys.GLOBAL_VARIABLES],\n initializer=tf.zeros_initializer(),\n trainable=False)\n\n self._moving_variance = tf.get_variable(\n \"moving_variance\",\n shape=self._mean_shape,\n collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,\n tf.GraphKeys.GLOBAL_VARIABLES],\n initializer=tf.ones_initializer(),\n trainable=False)\n\n def build_batch_stats():\n \"\"\"Builds the batch statistics calculation ops.\"\"\"\n\n # We use the moving mean as an estimate of the mean in order to perform\n # a more numerically stable calculation of the batch mean.\n # Copy for better stability.\n shift = tf.add(self._moving_mean, 0)\n counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics(\n input_batch,\n reduction_indices,\n keep_dims=True,\n shift=shift,\n name=\"batch_norm_ss\")\n\n mean, variance = tf.nn.normalize_moments(counts,\n shifted_sum_x,\n shifted_sum_x2,\n shift,\n name=\"normalize_moments\")\n\n return mean, variance\n\n def build_moving_stats():\n return (\n tf.identity(self._moving_mean),\n tf.identity(self._moving_variance),\n )\n\n mean, variance = utils.smart_cond(\n use_batch_stats,\n build_batch_stats,\n build_moving_stats,\n )\n\n return mean, variance\n\n def _build_statistics_second_moment(self, input_batch,\n reduction_indices, use_batch_stats):\n \"\"\"Builds the statistics part of the graph when using moving second moment.\n\n Args:\n input_batch: Input batch Tensor.\n reduction_indices: Indices of `input_batch` to reduce over.\n use_batch_stats: Boolean to indicate if batch statistics should be\n calculated, otherwise moving averages are returned.\n\n Returns:\n Tuple of (mean, variance, second_moment).\n \"\"\"\n # Set up our moving statistics. 
When connecting in parallel, this is shared.\n self._moving_mean = tf.get_variable(\n \"moving_mean\",\n shape=self._mean_shape,\n collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,\n tf.GraphKeys.GLOBAL_VARIABLES],\n initializer=tf.zeros_initializer(),\n trainable=False)\n\n self._moving_second_moment = tf.get_variable(\n \"moving_second_moment\",\n shape=self._mean_shape,\n collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,\n tf.GraphKeys.GLOBAL_VARIABLES],\n initializer=tf.ones_initializer(),\n trainable=False)\n\n self._moving_variance = tf.subtract(self._moving_second_moment,\n tf.square(self._moving_mean),\n name=\"moving_variance\")\n\n def build_batch_stats():\n \"\"\"Builds the batch statistics calculation ops.\"\"\"\n\n # Copy for better stability.\n # We use the moving mean as an estimate of the mean in order to perform\n # a more numerically stable calculation of the batch mean.\n shift = tf.add(self._moving_mean, 0)\n counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics(\n input_batch,\n reduction_indices,\n keep_dims=True,\n shift=shift,\n name=\"batch_norm_ss\")\n\n mean, variance = tf.nn.normalize_moments(counts,\n shifted_sum_x,\n shifted_sum_x2,\n shift,\n name=\"normalize_moments\")\n second_moment = variance + tf.square(mean)\n\n return mean, variance, second_moment\n\n def build_moving_stats():\n return (\n tf.identity(self._moving_mean),\n tf.identity(self._moving_variance),\n tf.identity(self._moving_second_moment),\n )\n\n mean, variance, second_moment = utils.smart_cond(\n use_batch_stats,\n build_batch_stats,\n build_moving_stats,\n )\n\n return mean, variance, second_moment\n\n def _build_update_ops_variance(self, mean, variance, is_training):\n \"\"\"Builds the moving average update ops when using moving variance.\n\n Args:\n mean: The mean value to update with.\n variance: The variance value to update with.\n is_training: Boolean Tensor to indicate if we're currently in\n training mode.\n \"\"\"\n\n def build_update_ops():\n \"\"\"Builds the exponential moving average update ops.\"\"\"\n\n update_mean_op = moving_averages.assign_moving_average(\n variable=self._moving_mean,\n value=mean,\n decay=self._decay_rate,\n name=\"update_moving_mean\").op\n\n update_variance_op = moving_averages.assign_moving_average(\n variable=self._moving_variance,\n value=variance,\n decay=self._decay_rate,\n name=\"update_moving_variance\").op\n\n return update_mean_op, update_variance_op\n\n def build_no_ops():\n return (tf.no_op(), tf.no_op())\n\n # Only make the ops if we know that `is_training=True`, or the value of\n # `is_training` is unknown.\n is_training_const = utils.constant_value(is_training)\n if is_training_const is None or is_training_const:\n update_mean_op, update_variance_op = utils.smart_cond(\n is_training,\n build_update_ops,\n build_no_ops,\n )\n\n # Every new connection creates a new op which adds its contribution\n # to the running average when ran.\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mean_op)\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_variance_op)\n\n def _build_update_ops_second_moment(self, mean, second_moment, is_training):\n \"\"\"Builds the moving average update ops when using the moving second moment.\n\n Args:\n mean: The mean value to update with.\n second_moment: The second_moment value to update with.\n is_training: Boolean Tensor to indicate if we're currently in\n training mode.\n \"\"\"\n\n def build_update_ops():\n \"\"\"Builds the exponential moving average update ops.\"\"\"\n\n 
update_mean_op = moving_averages.assign_moving_average(\n variable=self._moving_mean,\n value=mean,\n decay=self._decay_rate,\n name=\"update_moving_mean\").op\n\n update_second_moment_op = moving_averages.assign_moving_average(\n variable=self._moving_second_moment,\n value=second_moment,\n decay=self._decay_rate,\n name=\"update_moving_second_moment\").op\n\n return update_mean_op, update_second_moment_op\n\n def build_no_ops():\n return (tf.no_op(), tf.no_op())\n\n # Only make the ops if we know that `is_training=True`, or the value of\n # `is_training` is unknown.\n is_training_const = utils.constant_value(is_training)\n if is_training_const is None or is_training_const:\n update_mean_op, update_second_moment_op = utils.smart_cond(\n is_training,\n build_update_ops,\n build_no_ops,\n )\n\n # Every new connection creates a new op which adds its contribution\n # to the running average when ran.\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mean_op)\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_second_moment_op)\n\n def _build(self, input_batch, is_training=True, test_local_stats=True):\n \"\"\"Connects the BatchNorm module into the graph.\n\n Args:\n input_batch: A Tensor of arbitrary dimension. By default, the final\n dimension is not reduced over when computing the minibatch statistics.\n is_training: A boolean to indicate if the module should be connected in\n training mode, meaning the moving averages are updated. By default\n `True`. Can be a Tensor.\n test_local_stats: A boolean to indicate if local batch statistics should\n be used when `is_training=False`. If not, moving averages are used.\n By default `True`. Can be a Tensor.\n\n Returns:\n A tensor with the same shape as `input_batch`.\n\n Raises:\n base.IncompatibleShapeError: If `reduction_indices` is not valid for the\n input shape or has negative entries.\n base.NotSupportedError: If `input_batch` has data type of `tf.float16`.\n \"\"\"\n input_shape = input_batch.get_shape()\n\n if self._reduction_indices is not None:\n if len(self._reduction_indices) > len(input_shape):\n raise base.IncompatibleShapeError(\n \"Too many reduction indices specified.\")\n\n if max(self._reduction_indices) >= len(input_shape):\n raise base.IncompatibleShapeError(\n \"Reduction index too large for input shape.\")\n\n if min(self._reduction_indices) < 0:\n raise base.IncompatibleShapeError(\n \"Reduction indeces must be non-negative.\")\n\n reduction_indices = self._reduction_indices\n else:\n # Reduce over all dimensions except the last.\n reduction_indices = range(len(input_shape))[:-1]\n\n if input_batch.dtype == tf.float16:\n raise base.NotSupportedError(\n \"BatchNorm does not support `tf.float16`, insufficient \"\n \"precision for calculating sufficient statistics.\")\n\n self._mean_shape = input_batch.get_shape().as_list()\n for index in reduction_indices:\n self._mean_shape[index] = 1\n\n use_batch_stats = is_training | test_local_stats\n\n # Use the legacy moving second moment if the flag is set.\n if self._use_legacy_moving_second_moment:\n tf.logging.warning(\n \"nn.BatchNorm `use_legacy_second_moment=True` is deprecated.\")\n\n mean, variance, second_moment = self._build_statistics_second_moment(\n input_batch,\n reduction_indices,\n use_batch_stats)\n\n self._build_update_ops_second_moment(mean, second_moment, is_training)\n else:\n mean, variance = self._build_statistics_variance(\n input_batch,\n reduction_indices,\n use_batch_stats)\n\n self._build_update_ops_variance(mean, variance, is_training)\n\n # Set up 
optional scale and offset factors.\n if self._offset:\n self._set_default_initializer(self.BETA)\n self._beta = tf.get_variable(\n self.BETA,\n shape=self._mean_shape,\n initializer=self._initializers[self.BETA])\n else:\n self._beta = None\n\n if self._scale:\n self._set_default_initializer(self.GAMMA)\n self._gamma = tf.get_variable(\n self.GAMMA,\n shape=self._mean_shape,\n initializer=self._initializers[self.GAMMA])\n else:\n self._gamma = None\n\n out = tf.nn.batch_normalization(\n input_batch,\n mean,\n variance,\n self._beta,\n self._gamma,\n self._eps,\n name=\"batch_norm\")\n\n return out\n\n @property\n def moving_mean(self):\n self._ensure_is_connected()\n return self._moving_mean\n\n @property\n def moving_second_moment(self):\n self._ensure_is_connected()\n return self._moving_second_moment\n\n @property\n def moving_variance(self):\n self._ensure_is_connected()\n return self._moving_variance\n\n @property\n def beta(self):\n self._ensure_is_connected()\n\n if self._beta is None:\n raise base.Error(\n \"Batch normalization doesn't have an offset, so no beta\")\n else:\n return self._beta\n\n @property\n def gamma(self):\n self._ensure_is_connected()\n\n if self._gamma is None:\n raise base.Error(\n \"Batch normalization doesn't have a scale, so no gamma\")\n else:\n return self._gamma\n"
] | [
[
"tensorflow.contrib.layers.python.layers.utils.constant_value",
"tensorflow.logging.warning",
"tensorflow.nn.batch_normalization",
"tensorflow.get_variable",
"tensorflow.python.training.moving_averages.assign_moving_average",
"tensorflow.zeros_initializer",
"tensorflow.nn.normalize_moments",
"tensorflow.identity",
"tensorflow.nn.sufficient_statistics",
"tensorflow.contrib.layers.python.layers.utils.smart_cond",
"tensorflow.add",
"tensorflow.no_op",
"tensorflow.square",
"tensorflow.ones_initializer",
"tensorflow.add_to_collection"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
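The BatchNorm module captured in the row above spells out its intended training-time wiring in the class docstring: connect the module with `is_training=True`, then run the ops collected under `tf.GraphKeys.UPDATE_OPS` alongside the training op so the moving averages get updated. The sketch below restates that wiring as a helper; it assumes TensorFlow 1.x graph mode and takes the `BatchNorm` class as an argument because the exact import path inside the repo is an assumption.

# Illustrative sketch only: the usage pattern given in the BatchNorm
# class docstring above (moving-average updates via tf.GraphKeys.UPDATE_OPS).
# Assumes TensorFlow 1.x graph mode.
import tensorflow as tf

def connect_batch_norm(BatchNorm, train_inputs, test_inputs, train_op):
    bn = BatchNorm(offset=True, scale=True)   # flags per the captured __init__ signature
    train_net = bn(train_inputs, is_training=True)
    test_net = bn(test_inputs, is_training=False, test_local_stats=False)
    # Group the moving-average update ops with the training op, as the
    # module docstring instructs.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = tf.group(train_op)
    return train_net, test_net, train_op

With this wiring, every run of the returned train_op also runs the moving-average update ops, matching the behaviour described in the captured docstring.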
mswimmer/extractnet | [
"1d5202a35bd659f502e6f5496a9b1933f0098b91"
] | [
"test/test_kohlschuetter.py"
] | [
"import io\nimport os\nimport re\n\nimport numpy as np\nimport pytest\nfrom lxml import etree\n\nfrom extractnet import Blockifier, BlockifyError\nfrom extractnet.features import KohlschuetterFeatures\nfrom extractnet.compat import range_, str_cast\n\n\[email protected](scope=\"module\")\ndef html():\n fname = os.path.join(\"test\", \"datafiles\", \"HTML\", \"page_for_testing.html\")\n with io.open(fname, mode=\"rt\", encoding='utf8') as f:\n html_ = f.read()\n return html_\n\n\ndef block_output_tokens(blocks, true_tokens):\n \"\"\"\n blocks = the output from blockify\n true_tokens = a list of true tokens\n \"\"\"\n assert len(blocks) == len(true_tokens)\n for k in range_(len(blocks)):\n block_tokens = re.split(r\"\\s+\", blocks[k].text.strip())\n assert block_tokens == true_tokens[k]\n\n\ndef link_output_tokens(blocks, true_tokens):\n assert len(blocks) == len(true_tokens)\n link_tokens = [ele.link_tokens for ele in blocks]\n for k in range_(len(link_tokens)):\n assert link_tokens[k] == true_tokens[k]\n\n\ndef css_output_tokens(blocks, attrib, true_tokens):\n assert len(blocks) == len(true_tokens)\n for k in range_(len(blocks)):\n css_tokens = re.split(r\"\\s+\", blocks[k].css[attrib].strip())\n assert css_tokens == true_tokens[k]\n\n\nclass TestBlockifier(object):\n\n def test_lxml_error(self):\n \"\"\"tests the case where lxml raises an error during parsing\n\n also handles case where lxml returns None for the tree\"\"\"\n # this raises an error in parsing\n with pytest.raises(BlockifyError):\n Blockifier.blockify(\"\")\n # this returns None in lxml\n assert etree.fromstring(\"<!--\", etree.HTMLParser(recover=True)) is None\n with pytest.raises(BlockifyError):\n Blockifier.blockify(\"<!--\")\n\n def test_very_simple(self):\n \"\"\"test_very_simple\"\"\"\n s = \"\"\"<div>some text\n <script> skip this </script>\n more text here\n </div>\"\"\"\n blocks = Blockifier.blockify(s)\n block_output_tokens(blocks, [['some', 'text', 'more', 'text', 'here']])\n\n def test_very_simple2(self):\n s = \"\"\"<div>some text <i>in italic</i> and something else\n <script> <div>skip this</div> </script>\n <b>bold stuff</b> after the script\n </div>\"\"\"\n blocks = Blockifier.blockify(s)\n block_output_tokens(\n blocks,\n [['some', 'text', 'in', 'italic', 'and', 'something', 'else', 'bold', 'stuff', 'after', 'the', 'script']]\n )\n\n @staticmethod\n def count_divs(tree):\n div_xpath = etree.XPath(\"//div\")\n TestBlockifier.div_count = len(div_xpath(tree))\n\n def test_callback(self):\n s = \"\"\"<div>some text <i>in italic</i> and something else\n <pre> <div>skip this</div> </pre>\n <b>bold stuff</b> after the script\n </div>\"\"\"\n blocks = Blockifier.blockify(s, parse_callback=self.count_divs)\n print(TestBlockifier.div_count)\n assert self.div_count == 2\n\n def test_simple_two_blocks(self):\n s = \"\"\"<h1>A title <i>with italics</i> and other words</h1>\n some text outside the h1\n <div>a div <span class=\"test\"> with a span </span> more </div>\"\"\"\n blocks = Blockifier.blockify(s)\n block_output_tokens(\n blocks,\n [['A', 'title', 'with', 'italics', 'and', 'other', 'words', 'some', 'text', 'outside', 'the', 'h1'],\n ['a', 'div', 'with', 'a', 'span', 'more']]\n )\n\n def test_comment(self):\n s = \"\"\"<H1>h1 tag word</H1>\n <!-- a comment -->\n orphaned text\n <TABLE><tr><td>table data</td></tr><tr><td>second row</td></tr></TABLE>\n final\n \"\"\"\n blocks = Blockifier.blockify(s)\n block_output_tokens(\n blocks,\n [['h1', 'tag', 'word', 'orphaned', 'text'],\n ['table', 'data', 'second', 'row', 
'final']]\n )\n\n def test_empty_blocks(self):\n s = \"\"\"<div> .! </div>\n some text\n <h1> in an h1 </h1>\n <p> ! _ </p>\n \"\"\"\n blocks = Blockifier.blockify(s)\n block_output_tokens(blocks, [['.!', 'some', 'text'], ['in', 'an', 'h1']])\n\n def test_nested_blocks(self):\n s = \"\"\"initial text\n <div>div <p> with paragraph </p>\n after Paragraph\n <div> nested div <div> and again </div>here</div>\n </div>\n final\n <div> <i> italic </i> before <h1>tag</h1></div>\"\"\"\n blocks = Blockifier.blockify(s)\n block_output_tokens(\n blocks,\n [['initial', 'text'],\n ['div'],\n ['with', 'paragraph', 'after', 'Paragraph'],\n ['nested', 'div'],\n ['and', 'again', 'here', 'final'],\n ['italic', 'before'],\n ['tag']]\n )\n\n def test_anchors(self):\n s = \"\"\"<a href=\".\">anchor text</a>\n more\n <div>text <a href=\".\">123</a><div>MORE!</div></div>\n an img link<a href=\".\"><img src=\".\"></a>there\n <table><tr><td><a href=\".\">WILL <img src=\".\"> THIS PASS <b>THE TEST</b> ??</a></tr></td></table>\"\"\"\n blocks = Blockifier.blockify(s)\n block_output_tokens(\n blocks,\n [['anchor', 'text', 'more'],\n ['text', '123'],\n ['MORE!', 'an', 'img', 'link', 'there'],\n ['WILL', 'THIS', 'PASS', 'THE', 'TEST', '??']]\n )\n link_output_tokens(\n blocks,\n [['anchor', 'text'],\n ['123'],\n [],\n ['WILL', 'THIS', 'PASS', 'THE', 'TEST', '??']]\n )\n\n def test_unicode(self):\n s = u\"\"\"<div><div><a href=\".\"> the registered trademark \\xae</a></div></div>\"\"\"\n blocks = Blockifier.blockify(s)\n block_output_tokens(blocks, [['the', 'registered', 'trademark', u'\\xae']])\n link_output_tokens(blocks, [['the', 'registered', 'trademark', u'\\xae']])\n\n def test_all_non_english(self):\n s = u\"\"\"<div> <div> \\u03b4\\u03bf\\u03b3 </div> <div> <a href=\"summer\">\\xe9t\\xe9</a> </div>\n <div> \\u62a5\\u9053\\u4e00\\u51fa </div> </div>\"\"\"\n blocks = Blockifier.blockify(s)\n block_output_tokens(\n blocks,\n [[u'\\u03b4\\u03bf\\u03b3'],\n [u'\\xe9t\\xe9'],\n [u'\\u62a5\\u9053\\u4e00\\u51fa']]\n )\n link_output_tokens(blocks, [[], [u'\\xe9t\\xe9'], []])\n\n def test_class_id(self):\n s = \"\"\"<div CLASS='d1'>text in div\n <h1 id=\"HEADER\">header</h1>\n <div class=\"nested\">dragnet</div>\n </div>\"\"\"\n blocks = Blockifier.blockify(s)\n block_output_tokens(\n blocks, [['text', 'in', 'div'], ['header'], ['dragnet']])\n css_output_tokens(\n blocks, 'id', [[''], ['header'], ['']])\n css_output_tokens(\n blocks, 'class', [['d1'], [''], ['nested']])\n\n def test_class_id_unicode(self):\n s = b\"\"\"<div CLASS=' class1 \\xc2\\xae'>text in div\n <h1 id=\"HEADER\">header</h1>\n </div>\"\"\"\n blocks = Blockifier.blockify(s, encoding='utf-8')\n block_output_tokens(\n blocks, [['text', 'in', 'div'], ['header']])\n css_output_tokens(\n blocks, 'id', [[''], ['header']])\n css_output_tokens(\n blocks, 'class', [['class1', str_cast(b'\\xc2\\xae')], ['']])\n\n def test_invalid_bytes(self):\n # \\x80 is invalid utf-8\n s = b\"\"\"<div CLASS='\\x80'>text in div</div><p>invalid bytes \\x80</p>\"\"\"\n blocks = Blockifier.blockify(s, encoding='utf-8')\n block_output_tokens(blocks, [['text', 'in', 'div']])\n css_output_tokens(blocks, 'class', [[str_cast(b'\\xc2\\x80')]])\n\n def test_big_html(self, html):\n s = html\n blocks = Blockifier.blockify(s)\n block_output_tokens(\n blocks,\n [['Inside', 'the', 'h1', 'tag'],\n ['First', 'line', 'of', 'the', 'content', 'in', 'bold'],\n ['A', 'paragraph', 'with', 'a', 'link', 'and', 'some', 'additional', 'words.'],\n ['Second', 'paragraph', 'Insert', 'a', 'block', 'quote', 
'here'],\n ['Some', 'more', 'text', 'after', 'the', 'image'],\n ['An', 'h2', 'tag', 'just', 'for', 'kicks'],\n ['Finally', 'more', 'text', 'at', 'the', 'end', 'of', 'the', 'content'],\n ['This', 'is', 'a', 'comment'],\n ['with', 'two', 'paragraphs', 'and', 'some', 'comment', 'spam'],\n ['Second', 'comment'],\n ['Footer', 'text']]\n )\n link_output_tokens(\n blocks,\n [[],\n [],\n ['a', 'link'],\n [],\n [],\n [],\n [],\n [],\n ['and', 'some', 'comment', 'spam'],\n [],\n []]\n )\n css_output_tokens(\n blocks, 'class',\n [[''],\n ['title'],\n ['link'],\n [''],\n [''],\n [''],\n [''],\n [''],\n [''],\n [''],\n ['footer']]\n )\n css_output_tokens(\n blocks, 'id',\n [[''],\n ['content'],\n ['para'],\n [''],\n [''],\n [''],\n [''],\n [''],\n [''],\n [''],\n ['']]\n )\n\n\nclass TestKohlschuetter(object):\n\n def test_small_doc(self):\n kf = KohlschuetterFeatures()\n s = \"<html></html>\"\n with pytest.raises(ValueError):\n kf.transform(Blockifier.blockify(s))\n s = \"<html> <p>a</p> <div>b</div> </html>\"\n with pytest.raises(ValueError):\n kf.transform(Blockifier.blockify(s))\n\n def test_transform(self):\n kf = KohlschuetterFeatures()\n s = \"<html> <p>first </p> <div> <p>second block with <a href=''>anchor</a> </p> <p>the third block</p> </div> </html>\"\n blocks = Blockifier.blockify(s)\n features = kf.transform(blocks)\n block_output_tokens(blocks, [[\"first\"], [\"second\", \"block\", \"with\", \"anchor\"], [\"the\", \"third\", \"block\"]])\n link_output_tokens(blocks, [[], [\"anchor\"], []])\n\n text_density = [1.0, 4.0, 3.0]\n link_density = [1.0, 0.25, 1.0 / 3.0]\n\n assert np.allclose(features[0, :], [0.0, 0.0, link_density[0], text_density[0], link_density[1], text_density[1]])\n assert np.allclose(features[1, :], [link_density[0], text_density[0], link_density[1], text_density[1], link_density[2], text_density[2]])\n assert np.allclose(features[2, :], [link_density[1], text_density[1], link_density[2], text_density[2], 0.0, 0.0])\n"
] | [
[
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ramirezalbert3/my_agents | [
"fd8ffc8c2f157292b4d309ab1899326007aea726"
] | [
"my_agents/agents/nstep_agent.py"
] | [
"from collections import deque\nfrom typing import Tuple\nimport numpy as np\nfrom gym import logger\nfrom tensorflow import keras\n\n\"\"\"\n# References\n# 1. https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf\n# 2. https://arxiv.org/pdf/1509.06461.pdf\n# 3. https://arxiv.org/pdf/1703.01327.pdf\n# 4. https://drive.google.com/file/d/1opPSz5AZ_kVa1uWOdOiveNiBFiEOHjkG/view\n#\n# [1] Minh 2015 is baseline DQN\n# [2] Van Hasselt 2015, proposes using online model for greedy policy and target model for evaluation\n# [3] Article about n-step methods which is roughly the same as what's found in [4]\n# [4] Sutton's reinforcement learning book, chapter 6 covers n-step methods\n\"\"\"\n\n\ndef build_dense_network(num_actions: int, state_shape: tuple, hidden_layers: list = [24, 24]):\n \"\"\"\n # TODO: optimizer and losses as arguments\n \"\"\"\n\n model = keras.models.Sequential()\n\n for idx, val in enumerate(hidden_layers):\n if idx == 0:\n model.add(keras.layers.Dense(val,\n activation='relu',\n input_shape=state_shape,\n name='input'))\n else:\n model.add(keras.layers.Dense(val,\n activation='relu',\n name='hidden_layer_{}'.format(idx)))\n\n model.add(keras.layers.Dense(num_actions,\n name='output'))\n\n # Using Keras optimizers and not tf because of warnings when saving\n # tf optimizers apparently need to be recompiled upon loading, theyre not as convenient\n model.compile(optimizer='adam',\n loss='mean_squared_error',\n metrics=['mae']) # mean absolute error\n\n return model\n\n\nclass NStepDDQNAgent:\n \"\"\"\n DDQN as per [4. Van Hasselt 2015] which is an improvement\n of [3. Minh 2015] 'Algorithm 1: deep Q-learning with experience replay'\n also implements n-step learning updates\n Target network is used for evaluations of state/action values\n Online network is trained and used for greedy policy decisions\n states need to be properly conditioned for the agent before being used\n \"\"\"\n\n def __init__(self, num_actions: int, state_shape: tuple,\n update_horizon: int = 3, gamma: float = 0.9,\n target_update_freq: int = 200,\n prebuilt_model: keras.models.Sequential = None) -> None:\n if prebuilt_model is not None:\n if num_actions is not None or state_shape is not None:\n logger.warn('Provided num_actions and state_shape are not used when passing a prebuilt model.'\n 'Set them to None for clarity')\n self._q_impl = prebuilt_model\n else:\n self._q_impl = build_dense_network(num_actions, state_shape)\n\n # Start target network = to online network\n self._target_q_impl = keras.models.Sequential.from_config(self._q_impl.get_config())\n self._update_target_model()\n\n self._n = update_horizon\n self._target_update_freq = target_update_freq\n self._gamma = gamma\n self._memory = deque(maxlen=2000)\n\n def act(self, state: np.ndarray) -> int:\n \"\"\" Get greedy action \"\"\"\n return self.policy(state)[0]\n\n def process_observation(self, state: np.ndarray, action: int, reward: float,\n next_state: np.ndarray, done: bool) -> None:\n \"\"\" Store observation to train later in batches \"\"\"\n self._memory.append((state, action, reward, next_state, done))\n\n def train(self, step_num: int, batch_size: int = 64, epochs: int = 3) -> None:\n \"\"\" 're-fit' Q replaying random samples from memory \"\"\"\n if len(self._memory) <= batch_size:\n logger.warning('Cant train on an empty memory, warm-up the agent!')\n return\n\n # TODO: split in functionality between _sample_nand _observations_to_train is not clear\n states, actions, rewards, next_states, dones, gammas = 
self._sample_n_transitions(batch_size)\n states, target_qs = self._observations_to_train_data(np.array(states),\n np.array(actions),\n np.array(rewards),\n np.array(next_states),\n np.array(dones),\n np.array(gammas))\n\n result = self._q_impl.fit(states, target_qs, batch_size=batch_size, epochs=epochs, verbose=0)\n\n if step_num % self._target_update_freq == 0:\n self._update_target_model()\n\n return result\n\n def _sample_n_transitions(self, batch_size):\n \"\"\" sample batch_size transitions of n-length \"\"\"\n indexes = np.random.uniform(0, len(self._memory), batch_size).astype(int)\n\n states = []\n actions = []\n rewards = []\n next_states = []\n dones = []\n gammas = []\n for idx in indexes:\n r = 0 # we'll be accumulating rewards\n for n in range(self._n):\n if idx + n == len(self._memory):\n break\n sn, an, rn, nsn, dn = self._memory[idx + n]\n if n == 0:\n states.append(sn)\n actions.append(an)\n r += rn * (self._gamma ** n)\n if dn:\n break\n rewards.append(r)\n next_states.append(nsn)\n dones.append(dn)\n gammas.append(self._gamma ** (n + 1))\n return states, actions, rewards, next_states, dones, gammas\n\n def _update_target_model(self):\n self._target_q_impl.set_weights(self._q_impl.get_weights())\n\n def _observations_to_train_data(self, states: np.ndarray, actions: np.ndarray, rewards: np.ndarray,\n next_states: np.ndarray, dones: np.ndarray, gammas: np.ndarray) -> Tuple[\n np.ndarray, np.ndarray]:\n \"\"\" get states observations, rewards and action and return X, y for training \"\"\"\n assert (states.shape == next_states.shape)\n assert (actions.shape == rewards.shape == dones.shape == gammas.shape)\n assert (len(states) == len(actions))\n\n batch_size = len(actions) # TODO: this will fail if not in batches\n targets = rewards + np.logical_not(dones) * gammas * self.V(next_states, use_target=True)\n target_qs = self.Q(states, use_target=False)\n\n target_qs[np.arange(batch_size), actions] = targets\n return states, target_qs\n\n def Q(self, states: np.ndarray, use_target: bool = False) -> np.ndarray:\n \"\"\" value of any taken action in a batch of states and playing perfectly onwards \"\"\"\n if len(states.shape) == 1:\n # we're evaluating a single example -> make batch_size = 1\n states = states[np.newaxis]\n\n if use_target:\n # This happens during training/value evaluation according to [4]\n self._target_q_impl.predict(states)\n\n # This happens during greedy policy evaluation according to [4]\n return self._q_impl.predict(states)\n\n def policy(self, states: np.ndarray, use_target: bool = False) -> int:\n \"\"\" optimal greedy action for a batch of states \"\"\"\n return np.argmax(self.Q(states, use_target), axis=1) # axis=0 is batch axis\n\n def V(self, states: np.ndarray, use_target: bool = False) -> float:\n \"\"\" value of being in a batch of states (and playing perfectly onwards) \"\"\"\n return np.max(self.Q(states, use_target), axis=1) # axis=0 is batch axis\n\n def save(self, file_path: str = 'nstep_agent.h5') -> None:\n \"\"\" Save online trained model to .h5 file\"\"\"\n if not file_path.endswith('.h5'):\n file_path += '.h5'\n logger.info('Saving agent to: ' + file_path)\n self._q_impl.save(file_path)\n\n @staticmethod\n def from_h5(file_path: str = 'nstep_agent.h5',\n update_horizon: int = 3, gamma: float = 0.9,\n target_update_freq: int = 200) -> 'NStepDDQNAgent':\n \"\"\" Load trained model from .h5 file\"\"\"\n logger.info('Loading agent from: ' + file_path)\n model = keras.models.load_model(file_path)\n agent = NStepDDQNAgent(None, None,\n 
update_horizon=update_horizon, gamma=gamma,\n target_update_freq=target_update_freq, prebuilt_model=model)\n return agent\n"
] | [
[
"numpy.logical_not",
"tensorflow.keras.models.load_model",
"tensorflow.keras.layers.Dense",
"numpy.arange",
"numpy.array",
"tensorflow.keras.models.Sequential"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
matthewfeickert/jax | [
"b0d96bd42440231cc7e98c61f52106f46578fca4"
] | [
"tests/sparse_test.py"
] | [
"# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport contextlib\nfrom functools import partial\nimport itertools\nimport operator\nimport random\nimport unittest\nfrom typing import NamedTuple, Tuple\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nimport jax\nimport jax.random\nfrom jax import config\nfrom jax import dtypes\nfrom jax.experimental import sparse\nfrom jax import lax\nfrom jax._src.lib import cusparse\nfrom jax._src.lib import xla_bridge\nfrom jax import jit\nfrom jax import tree_util\nfrom jax import vmap\nfrom jax._src import test_util as jtu\nfrom jax._src.lax.lax import remaining, DotDimensionNumbers\nfrom jax import xla\nimport jax.numpy as jnp\nfrom jax.util import split_list\nimport numpy as np\nimport scipy.sparse\nconfig.parse_flags_with_absl()\nFLAGS = config.FLAGS\n\nMATMUL_TOL = {\n np.float32: 1E-5,\n np.float64: 1E-10,\n np.complex64: 1e-5,\n np.complex128: 1E-10,\n}\n\nclass BcooDotGeneralProperties(NamedTuple):\n lhs_shape: Tuple[int]\n rhs_shape: Tuple[int]\n dtype: np.dtype\n n_batch: int\n n_dense: int\n dimension_numbers: DotDimensionNumbers\n\n def testcase_name(self):\n return \"_{}_{}_nbatch={}_ndense={}_dimension_numbers={}\".format(\n jtu.format_shape_dtype_string(self.lhs_shape, self.dtype),\n jtu.format_shape_dtype_string(self.rhs_shape, self.dtype),\n self.n_batch, self.n_dense, self.dimension_numbers)\n\n\ndef _iter_subsets(s):\n return itertools.chain.from_iterable(itertools.combinations(s, n) for n in range(len(s) + 1))\n\ndef _generate_bcoo_dot_general_properties(shapes, dtypes) -> BcooDotGeneralProperties:\n \"\"\"Generator of properties for bcoo_dot_general tests.\"\"\"\n rng = random.Random(0)\n\n for shape in shapes:\n for n_batch in range(len(shape) + 1):\n for n_dense in range(len(shape) + 1 - n_batch):\n n_sparse = len(shape) - n_batch - n_dense\n subsets = split_list(range(len(shape)), [n_batch, n_sparse])\n for batch_dims in _iter_subsets(range(n_batch)):\n for contracting_dims in _iter_subsets(remaining(range(n_batch + n_sparse), batch_dims)):\n # We want coverage of permutations & dtypes without generating hundreds of thousands\n # of test cases; we do this by deterministic pseudo-random sampling instead of iterating.\n rhs_permute = rng.sample(range(len(shape)), len(shape))\n lhs_permute = list(itertools.chain.from_iterable(\n rng.sample(subset, len(subset)) for subset in subsets))\n yield BcooDotGeneralProperties(\n lhs_shape=tuple(shape[p] for p in lhs_permute),\n rhs_shape=tuple(shape[p] for p in rhs_permute),\n dtype=rng.choice(dtypes),\n n_batch=n_batch,\n n_dense=n_dense,\n dimension_numbers=(\n ([lhs_permute.index(d) for d in contracting_dims], [rhs_permute.index(d) for d in contracting_dims]),\n ([lhs_permute.index(d) for d in batch_dims], [rhs_permute.index(d) for d in batch_dims])\n ),\n )\n\n\nall_dtypes = jtu.dtypes.integer + jtu.dtypes.floating + jtu.dtypes.complex\n\n\ndef rand_sparse(rng, nse=0.5, post=lambda x: x, rand_method=jtu.rand_default):\n def 
_rand_sparse(shape, dtype, nse=nse):\n rand = rand_method(rng)\n size = np.prod(shape).astype(int)\n if 0 <= nse < 1:\n nse = nse * size\n nse = min(size, int(nse))\n M = rand(shape, dtype)\n indices = rng.choice(size, size - nse, replace=False)\n M.flat[indices] = 0\n return post(M)\n return _rand_sparse\n\n\nclass cuSparseTest(jtu.JaxTestCase):\n def gpu_dense_conversion_warning_context(self, dtype):\n if jtu.device_under_test() == \"gpu\" and np.issubdtype(dtype, np.integer):\n return self.assertWarns(sparse.CuSparseEfficiencyWarning)\n return contextlib.nullcontext()\n\n def gpu_matmul_warning_context(self, dtype):\n if jtu.device_under_test() == \"gpu\" and dtype not in [np.float32, np.float64, np.complex64, np.complex128]:\n return self.assertWarns(sparse.CuSparseEfficiencyWarning)\n return contextlib.nullcontext()\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}\".format(jtu.format_shape_dtype_string(shape, dtype)),\n \"shape\": shape, \"dtype\": dtype}\n for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]\n for dtype in all_dtypes))\n def test_csr_todense(self, shape, dtype):\n rng = rand_sparse(self.rng(), post=scipy.sparse.csr_matrix)\n M = rng(shape, dtype)\n\n args = (M.data, M.indices, M.indptr)\n todense = lambda *args: sparse.csr_todense(*args, shape=M.shape)\n\n self.assertArraysEqual(M.toarray(), todense(*args))\n with self.gpu_dense_conversion_warning_context(dtype):\n self.assertArraysEqual(M.toarray(), jit(todense)(*args))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}\".format(jtu.format_shape_dtype_string(shape, dtype)),\n \"shape\": shape, \"dtype\": dtype}\n for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]\n for dtype in jtu.dtypes.floating + jtu.dtypes.complex))\n def test_csr_todense_ad(self, shape, dtype):\n rng = rand_sparse(self.rng(), post=jnp.array)\n M = rng(shape, dtype)\n data, indices, indptr = sparse.csr_fromdense(M, nse=(M != 0).sum())\n row, col = sparse.util._csr_to_coo(indices, indptr)\n f = lambda data: sparse.csr_todense(data, indices, indptr, shape=M.shape)\n\n # Forward-mode\n primals, tangents = jax.jvp(f, [data], [jnp.ones_like(data)])\n self.assertArraysEqual(primals, f(data))\n self.assertArraysEqual(tangents, jnp.zeros_like(M).at[row, col].set(1))\n\n # Reverse-mode\n primals, vjp_fun = jax.vjp(f, data)\n data_out, = vjp_fun(primals)\n self.assertArraysEqual(primals, f(data))\n self.assertArraysEqual(data_out, data)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}\".format(jtu.format_shape_dtype_string(shape, dtype)),\n \"shape\": shape, \"dtype\": dtype}\n for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]\n for dtype in jtu.dtypes.floating + jtu.dtypes.complex))\n def test_csr_fromdense_ad(self, shape, dtype):\n rng = rand_sparse(self.rng(), post=jnp.array)\n M = rng(shape, dtype)\n nse = (M != 0).sum()\n f = lambda M: sparse.csr_fromdense(M, nse=nse)\n\n # Forward-mode\n primals, tangents = jax.jvp(f, [M], [jnp.ones_like(M)])\n self.assertArraysEqual(primals[0], f(M)[0])\n self.assertArraysEqual(primals[1], f(M)[1])\n self.assertArraysEqual(primals[2], f(M)[2])\n self.assertArraysEqual(tangents[0], jnp.ones(nse, dtype=dtype))\n self.assertEqual(tangents[1].dtype, dtypes.float0)\n self.assertEqual(tangents[2].dtype, dtypes.float0)\n\n # Reverse-mode\n primals, vjp_fun = jax.vjp(f, M)\n M_out, = vjp_fun(primals)\n self.assertArraysEqual(primals[0], f(M)[0])\n self.assertArraysEqual(primals[1], f(M)[1])\n self.assertArraysEqual(primals[2], 
f(M)[2])\n self.assertArraysEqual(M_out, M)\n\n @unittest.skipIf(jtu.device_under_test() == \"tpu\", \"TPU has insufficient precision\")\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n jtu.format_shape_dtype_string(bshape, dtype)),\n \"shape\": shape, \"dtype\": dtype, \"bshape\": bshape}\n for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]\n for bshape in [shape[-1:] + s for s in [(), (1,), (3,)]]\n for dtype in jtu.dtypes.floating + jtu.dtypes.complex))\n def test_csr_matmul_ad(self, shape, dtype, bshape):\n csr_matmul = sparse.csr_matvec if len(bshape) == 1 else sparse.csr_matmat\n tol = {np.float32: 1E-5, np.float64: 1E-12, np.complex64: 1E-5, np.complex128: 1E-12}\n\n rng = rand_sparse(self.rng(), post=jnp.array)\n rng_b = jtu.rand_default(self.rng())\n\n M = rng(shape, dtype)\n data, indices, indptr = sparse.csr_fromdense(M, nse=(M != 0).sum())\n x = rng_b(bshape, dtype)\n xdot = rng_b(bshape, dtype)\n\n # Forward-mode with respect to the vector\n f_dense = lambda x: M @ x\n f_sparse = lambda x: csr_matmul(data, indices, indptr, x, shape=M.shape)\n v_sparse, t_sparse = jax.jvp(f_sparse, [x], [xdot])\n v_dense, t_dense = jax.jvp(f_dense, [x], [xdot])\n self.assertAllClose(v_sparse, v_dense, atol=tol, rtol=tol)\n self.assertAllClose(t_sparse, t_dense, atol=tol, rtol=tol)\n\n # Reverse-mode with respect to the vector\n primals_dense, vjp_dense = jax.vjp(f_dense, x)\n primals_sparse, vjp_sparse = jax.vjp(f_sparse, x)\n out_dense, = vjp_dense(primals_dense)\n out_sparse, = vjp_sparse(primals_sparse)\n self.assertAllClose(primals_dense[0], primals_sparse[0], atol=tol, rtol=tol)\n self.assertAllClose(out_dense, out_sparse, atol=tol, rtol=tol)\n\n # Forward-mode with respect to nonzero elements of the matrix\n f_sparse = lambda data: csr_matmul(data, indices, indptr, x, shape=M.shape)\n f_dense = lambda data: sparse.csr_todense(data, indices, indptr, shape=M.shape) @ x\n data = rng((len(data),), data.dtype)\n data_dot = rng((len(data),), data.dtype)\n v_sparse, t_sparse = jax.jvp(f_sparse, [data], [data_dot])\n v_dense, t_dense = jax.jvp(f_dense, [data], [data_dot])\n\n self.assertAllClose(v_sparse, v_dense, atol=tol, rtol=tol)\n self.assertAllClose(t_sparse, t_dense, atol=tol, rtol=tol)\n\n # Reverse-mode with respect to nonzero elements of the matrix\n primals_dense, vjp_dense = jax.vjp(f_dense, data)\n primals_sparse, vjp_sparse = jax.vjp(f_sparse, data)\n out_dense, = vjp_dense(primals_dense)\n out_sparse, = vjp_sparse(primals_sparse)\n self.assertAllClose(primals_dense[0], primals_sparse[0], atol=tol, rtol=tol)\n self.assertAllClose(out_dense, out_sparse, atol=tol, rtol=tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}\".format(jtu.format_shape_dtype_string(shape, dtype)),\n \"shape\": shape, \"dtype\": dtype}\n for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]\n for dtype in all_dtypes))\n def test_csr_fromdense(self, shape, dtype):\n rng = rand_sparse(self.rng())\n M = rng(shape, dtype)\n M_csr = scipy.sparse.csr_matrix(M)\n\n nse = M_csr.nnz\n index_dtype = jnp.int32\n fromdense = lambda M: sparse.csr_fromdense(M, nse=nse, index_dtype=jnp.int32)\n\n data, indices, indptr = fromdense(M)\n self.assertArraysEqual(data, M_csr.data.astype(dtype))\n self.assertArraysEqual(indices, M_csr.indices.astype(index_dtype))\n self.assertArraysEqual(indptr, M_csr.indptr.astype(index_dtype))\n\n with self.gpu_dense_conversion_warning_context(dtype):\n data, indices, indptr = 
jit(fromdense)(M)\n self.assertArraysEqual(data, M_csr.data.astype(dtype))\n self.assertArraysEqual(indices, M_csr.indices.astype(index_dtype))\n self.assertArraysEqual(indptr, M_csr.indptr.astype(index_dtype))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_T={}\".format(jtu.format_shape_dtype_string(shape, dtype), transpose),\n \"shape\": shape, \"dtype\": dtype, \"transpose\": transpose}\n for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]\n for dtype in all_dtypes\n for transpose in [True, False]))\n def test_csr_matvec(self, shape, dtype, transpose):\n op = lambda M: M.T if transpose else M\n\n v_rng = jtu.rand_default(self.rng())\n rng = rand_sparse(self.rng(), post=scipy.sparse.csr_matrix)\n M = rng(shape, dtype)\n v = v_rng(op(M).shape[1], dtype)\n\n args = (M.data, M.indices, M.indptr, v)\n matvec = lambda *args: sparse.csr_matvec(*args, shape=M.shape, transpose=transpose)\n\n self.assertAllClose(op(M) @ v, matvec(*args), rtol=MATMUL_TOL)\n with self.gpu_matmul_warning_context(dtype):\n self.assertAllClose(op(M) @ v, jit(matvec)(*args), rtol=MATMUL_TOL)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_T={}\".format(jtu.format_shape_dtype_string(shape, dtype), transpose),\n \"shape\": shape, \"dtype\": dtype, \"transpose\": transpose}\n for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]\n for dtype in all_dtypes\n for transpose in [True, False]))\n def test_csr_matmat(self, shape, dtype, transpose):\n op = lambda M: M.T if transpose else M\n\n B_rng = jtu.rand_default(self.rng())\n rng = rand_sparse(self.rng(), post=scipy.sparse.csr_matrix)\n M = rng(shape, dtype)\n B = B_rng((op(M).shape[1], 4), dtype)\n\n args = (M.data, M.indices, M.indptr, B)\n matmat = lambda *args: sparse.csr_matmat(*args, shape=shape, transpose=transpose)\n\n self.assertAllClose(op(M) @ B, matmat(*args), rtol=MATMUL_TOL)\n with self.gpu_matmul_warning_context(dtype):\n self.assertAllClose(op(M) @ B, jit(matmat)(*args), rtol=MATMUL_TOL)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}\".format(jtu.format_shape_dtype_string(shape, dtype)),\n \"shape\": shape, \"dtype\": dtype}\n for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]\n for dtype in all_dtypes))\n def test_coo_todense(self, shape, dtype):\n rng = rand_sparse(self.rng(), post=scipy.sparse.coo_matrix)\n M = rng(shape, dtype)\n\n args = (M.data, M.row, M.col)\n todense = lambda *args: sparse.coo_todense(*args, shape=M.shape)\n\n self.assertArraysEqual(M.toarray(), todense(*args))\n with self.gpu_dense_conversion_warning_context(dtype):\n self.assertArraysEqual(M.toarray(), jit(todense)(*args))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}\".format(jtu.format_shape_dtype_string(shape, dtype)),\n \"shape\": shape, \"dtype\": dtype}\n for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]\n for dtype in all_dtypes))\n def test_coo_fromdense(self, shape, dtype):\n rng = rand_sparse(self.rng())\n M = rng(shape, dtype)\n M_coo = scipy.sparse.coo_matrix(M)\n\n nse = M_coo.nnz\n index_dtype = jnp.int32\n fromdense = lambda M: sparse.coo_fromdense(M, nse=nse, index_dtype=jnp.int32)\n\n data, row, col = fromdense(M)\n self.assertArraysEqual(data, M_coo.data.astype(dtype))\n self.assertArraysEqual(row, M_coo.row.astype(index_dtype))\n self.assertArraysEqual(col, M_coo.col.astype(index_dtype))\n\n with self.gpu_dense_conversion_warning_context(dtype):\n data, indices, indptr = jit(fromdense)(M)\n self.assertArraysEqual(data, 
M_coo.data.astype(dtype))\n self.assertArraysEqual(row, M_coo.row.astype(index_dtype))\n self.assertArraysEqual(col, M_coo.col.astype(index_dtype))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_T={}\".format(jtu.format_shape_dtype_string(shape, dtype), transpose),\n \"shape\": shape, \"dtype\": dtype, \"transpose\": transpose}\n for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]\n for dtype in all_dtypes\n for transpose in [True, False]))\n def test_coo_matvec(self, shape, dtype, transpose):\n op = lambda M: M.T if transpose else M\n\n v_rng = jtu.rand_default(self.rng())\n rng = rand_sparse(self.rng(), post=scipy.sparse.coo_matrix)\n M = rng(shape, dtype)\n v = v_rng(op(M).shape[1], dtype)\n\n args = (M.data, M.row, M.col, v)\n matvec = lambda *args: sparse.coo_matvec(*args, shape=M.shape, transpose=transpose)\n\n self.assertAllClose(op(M) @ v, matvec(*args), rtol=MATMUL_TOL)\n with self.gpu_matmul_warning_context(dtype):\n self.assertAllClose(op(M) @ v, jit(matvec)(*args), rtol=MATMUL_TOL)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_T={}\".format(jtu.format_shape_dtype_string(shape, dtype), transpose),\n \"shape\": shape, \"dtype\": dtype, \"transpose\": transpose}\n for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]\n for dtype in all_dtypes\n for transpose in [True, False]))\n def test_coo_matmat(self, shape, dtype, transpose):\n op = lambda M: M.T if transpose else M\n\n B_rng = jtu.rand_default(self.rng())\n rng = rand_sparse(self.rng(), post=scipy.sparse.coo_matrix)\n M = rng(shape, dtype)\n B = B_rng((op(M).shape[1], 4), dtype)\n\n args = (M.data, M.row, M.col, B)\n matmat = lambda *args: sparse.coo_matmat(*args, shape=shape, transpose=transpose)\n\n self.assertAllClose(op(M) @ B, matmat(*args), rtol=MATMUL_TOL)\n with self.gpu_matmul_warning_context(dtype):\n self.assertAllClose(op(M) @ B, jit(matmat)(*args), rtol=MATMUL_TOL)\n\n def test_coo_matmat_layout(self):\n # Regression test for https://github.com/google/jax/issues/7533\n d = jnp.array([1.0, 2.0, 3.0, 4.0])\n i = jnp.array([0, 0, 1, 2])\n j = jnp.array([0, 2, 0, 0])\n shape = (3, 3)\n\n x = jnp.arange(9).reshape(3, 3).astype(d.dtype)\n\n def f(x):\n return sparse.coo_matmat(d, i, j, x.T, shape=shape)\n\n result = f(x)\n result_jit = jit(f)(x)\n\n self.assertAllClose(result, result_jit)\n\n @unittest.skipIf(jtu.device_under_test() != \"gpu\", \"test requires GPU\")\n def test_gpu_translation_rule(self):\n version = xla_bridge.get_backend().platform_version\n cuda_version = None if version == \"<unknown>\" else int(version.split()[-1])\n if cuda_version is None or cuda_version < 11000:\n self.assertFalse(cusparse and cusparse.is_supported)\n self.assertNotIn(sparse.csr_todense_p,\n xla._backend_specific_translations[\"gpu\"])\n else:\n self.assertTrue(cusparse and cusparse.is_supported)\n self.assertIn(sparse.csr_todense_p,\n xla._backend_specific_translations[\"gpu\"])\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}\".format(\n jtu.format_shape_dtype_string(shape, dtype), mat_type),\n \"shape\": shape, \"dtype\": dtype, \"mat_type\": mat_type}\n for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]\n for dtype in jtu.dtypes.floating + jtu.dtypes.complex\n for mat_type in ['csr', 'coo']))\n def test_extra_nse(self, shape, dtype, mat_type):\n rng = rand_sparse(self.rng())\n M = rng(shape, dtype)\n nse = (M != 0).sum() + 5\n fromdense = getattr(sparse, f\"{mat_type}_fromdense\")\n todense = getattr(sparse, 
f\"{mat_type}_todense\")\n args = fromdense(M, nse=nse, index_dtype=jnp.int32)\n M_out = todense(*args, shape=M.shape)\n self.assertArraysEqual(M, M_out)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}\".format(jtu.format_shape_dtype_string(shape, dtype)),\n \"shape\": shape, \"dtype\": dtype}\n for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]\n for dtype in jtu.dtypes.floating + jtu.dtypes.complex))\n def test_coo_todense_ad(self, shape, dtype):\n rng = rand_sparse(self.rng(), post=jnp.array)\n M = rng(shape, dtype)\n data, row, col = sparse.coo_fromdense(M, nse=(M != 0).sum())\n f = lambda data: sparse.coo_todense(data, row, col, shape=M.shape)\n\n # Forward-mode\n primals, tangents = jax.jvp(f, [data], [jnp.ones_like(data)])\n self.assertArraysEqual(primals, f(data))\n self.assertArraysEqual(tangents, jnp.zeros_like(M).at[row, col].set(1))\n\n # Reverse-mode\n primals, vjp_fun = jax.vjp(f, data)\n data_out, = vjp_fun(primals)\n self.assertArraysEqual(primals, f(data))\n self.assertArraysEqual(data_out, data)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}\".format(jtu.format_shape_dtype_string(shape, dtype)),\n \"shape\": shape, \"dtype\": dtype}\n for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]\n for dtype in jtu.dtypes.floating + jtu.dtypes.complex))\n def test_coo_fromdense_ad(self, shape, dtype):\n rng = rand_sparse(self.rng(), post=jnp.array)\n M = rng(shape, dtype)\n nse = (M != 0).sum()\n f = lambda M: sparse.coo_fromdense(M, nse=nse)\n\n # Forward-mode\n primals, tangents = jax.jvp(f, [M], [jnp.ones_like(M)])\n self.assertArraysEqual(primals[0], f(M)[0])\n self.assertArraysEqual(primals[1], f(M)[1])\n self.assertArraysEqual(primals[2], f(M)[2])\n self.assertArraysEqual(tangents[0], jnp.ones(nse, dtype=dtype))\n self.assertEqual(tangents[1].dtype, dtypes.float0)\n self.assertEqual(tangents[2].dtype, dtypes.float0)\n\n # Reverse-mode\n primals, vjp_fun = jax.vjp(f, M)\n M_out, = vjp_fun(primals)\n self.assertArraysEqual(primals[0], f(M)[0])\n self.assertArraysEqual(primals[1], f(M)[1])\n self.assertArraysEqual(primals[2], f(M)[2])\n self.assertArraysEqual(M_out, M)\n\n @unittest.skipIf(jtu.device_under_test() == \"tpu\", \"TPU has insufficient precision\")\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n jtu.format_shape_dtype_string(bshape, dtype)),\n \"shape\": shape, \"dtype\": dtype, \"bshape\": bshape}\n for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]\n for bshape in [shape[-1:] + s for s in [(), (1,), (3,)]]\n for dtype in jtu.dtypes.floating + jtu.dtypes.complex))\n def test_coo_matmul_ad(self, shape, dtype, bshape):\n coo_matmul = sparse.coo_matvec if len(bshape) == 1 else sparse.coo_matmat\n tol = {np.float32: 1E-5, np.float64: 1E-12, np.complex64: 1E-5, np.complex128: 1E-12}\n\n rng = rand_sparse(self.rng(), post=jnp.array)\n rng_b = jtu.rand_default(self.rng())\n\n M = rng(shape, dtype)\n data, row, col = sparse.coo_fromdense(M, nse=(M != 0).sum())\n x = rng_b(bshape, dtype)\n xdot = rng_b(bshape, dtype)\n\n # Forward-mode with respect to the vector\n f_dense = lambda x: M @ x\n f_sparse = lambda x: coo_matmul(data, row, col, x, shape=M.shape)\n v_sparse, t_sparse = jax.jvp(f_sparse, [x], [xdot])\n v_dense, t_dense = jax.jvp(f_dense, [x], [xdot])\n self.assertAllClose(v_sparse, v_dense, atol=tol, rtol=tol)\n self.assertAllClose(t_sparse, t_dense, atol=tol, rtol=tol)\n\n # Reverse-mode with respect to the 
vector\n primals_dense, vjp_dense = jax.vjp(f_dense, x)\n primals_sparse, vjp_sparse = jax.vjp(f_sparse, x)\n out_dense, = vjp_dense(primals_dense)\n out_sparse, = vjp_sparse(primals_sparse)\n self.assertAllClose(primals_dense[0], primals_sparse[0], atol=tol, rtol=tol)\n self.assertAllClose(out_dense, out_sparse, atol=tol, rtol=tol)\n\n # Forward-mode with respect to nonzero elements of the matrix\n f_sparse = lambda data: coo_matmul(data, row, col, x, shape=M.shape)\n f_dense = lambda data: sparse.coo_todense(data, row, col, shape=M.shape) @ x\n data = rng((len(data),), data.dtype)\n data_dot = rng((len(data),), data.dtype)\n v_sparse, t_sparse = jax.jvp(f_sparse, [data], [data_dot])\n v_dense, t_dense = jax.jvp(f_dense, [data], [data_dot])\n\n self.assertAllClose(v_sparse, v_dense, atol=tol, rtol=tol)\n self.assertAllClose(t_sparse, t_dense, atol=tol, rtol=tol)\n\n # Reverse-mode with respect to nonzero elements of the matrix\n primals_dense, vjp_dense = jax.vjp(f_dense, data)\n primals_sparse, vjp_sparse = jax.vjp(f_sparse, data)\n out_dense, = vjp_dense(primals_dense)\n out_sparse, = vjp_sparse(primals_sparse)\n self.assertAllClose(primals_dense[0], primals_sparse[0], atol=tol, rtol=tol)\n self.assertAllClose(out_dense, out_sparse, atol=tol, rtol=tol)\n\nclass BCOOTest(jtu.JaxTestCase):\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_nbatch={}_ndense={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), n_batch, n_dense),\n \"shape\": shape, \"dtype\": dtype, \"n_batch\": n_batch, \"n_dense\": n_dense}\n for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]\n for dtype in all_dtypes\n for n_batch in range(len(shape) + 1)\n for n_dense in range(len(shape) + 1 - n_batch)))\n def test_empty(self, shape, dtype, n_batch, n_dense):\n M = sparse.empty(shape, dtype=dtype, n_batch=n_batch, n_dense=n_dense)\n self.assertIsInstance(M, sparse.BCOO)\n self.assertEqual(M.nse, 0)\n self.assertEqual(M.n_batch, n_batch)\n self.assertEqual(M.n_dense, n_dense)\n self.assertEqual(M.dtype, dtype)\n self.assertArraysEqual(M.todense(), jnp.empty(shape, dtype))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_nbatch={}_ndense={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), n_batch, n_dense),\n \"shape\": shape, \"dtype\": dtype, \"n_batch\": n_batch, \"n_dense\": n_dense}\n for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]\n for dtype in all_dtypes\n for n_batch in range(len(shape) + 1)\n for n_dense in range(len(shape) + 1 - n_batch)))\n def test_bcoo_dense_round_trip(self, shape, dtype, n_batch, n_dense):\n rng = rand_sparse(self.rng())\n M = rng(shape, dtype)\n n_sparse = M.ndim - n_batch - n_dense\n nse = int(sparse.bcoo._bcoo_nse(M, n_batch=n_batch, n_dense=n_dense))\n data, indices = sparse.bcoo_fromdense(M, n_batch=n_batch, n_dense=n_dense)\n data_jit, indices_jit = jit(partial(sparse.bcoo_fromdense, nse=nse, n_batch=n_batch, n_dense=n_dense))(M)\n self.assertArraysEqual(data, data_jit)\n self.assertArraysEqual(indices, indices_jit)\n\n assert data.dtype == dtype\n assert data.shape == shape[:n_batch] + (nse,) + shape[n_batch + n_sparse:]\n assert indices.dtype == jnp.int32 # TODO: test passing this arg\n assert indices.shape == shape[:n_batch] + (nse, n_sparse)\n\n todense = partial(sparse.bcoo_todense, shape=shape)\n self.assertArraysEqual(M, todense(data, indices))\n self.assertArraysEqual(M, jit(todense)(data, indices))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n 
{\"testcase_name\": \"_{}_nbatch={}_ndense={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), n_batch, n_dense),\n \"shape\": shape, \"dtype\": dtype, \"n_batch\": n_batch, \"n_dense\": n_dense}\n for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]\n for dtype in jtu.dtypes.floating\n for n_batch in range(len(shape) + 1)\n for n_dense in range(len(shape) + 1 - n_batch)))\n def test_bcoo_todense_ad(self, shape, dtype, n_batch, n_dense):\n rng = rand_sparse(self.rng())\n M = rng(shape, dtype)\n data, indices = sparse.bcoo_fromdense(M, n_batch=n_batch, n_dense=n_dense)\n\n todense = partial(sparse.bcoo_todense, indices=indices, shape=shape)\n j1 = jax.jacfwd(todense)(data)\n j2 = jax.jacrev(todense)(data)\n hess = jax.hessian(todense)(data)\n self.assertArraysAllClose(j1, j2)\n self.assertEqual(j1.shape, M.shape + data.shape)\n self.assertEqual(hess.shape, M.shape + 2 * data.shape)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_nbatch={}_ndense={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), n_batch, n_dense),\n \"shape\": shape, \"dtype\": dtype, \"n_batch\": n_batch, \"n_dense\": n_dense}\n for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]\n for dtype in jtu.dtypes.floating\n for n_batch in range(len(shape) + 1)\n for n_dense in range(len(shape) + 1 - n_batch)))\n def test_bcoo_fromdense_ad(self, shape, dtype, n_batch, n_dense):\n rng = rand_sparse(self.rng())\n M = rng(shape, dtype)\n nse = int(sparse.bcoo._bcoo_nse(M, n_batch=n_batch, n_dense=n_dense))\n\n def fromdense(M):\n return sparse.bcoo_fromdense(M, nse=nse, n_batch=n_batch, n_dense=n_dense)[0]\n data = fromdense(M)\n\n j1 = jax.jacfwd(fromdense)(M)\n j2 = jax.jacrev(fromdense)(M)\n hess = jax.hessian(fromdense)(M)\n self.assertArraysAllClose(j1, j2)\n self.assertEqual(j1.shape, data.shape + M.shape)\n self.assertEqual(hess.shape, data.shape + 2 * M.shape)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_nbatch={}_ndense={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), n_batch, n_dense),\n \"shape\": shape, \"dtype\": dtype, \"n_batch\": n_batch, \"n_dense\": n_dense}\n for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]\n for dtype in jtu.dtypes.floating + jtu.dtypes.complex\n for n_batch in range(len(shape) + 1)\n for n_dense in range(len(shape) + 1 - n_batch)))\n def test_bcoo_dense_round_trip_batched(self, shape, dtype, n_batch, n_dense):\n rng = rand_sparse(self.rng())\n M = rng(shape, dtype)\n n_sparse = M.ndim - n_batch - n_dense\n nse = int(sparse.bcoo._bcoo_nse(M, n_batch=n_batch, n_dense=n_dense))\n\n fromdense = partial(sparse.bcoo_fromdense, nse=nse, n_dense=n_dense)\n todense = partial(sparse.bcoo_todense, shape=shape[n_batch:])\n for i in range(n_batch):\n fromdense = jax.vmap(fromdense)\n todense = jax.vmap(todense)\n\n data, indices = fromdense(M)\n\n assert data.dtype == dtype\n assert data.shape == shape[:n_batch] + (nse,) + shape[n_batch + n_sparse:]\n assert indices.dtype == jnp.int32 # TODO: test passing this arg\n assert indices.shape == shape[:n_batch] + (nse, n_sparse)\n\n self.assertArraysEqual(M, todense(data, indices))\n self.assertArraysEqual(M, jit(todense)(data, indices))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_nbatch={}_ndense={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), n_batch, n_dense),\n \"shape\": shape, \"dtype\": dtype, \"n_batch\": n_batch, \"n_dense\": n_dense}\n for shape in [(5,), (5, 8), (8, 5), 
(3, 4, 5), (3, 4, 3, 2)]\n for dtype in jtu.dtypes.floating + jtu.dtypes.complex\n for n_batch in range(len(shape) + 1)\n for n_dense in range(len(shape) + 1 - n_batch)))\n def test_bcoo_extract(self, shape, dtype, n_batch, n_dense):\n rng = rand_sparse(self.rng())\n M = rng(shape, dtype)\n data, indices = sparse.bcoo_fromdense(M)\n data2 = sparse.bcoo_extract(indices, M)\n self.assertArraysEqual(data, data2)\n data3 = jit(sparse.bcoo_extract)(indices, M)\n self.assertArraysEqual(data, data3)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_nbatch={}_ndense={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), n_batch, n_dense),\n \"shape\": shape, \"dtype\": dtype, \"n_batch\": n_batch, \"n_dense\": n_dense}\n for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]\n for dtype in jtu.dtypes.floating\n for n_batch in range(len(shape) + 1)\n for n_dense in range(len(shape) + 1 - n_batch)))\n def test_bcoo_extract_ad(self, shape, dtype, n_batch, n_dense):\n rng = rand_sparse(self.rng())\n M = rng(shape, dtype)\n data, indices = sparse.bcoo_fromdense(M, n_batch=n_batch, n_dense=n_dense)\n\n extract = partial(sparse.bcoo_extract, indices)\n j1 = jax.jacfwd(extract)(M)\n j2 = jax.jacrev(extract)(M)\n hess = jax.hessian(extract)(M)\n self.assertArraysAllClose(j1, j2)\n self.assertEqual(j1.shape, data.shape + M.shape)\n self.assertEqual(hess.shape, data.shape + 2 * M.shape)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_nbatch={}_ndense={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), n_batch, n_dense),\n \"shape\": shape, \"dtype\": dtype, \"n_batch\": n_batch, \"n_dense\": n_dense}\n for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]\n for dtype in jtu.dtypes.floating\n for n_batch in range(len(shape) + 1)\n for n_dense in range(len(shape) + 1 - n_batch)))\n def test_bcoo_transpose(self, shape, dtype, n_batch, n_dense):\n n_sparse = len(shape) - n_batch - n_dense\n rng = self.rng()\n sprng = rand_sparse(rng)\n M = sprng(shape, dtype)\n data, indices = sparse.bcoo_fromdense(M, n_batch=n_batch, n_dense=n_dense)\n\n permutation = np.concatenate([\n rng.permutation(range(n_batch)),\n rng.permutation(range(n_batch, n_batch + n_sparse)),\n rng.permutation(range(n_batch + n_sparse, len(shape)))]).astype(int)\n\n M_T = M.transpose(permutation)\n trans = partial(sparse.bcoo_transpose, shape=shape, permutation=permutation)\n self.assertArraysEqual(M_T, sparse.bcoo_todense(*trans(data, indices), shape=M_T.shape))\n self.assertArraysEqual(M_T, sparse.bcoo_todense(*jit(trans)(data, indices), shape=M_T.shape))\n\n # test batched\n def trans(M):\n return M.transpose([p - n_batch for p in permutation[n_batch:]])\n for _ in range(n_batch):\n trans = jax.vmap(trans)\n Msp = sparse.BCOO.fromdense(M, n_batch=n_batch, n_dense=n_dense)\n self.assertArraysEqual(trans(M), trans(Msp).todense())\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_nbatch={}_ndense={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), n_batch, n_dense),\n \"shape\": shape, \"dtype\": dtype, \"n_batch\": n_batch, \"n_dense\": n_dense}\n for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]\n for dtype in jtu.dtypes.floating\n for n_batch in range(len(shape) + 1)\n for n_dense in range(len(shape) + 1 - n_batch)))\n def test_bcoo_transpose_ad(self, shape, dtype, n_batch, n_dense):\n n_sparse = len(shape) - n_batch - n_dense\n rng = self.rng()\n sprng = rand_sparse(self.rng())\n\n M = 
sprng(shape, dtype)\n data, indices = sparse.bcoo_fromdense(M, n_batch=n_batch, n_dense=n_dense)\n\n permutation = np.concatenate([\n rng.permutation(range(n_batch)),\n rng.permutation(range(n_batch, n_batch + n_sparse)),\n rng.permutation(range(n_batch + n_sparse, len(shape)))]).astype(int)\n\n def f_sparse(data):\n return sparse.bcoo_transpose(data, indices, shape=shape, permutation=permutation)[0]\n\n jf_sparse = jax.jacfwd(f_sparse)(data)\n jr_sparse = jax.jacrev(f_sparse)(data)\n\n tol = {}\n if jtu.device_under_test() == \"tpu\":\n tol = {np.float32: 5E-3}\n\n # TODO(jakevdp) also test against dense version?\n self.assertAllClose(jf_sparse, jr_sparse, rtol=tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_nbatch={}_ndense={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), n_batch, n_dense),\n \"shape\": shape, \"dtype\": dtype, \"n_batch\": n_batch, \"n_dense\": n_dense}\n for shape in [(5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]\n for dtype in jtu.dtypes.floating + jtu.dtypes.complex\n for n_batch in range(1, len(shape) + 1)\n for n_dense in range(len(shape) + 1 - n_batch)))\n def test_bcoo_todense_partial_batch(self, shape, dtype, n_batch, n_dense):\n rng = rand_sparse(self.rng())\n M = rng(shape, dtype)\n data, indices = sparse.bcoo_fromdense(M, n_batch=n_batch, n_dense=n_dense)\n\n M1 = sparse.bcoo_todense(data, indices[:1], shape=M.shape)\n M2 = sparse.bcoo_todense(data, jnp.stack(shape[0] * [indices[0]]), shape=M.shape)\n self.assertAllClose(M1, M2)\n\n M3 = sparse.bcoo_todense(data[:1], indices, shape=M.shape)\n M4 = sparse.bcoo_todense(jnp.stack(shape[0] * [data[0]]), indices, shape=M.shape)\n self.assertAllClose(M3, M4)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": props.testcase_name(), \"props\": props}\n for props in _generate_bcoo_dot_general_properties(\n shapes=[(5,), (2, 3), (2, 3, 4), (2, 3, 4, 4)],\n dtypes=jtu.dtypes.floating + jtu.dtypes.complex,\n )))\n def test_bcoo_dot_general(self, props: BcooDotGeneralProperties):\n rng = jtu.rand_small(self.rng())\n rng_sparse = rand_sparse(self.rng())\n\n def args_maker():\n lhs = rng_sparse(props.lhs_shape, props.dtype)\n rhs = rng(props.rhs_shape, props.dtype)\n data, indices = sparse.bcoo_fromdense(lhs, n_batch=props.n_batch, n_dense=props.n_dense)\n return data, indices, lhs, rhs\n\n def f_dense(data, indices, lhs, rhs):\n return lax.dot_general(lhs, rhs, dimension_numbers=props.dimension_numbers)\n\n def f_sparse(data, indices, lhs, rhs):\n return sparse.bcoo_dot_general(data, indices, rhs, lhs_shape=lhs.shape,\n dimension_numbers=props.dimension_numbers)\n\n tol = {'float32': 3E-2} if jtu.device_under_test() == 'tpu' else {}\n self._CheckAgainstNumpy(f_dense, f_sparse, args_maker, tol=tol)\n self._CheckAgainstNumpy(f_dense, jit(f_sparse), args_maker, tol=tol)\n # TODO(jakevdp): In rare cases, this fails python_should_be_executing check. 
Why?\n # self._CompileAndCheck(f_sparse, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": props.testcase_name(), \"props\": props}\n for props in _generate_bcoo_dot_general_properties(\n shapes=[(5,), (2, 3), (2, 3, 4), (2, 3, 4, 4)],\n dtypes=jtu.dtypes.floating + jtu.dtypes.complex,\n )))\n def test_bcoo_rdot_general(self, props: BcooDotGeneralProperties):\n rng = jtu.rand_small(self.rng())\n rng_sparse = rand_sparse(self.rng())\n\n lhs_shape, rhs_shape = props.rhs_shape, props.lhs_shape\n dimension_numbers = tuple(d[::-1] for d in props.dimension_numbers)\n\n def args_maker():\n lhs = rng_sparse(lhs_shape, props.dtype)\n rhs = rng(rhs_shape, props.dtype)\n data, indices = sparse.bcoo_fromdense(rhs, n_batch=props.n_batch, n_dense=props.n_dense)\n return data, indices, lhs, rhs\n\n def f_dense(data, indices, lhs, rhs):\n return lax.dot_general(lhs, rhs, dimension_numbers=dimension_numbers)\n\n def f_sparse(data, indices, lhs, rhs):\n return sparse.bcoo_rdot_general(lhs, data, indices, rhs_shape=rhs.shape,\n dimension_numbers=dimension_numbers)\n\n tol = {'float32': 3E-2} if jtu.device_under_test() == 'tpu' else {}\n self._CheckAgainstNumpy(f_dense, f_sparse, args_maker, tol=tol)\n self._CheckAgainstNumpy(f_dense, jit(f_sparse), args_maker, tol=tol)\n # TODO(jakevdp): In rare cases, this fails python_should_be_executing check. Why?\n # self._CompileAndCheck(f_sparse, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_dimension_numbers={}_n_batch={}_n_dense={}\"\n .format(jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n dimension_numbers, n_batch, n_dense),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"dimension_numbers\": dimension_numbers,\n \"n_batch\": n_batch, \"n_dense\": n_dense}\n for lhs_shape, rhs_shape, dimension_numbers, n_batch, n_dense in [\n ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0])), 1, 0),\n ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0])), 2, 0),\n ((3, 3, 2), (2, 3, 4), (([2], [0]), ([0], [1])), 1, 0),\n ((3, 3, 2), (2, 3, 4), (([2], [0]), ([0], [1])), 2, 0),\n ((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1])), 2, 0),\n ((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1])), 2, 1),\n ]\n for dtype in jtu.dtypes.floating + jtu.dtypes.complex))\n def test_bcoo_dot_general_partial_batch(self, lhs_shape, rhs_shape, dtype,\n dimension_numbers, n_batch, n_dense):\n rng = jtu.rand_small(self.rng())\n rng_sparse = rand_sparse(self.rng())\n\n X = rng_sparse(lhs_shape, dtype)\n data, indices = sparse.bcoo_fromdense(X, n_batch=n_batch, n_dense=n_dense)\n Y = rng(rhs_shape, dtype)\n\n def f_dense(X, Y):\n return lax.dot_general(X, Y, dimension_numbers=dimension_numbers)\n\n def f_sparse(data, indices, Y):\n return sparse.bcoo_dot_general(data, indices, Y, lhs_shape=X.shape,\n dimension_numbers=dimension_numbers)\n\n for data, indices in itertools.product([data, data[:1]], [indices, indices[:1]]):\n X = sparse.bcoo_todense(data, indices, shape=X.shape)\n self.assertAllClose(f_dense(X, Y), f_sparse(data, indices, Y))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_dimension_numbers={}_n_batch={}_n_dense={}\"\n .format(jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n dimension_numbers, n_batch, n_dense),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, 
\"dtype\": dtype,\n \"dimension_numbers\": dimension_numbers,\n \"n_batch\": n_batch, \"n_dense\": n_dense}\n for lhs_shape, rhs_shape, dimension_numbers, n_batch, n_dense in [\n ((4, 5), (5, 3), (([1], [0]), ([], [])), 0, 0),\n ((2, 4, 5), (2, 5, 3), (([2], [1]), ([0], [0])), 1, 0),\n ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0])), 1, 0),\n ((3, 3, 2), (2, 3, 4), (([2], [0]), ([0], [1])), 1, 0),\n ((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1])), 2, 0),\n ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0])), 2, 0),\n ((3, 3, 2), (2, 3, 4), (([2], [0]), ([0], [1])), 2, 0),\n # This requires contraction over dense dimensions, which is not yet implemented:\n # ((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1])), 2, 1),\n ]\n for dtype in jtu.dtypes.floating))\n def test_bcoo_dot_general_ad(self, lhs_shape, rhs_shape, dtype,\n dimension_numbers, n_batch, n_dense):\n rng = jtu.rand_small(self.rng())\n rng_sparse = rand_sparse(self.rng())\n\n X = rng_sparse(lhs_shape, dtype)\n data, indices = sparse.bcoo_fromdense(X, n_batch=n_batch, n_dense=n_dense)\n Y = rng(rhs_shape, dtype)\n\n # gradient with respect to rhs\n def f_dense(Y):\n return lax.dot_general(X, Y, dimension_numbers=dimension_numbers)\n\n def f_sparse(Y):\n return sparse.bcoo_dot_general(data, indices, Y, lhs_shape=X.shape,\n dimension_numbers=dimension_numbers)\n\n jf_dense = jax.jacfwd(f_dense)(Y)\n jr_dense = jax.jacrev(f_dense)(Y)\n jf_sparse = jax.jacfwd(f_sparse)(Y)\n jr_sparse = jax.jacrev(f_sparse)(Y)\n\n tol = {}\n if jtu.device_under_test() == \"tpu\":\n tol = {np.float32: 5E-3}\n\n self.assertAllClose(jf_dense, jf_sparse, rtol=tol)\n self.assertAllClose(jr_dense, jr_sparse, rtol=tol)\n self.assertAllClose(jf_sparse, jr_sparse, rtol=tol)\n\n # gradient with respect to lhs\n def g_dense(X):\n return lax.dot_general(X, Y, dimension_numbers=dimension_numbers)\n\n def g_sparse(data):\n return sparse.bcoo_dot_general(data, indices, Y, lhs_shape=X.shape,\n dimension_numbers=dimension_numbers)\n\n jf_dense = jax.jacfwd(g_dense)(X)\n jr_dense = jax.jacrev(g_dense)(X)\n jf_sparse = jax.jacfwd(g_sparse)(data)\n jr_sparse = jax.jacrev(g_sparse)(data)\n\n tol = {}\n if jtu.device_under_test() == \"tpu\":\n tol = {np.float32: 5E-3}\n\n self.assertAllClose(jf_dense, jr_dense, rtol=tol)\n self.assertAllClose(jf_sparse, jr_sparse, rtol=tol)\n\n # Extract the sparse jacobian from the dense & compare.\n def extract(X):\n return sparse.bcoo_extract(indices, X)\n for i in range(g_dense(X).ndim):\n extract = jax.vmap(extract)\n self.assertAllClose(extract(jf_dense), jf_sparse, rtol=tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_dimension_numbers={}_n_batch={}_n_dense={}\"\n .format(jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n dimension_numbers, n_batch, n_dense),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"dimension_numbers\": dimension_numbers,\n \"n_batch\": n_batch, \"n_dense\": n_dense}\n for lhs_shape, rhs_shape, dimension_numbers, n_batch, n_dense in [\n ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0])), 0, 0),\n ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0])), 1, 0),\n ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0])), 0, 1),\n ((3, 3, 2), (2, 3, 4), (([2], [0]), ([0], [1])), 0, 0),\n ((3, 3, 2), (2, 3, 4), (([2], [0]), ([0], [1])), 1, 1),\n ((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1])), 0, 0),\n ((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1])), 
1, 2),\n ((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1])), 2, 1),\n ]\n for dtype in jtu.dtypes.floating + jtu.dtypes.complex))\n def test_bcoo_dot_general_sampled(self, lhs_shape, rhs_shape, dtype, dimension_numbers, n_batch, n_dense):\n rng = jtu.rand_default(self.rng())\n sprng = rand_sparse(self.rng())\n out_shape = lax.dot_general(\n jnp.zeros(lhs_shape), jnp.zeros(rhs_shape),\n dimension_numbers=dimension_numbers).shape\n\n args_maker = lambda: [\n rng(lhs_shape, dtype), rng(rhs_shape, dtype),\n sparse.BCOO.fromdense(sprng(out_shape, dtype),\n n_batch=n_batch, n_dense=n_dense).indices]\n\n def dense_fun(lhs, rhs, indices):\n AB = lax.dot_general(lhs, rhs, dimension_numbers=dimension_numbers)\n return sparse.bcoo_extract(indices, AB)\n def sparse_fun(lhs, rhs, indices):\n return sparse.bcoo_dot_general_sampled(\n lhs, rhs, indices, dimension_numbers=dimension_numbers)\n\n tol = {}\n if jtu.device_under_test() == \"tpu\":\n tol = {np.float32: 5E-3}\n\n self._CheckAgainstNumpy(dense_fun, sparse_fun, args_maker, tol=tol)\n # TODO: python_should_be_executing check occasionally fails... why?\n # self._CompileAndCheck(sparse_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_dimension_numbers={}_n_batch={}_n_dense={}\"\n .format(jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n dimension_numbers, n_batch, n_dense),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"dimension_numbers\": dimension_numbers,\n \"n_batch\": n_batch, \"n_dense\": n_dense}\n for lhs_shape, rhs_shape, dimension_numbers, n_batch, n_dense in [\n ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0])), 1, 0),\n ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0])), 1, 1),\n ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0])), 2, 0),\n ((3, 3, 2), (2, 3, 4), (([2], [0]), ([0], [1])), 1, 0),\n ((3, 3, 2), (2, 3, 4), (([2], [0]), ([0], [1])), 1, 1),\n ((3, 3, 2), (2, 3, 4), (([2], [0]), ([0], [1])), 2, 0),\n ((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1])), 2, 0),\n ((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1])), 2, 1),\n ]\n for dtype in jtu.dtypes.floating))\n def test_bcoo_dot_general_sampled_ad(self, lhs_shape, rhs_shape, dtype, dimension_numbers, n_batch, n_dense):\n rng = jtu.rand_default(self.rng())\n sprng = rand_sparse(self.rng())\n out_shape = lax.dot_general(\n jnp.zeros(lhs_shape), jnp.zeros(rhs_shape),\n dimension_numbers=dimension_numbers).shape\n\n lhs = rng(lhs_shape, dtype)\n rhs = rng(rhs_shape, dtype)\n indices = sparse.BCOO.fromdense(sprng(out_shape, dtype),\n n_batch=n_batch, n_dense=n_dense).indices\n\n def dense_fun(lhs, rhs, indices):\n AB = lax.dot_general(lhs, rhs, dimension_numbers=dimension_numbers)\n return sparse.bcoo_extract(indices, AB)\n def sparse_fun(lhs, rhs, indices):\n return sparse.bcoo_dot_general_sampled(\n lhs, rhs, indices, dimension_numbers=dimension_numbers)\n\n jf_dense = jax.jacfwd(dense_fun)(lhs, rhs, indices)\n jf_sparse = jax.jacfwd(sparse_fun)(lhs, rhs, indices)\n jr_dense = jax.jacrev(dense_fun)(lhs, rhs, indices)\n jr_sparse = jax.jacrev(sparse_fun)(lhs, rhs, indices)\n\n tol = {}\n if jtu.device_under_test() == \"tpu\":\n tol = {np.float32: 5E-3}\n\n self.assertAllClose(jf_sparse, jf_dense, atol=tol)\n self.assertAllClose(jr_sparse, jr_dense, atol=tol)\n self.assertAllClose(jf_sparse, jr_sparse, atol=tol)\n\n @unittest.skipIf(jtu.device_under_test() == \"tpu\", \"TPU has insufficient precision\")\n 
@parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}[n_batch={}]_{}[n_batch={}]_swap={}_dims={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype), lhs_n_batch,\n jtu.format_shape_dtype_string(rhs_shape, dtype), rhs_n_batch,\n swap, dimension_numbers),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape,\n \"lhs_n_batch\": lhs_n_batch, \"rhs_n_batch\": rhs_n_batch,\n \"dimension_numbers\": dimension_numbers, \"swap\": swap, \"dtype\": dtype}\n for lhs_shape, lhs_n_batch, rhs_shape, rhs_n_batch, dimension_numbers in [\n # (batched) outer products (no contraction)\n ((5,), 0, (6,), 0, (([], []), ([], []))),\n ((3, 5), 0, (2, 4), 0, (([], []), ([], []))),\n ((3, 5), 1, (3, 4), 1, (([], []), ([0], [0]))),\n # (batched) vector-vector products\n ((5,), 0, (5,), 0, (([0], [0]), ([], []))),\n ((7,), 0, (7,), 0, (([0], [0]), ([], []))),\n ((5, 7), 1, (7,), 0, (([1], [0]), ([], []))),\n ((2, 3, 4), 2, (2, 4), 1, (([2], [1]), ([0], [0]))),\n ((2, 3, 4), 2, (2, 4), 1, (([2], [1]), ([], []))),\n ((2, 3, 4), 2, (3, 4), 1, (([2], [1]), ([1], [0]))),\n ((2, 3, 4), 2, (3, 4), 1, (([2], [1]), ([], []))),\n # (batched) matrix-vector products\n ((5, 7), 0, (7,), 0, (([1], [0]), ([], []))),\n ((2, 3, 4), 1, (4,), 0, (([2], [0]), ([], []))),\n ((2, 3, 4), 1, (2, 4), 1, (([2], [1]), ([0], [0]))),\n ((3, 2, 4), 1, (3, 4), 1, (([2], [1]), ([0], [0]))),\n ((2, 3, 4), 0, (2,), 0, (([0], [0]), ([], []))),\n # (batched) matrix-matrix products\n ((5, 7), 0, (7, 3), 0, (([1], [0]), ([], []))),\n ((2, 3, 4), 1, (4, 3), 0, (([2], [0]), ([], []))),\n ((2, 3, 4), 1, (2, 4, 3), 1, (([2], [1]), ([0], [0]))),\n # more general operations\n ((2, 3, 4, 3), 1, (2, 4, 3, 4), 1, (([2, 3], [1, 2]), ([0], [0]))),\n ((2, 3, 4, 3, 1), 2, (3, 2, 3, 4), 2, (([2, 3], [3, 2]), ([0, 1], [1, 0]))),\n ]\n for swap in [True, False]\n for dtype in jtu.dtypes.floating + jtu.dtypes.complex))\n def test_bcoo_spdot_general(self, lhs_shape, lhs_n_batch, rhs_shape, rhs_n_batch, dtype, swap, dimension_numbers):\n if swap:\n dimension_numbers = tuple(d[::-1] for d in dimension_numbers)\n lhs_shape, rhs_shape = rhs_shape, lhs_shape\n lhs_n_batch, rhs_n_batch = rhs_n_batch, lhs_n_batch\n\n lhs_n_sparse = len(lhs_shape) - lhs_n_batch\n rhs_batch = dimension_numbers[1][1]\n lhs_contracting = dimension_numbers[0][0]\n should_error = (rhs_n_batch > len(rhs_batch) and lhs_n_sparse > len(lhs_contracting))\n\n sprng = rand_sparse(self.rng())\n def args_maker():\n x = sprng(lhs_shape, dtype)\n y = sprng(rhs_shape, dtype)\n xsp = sparse.BCOO.fromdense(x, n_batch=lhs_n_batch)\n ysp = sparse.BCOO.fromdense(y, n_batch=rhs_n_batch)\n return x, y, xsp, ysp\n\n def f_dense(x, y, xsp, ysp):\n return lax.dot_general(x, y, dimension_numbers=dimension_numbers)\n\n def f_sparse(x, y, xsp, ysp):\n shape = sparse.bcoo._dot_general_validated_shape(xsp.shape, ysp.shape, dimension_numbers)\n data, indices = sparse.bcoo_spdot_general(xsp.data, xsp.indices, ysp.data, ysp.indices,\n lhs_shape=x.shape, rhs_shape=y.shape,\n dimension_numbers=dimension_numbers)\n return sparse.bcoo_todense(data, indices, shape=shape)\n\n tol = {\"complex128\": 1E-14}\n if should_error:\n with self.assertRaisesRegex(ValueError, \".*cannot have unused batch dims on rhs with unused sparse dims on lhs.\"):\n f_sparse(*args_maker())\n else:\n self._CheckAgainstNumpy(f_dense, f_sparse, args_maker, tol=tol)\n self._CheckAgainstNumpy(jit(f_dense), jit(f_sparse), args_maker, tol=tol)\n # TODO(jakevdp): This occasionally fails python_should_be_executing check. 
Why?\n # self._CompileAndCheck(f_sparse, args_maker)\n\n def test_bcoo_spdot_general_nse(self):\n # vector-vector product -> nse=1\n x = sparse.BCOO.fromdense(jnp.arange(3))\n self.assertEqual((x @ x).nse, 1)\n\n # matrix-vector product -> nse matches matrix\n M = sparse.BCOO.fromdense(jnp.arange(6).reshape(2, 3))\n self.assertEqual((M @ x).nse, M.nse)\n\n # matrix-matrix product -> product of nse\n N = sparse.BCOO.fromdense(jnp.arange(12).reshape(3, 4))\n self.assertEqual((M @ N).nse, M.nse * N.nse)\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}[n_batch={}]_rhs_shape={}[n_batch={}]_dimension_numbers={}\"\n .format(jtu.format_shape_dtype_string(lhs_shape, dtype), lhs_n_batch,\n jtu.format_shape_dtype_string(rhs_shape, dtype), rhs_n_batch,\n dimension_numbers),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"dimension_numbers\": dimension_numbers,\n \"lhs_n_batch\": lhs_n_batch, \"rhs_n_batch\": rhs_n_batch}\n for lhs_shape, lhs_n_batch, rhs_shape, rhs_n_batch, dimension_numbers in [\n ((4, 5), 0, (5,), 0, (([1], [0]), ([], []))),\n ((2, 4, 5), 1, (5,), 0, (([2], [0]), ([], []))),\n ((4, 5), 0, (5, 3), 0, (([1], [0]), ([], []))),\n ((2, 4, 5), 1, (2, 5, 3), 1, (([2], [1]), ([0], [0]))),\n ]\n for dtype in jtu.dtypes.floating))\n def test_bcoo_spdot_general_ad(self, lhs_shape, rhs_shape, dtype,\n dimension_numbers, lhs_n_batch, rhs_n_batch):\n rng = rand_sparse(self.rng())\n\n lhs = rng(lhs_shape, dtype)\n rhs = rng(rhs_shape, dtype)\n\n lhs_sp = sparse.BCOO.fromdense(lhs, n_batch=lhs_n_batch)\n rhs_sp = sparse.BCOO.fromdense(rhs, n_batch=rhs_n_batch)\n\n def f_dense(lhs_data, rhs_data):\n lhs = sparse.BCOO((lhs_data, lhs_sp.indices), shape=lhs_sp.shape).todense()\n rhs = sparse.BCOO((rhs_data, rhs_sp.indices), shape=rhs_sp.shape).todense()\n return (lhs @ rhs).sum()\n\n def f_sparse(lhs_data, rhs_data):\n lhs = sparse.BCOO((lhs_data, lhs_sp.indices), shape=lhs_sp.shape)\n rhs = sparse.BCOO((rhs_data, rhs_sp.indices), shape=rhs_sp.shape)\n return (lhs @ rhs).sum()\n\n tol = {}\n if jtu.device_under_test() == \"tpu\":\n tol = {np.float32: 5E-2}\n\n jf_dense_0 = jax.jacfwd(f_dense, argnums=0)(lhs_sp.data, rhs_sp.data)\n jf_sparse_0 = jax.jacfwd(f_sparse, argnums=0)(lhs_sp.data, rhs_sp.data)\n self.assertAllClose(jf_dense_0, jf_sparse_0, rtol=tol)\n\n jf_dense_1 = jax.jacfwd(f_dense, argnums=1)(lhs_sp.data, rhs_sp.data)\n jf_sparse_1 = jax.jacfwd(f_sparse, argnums=1)(lhs_sp.data, rhs_sp.data)\n self.assertAllClose(jf_dense_1, jf_sparse_1, rtol=tol)\n\n jf_dense_0, jf_dense_1 = jax.jacfwd(f_dense, argnums=(0, 1))(lhs_sp.data, rhs_sp.data)\n jf_sparse_0, jf_sparse_1 = jax.jacfwd(f_sparse, argnums=(0, 1))(lhs_sp.data, rhs_sp.data)\n self.assertAllClose(jf_dense_0, jf_sparse_0, rtol=tol)\n self.assertAllClose(jf_dense_1, jf_sparse_1, rtol=tol)\n\n @unittest.skipIf(jtu.device_under_test() == \"tpu\", \"TPU has insufficient precision\")\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}[n_batch={}]_{}[n_batch={}]_in_axes={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype), lhs_n_batch,\n jtu.format_shape_dtype_string(rhs_shape, dtype), rhs_n_batch,\n in_axes),\n \"lhs_shape\": lhs_shape, \"lhs_n_batch\": lhs_n_batch,\n \"rhs_shape\": rhs_shape, \"rhs_n_batch\": rhs_n_batch,\n \"dtype\": dtype, \"in_axes\": in_axes}\n for lhs_shape, lhs_n_batch, rhs_shape, rhs_n_batch, in_axes in [\n ((3, 5), 1, (3, 5), 1, 0),\n ((3, 4, 5), 1, (3, 5), 1, 0),\n ((3, 4, 5), 2, (3, 5), 1, 0),\n # 
TODO(jakevdp): test these once unequal batches are implemented\n # ((4, 5), 1, (5,), 0, (0, None)),\n # ((3, 4, 5), 1, (5,), 0, (0, None)),\n # ((4, 5), 0, (3, 5), 1, (None, 0)),\n ]\n for dtype in jtu.dtypes.floating + jtu.dtypes.complex))\n def test_bcoo_spmm_batched(self, lhs_shape, lhs_n_batch, rhs_shape, rhs_n_batch, dtype, in_axes):\n sprng = rand_sparse(self.rng())\n def args_maker():\n x = sprng(lhs_shape, dtype)\n y = sprng(rhs_shape, dtype)\n xsp = sparse.BCOO.fromdense(x, n_batch=lhs_n_batch)\n ysp = sparse.BCOO.fromdense(y, n_batch=rhs_n_batch)\n return x, y, xsp, ysp\n\n def f_dense(x, y, _, __):\n return jax.vmap(operator.matmul, in_axes=in_axes)(x, y)\n def f_sparse(_, __, x, y):\n return jax.vmap(operator.matmul, in_axes=in_axes)(x, y)\n\n args = args_maker()\n result_dense = f_dense(*args)\n result_sparse = f_sparse(*args)\n self.assertAllClose(result_dense, result_sparse.todense())\n result_sparse_jit = jax.jit(f_sparse)(*args)\n self.assertAllClose(result_dense, result_sparse_jit.todense())\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_nbatch={}_ndense={}_nse={}_remove_zeros={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), n_batch, n_dense, nse, remove_zeros),\n \"shape\": shape, \"dtype\": dtype, \"n_batch\": n_batch, \"n_dense\": n_dense,\n \"nse\": nse, \"remove_zeros\": remove_zeros}\n for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]\n for dtype in jtu.dtypes.floating + jtu.dtypes.complex\n for n_batch in range(len(shape) + 1)\n for n_dense in range(len(shape) + 1 - n_batch)\n for nse in [None, np.prod(shape) - 1]\n for remove_zeros in [True, False]))\n def test_bcoo_sum_duplicates(self, shape, dtype, n_batch, n_dense, nse, remove_zeros):\n rng = self.rng()\n rng_sparse = rand_sparse(self.rng(), rand_method=jtu.rand_some_zero)\n M = sparse.BCOO.fromdense(rng_sparse(shape, dtype), n_batch=n_batch, n_dense=n_dense)\n for i, s in enumerate(shape[n_batch:len(shape) - n_dense]):\n M.indices = M.indices.at[..., i].set(rng.randint(0, s, size=M.nse))\n dedupe = partial(M.sum_duplicates, nse=nse, remove_zeros=remove_zeros)\n jit_dedupe = jax.jit(dedupe)\n\n M_dedup = dedupe()\n self.assertAllClose(M.todense(), M_dedup.todense())\n if nse:\n self.assertEqual(M_dedup.nse, nse)\n\n if not nse:\n with self.assertRaisesRegex(ValueError, \".*nse argument\"):\n jit_dedupe()\n else:\n M_dedup = jit_dedupe()\n self.assertAllClose(M.todense(), M_dedup.todense())\n self.assertEqual(M_dedup.nse, nse)\n\n def test_bcoo_sum_duplicates_inferred_nse(self):\n x = sparse.BCOO.fromdense(jnp.diag(jnp.arange(4)))\n self.assertEqual(x.nse, 3)\n y = x + x.T\n self.assertEqual(y.nse, 6)\n y2 = y.sum_duplicates()\n self.assertEqual(y2.nse, 3)\n self.assertArraysEqual(y.todense(), y2.todense())\n\n def test_bcoo_sum_duplicates_remove_zeros(self):\n data = jnp.array([0, 1, 0, 0])\n indices = jnp.array([[0], [1], [2], [3]])\n x = sparse.BCOO((data, indices), shape=(4,))\n self.assertEqual(x.nse, 4)\n\n y1 = x.sum_duplicates(remove_zeros=True)\n self.assertArraysEqual(x.todense(), y1.todense())\n self.assertEqual(y1.nse, 1)\n\n y2 = x.sum_duplicates(remove_zeros=False)\n self.assertArraysEqual(x.todense(), y2.todense())\n self.assertEqual(y2.nse, x.nse)\n\n def test_bcoo_sum_duplicates_padding(self):\n # Regression test for https://github.com/google/jax/issues/8163\n size = 3\n data = jnp.array([1, 0, 0])\n indices = jnp.array([1, size, size])[:, None]\n x = sparse.BCOO((data, indices), shape=(3,))\n y = x.sum_duplicates(nse=x.nse)\n 
self.assertArraysEqual(x.todense(), y.todense())\n self.assertArraysEqual(x.indices, y.indices)\n self.assertArraysEqual(x.data, y.data)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_nbatch={}_ndense={}_axes={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), n_batch, n_dense, axes),\n \"shape\": shape, \"dtype\": dtype, \"n_batch\": n_batch, \"n_dense\": n_dense, \"axes\": axes}\n for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]\n for dtype in jtu.dtypes.floating + jtu.dtypes.complex\n for n_batch in range(len(shape) + 1)\n for n_dense in range(len(shape) + 1 - n_batch)\n for naxes in range(len(shape))\n for axes in itertools.combinations(range(len(shape)), naxes)))\n def test_bcoo_reduce_sum(self, shape, dtype, n_batch, n_dense, axes):\n rng = rand_sparse(self.rng())\n M = rng(shape, dtype)\n data, indices = sparse.bcoo_fromdense(M, n_batch=n_batch, n_dense=n_dense)\n data_out, indices_out, shape_out = sparse.bcoo_reduce_sum(data, indices, shape=shape, axes=axes)\n result_dense = M.sum(axes)\n result_sparse = sparse.bcoo_todense(data_out, indices_out, shape=shape_out)\n tol = {np.float32: 1E-6, np.float64: 1E-14}\n self.assertAllClose(result_dense, result_sparse, atol=tol, rtol=tol)\n\n @unittest.skipIf(jtu.device_under_test() == \"tpu\", \"TPU has insufficient precision\")\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}\".format(\n jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),\n jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),\n \"lhs_shape\": lhs_shape, \"lhs_dtype\": lhs_dtype,\n \"rhs_shape\": rhs_shape, \"rhs_dtype\": rhs_dtype,\n }\n for lhs_shape, rhs_shape in [[(3,), (3,)],\n [(3, 4), (4,)],\n [(4,), (4, 5)],\n [(3, 4), (4, 5)],\n [(3, 4), (2, 4, 5)],\n [(2, 3, 4), (4, 5)],\n [(2, 3, 4), (2, 4, 5)]]\n for lhs_dtype in all_dtypes\n for rhs_dtype in all_dtypes))\n def test_bcoo_matmul(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype):\n rng = jtu.rand_default(self.rng())\n lhs = jnp.array(rng(lhs_shape, lhs_dtype))\n rhs = jnp.array(rng(rhs_shape, rhs_dtype))\n\n # Note: currently, batch dimensions in matmul must correspond to batch\n # dimensions in the sparse representation.\n lhs_sp = sparse.BCOO.fromdense(lhs, n_batch=max(0, len(lhs_shape) - 2))\n rhs_sp = sparse.BCOO.fromdense(rhs, n_batch=max(0, len(rhs_shape) - 2))\n\n out1 = lhs @ rhs\n out2 = lhs_sp @ rhs\n out3 = lhs @ rhs_sp\n\n tol = {np.float64: 1E-13, np.complex128: 1E-13,\n np.float32: 1E-6, np.complex64: 1E-6}\n self.assertAllClose(out1, out2, rtol=tol)\n self.assertAllClose(out1, out3, rtol=tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}_n_batch={}_n_dense={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),\n jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),\n n_batch, n_dense),\n \"lhs_shape\": lhs_shape, \"lhs_dtype\": lhs_dtype,\n \"rhs_shape\": rhs_shape, \"rhs_dtype\": rhs_dtype,\n \"n_batch\": n_batch, \"n_dense\": n_dense,\n }\n for lhs_shape, rhs_shape in [[(3,), ()], [(3,), (1,)], [(3,), (3,)],\n [(3, 4), ()], [(3, 4), (4,)], [(3, 4), (3, 1)], [(3, 4), (3, 4)],\n [(3, 4, 5), (4, 5)], [(3, 4, 5), (3, 1, 1)], [(3, 4, 5), (1, 4, 1)]]\n for n_batch in range(len(lhs_shape) + 1)\n for n_dense in range(len(lhs_shape) + 1 - n_batch)\n for lhs_dtype in all_dtypes\n for rhs_dtype in all_dtypes))\n def test_bcoo_mul_dense(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, n_batch, n_dense):\n rng_lhs = rand_sparse(self.rng())\n rng_rhs = 
jtu.rand_default(self.rng())\n lhs = jnp.array(rng_lhs(lhs_shape, lhs_dtype))\n rhs = jnp.array(rng_rhs(rhs_shape, rhs_dtype))\n\n sp = lambda x: sparse.BCOO.fromdense(x, n_batch=n_batch, n_dense=n_dense)\n\n out1 = lhs * rhs\n out2 = (sp(lhs) * rhs).todense()\n out3 = (rhs * sp(lhs)).todense()\n\n tol = {np.float64: 1E-13, np.complex128: 1E-13,\n np.float32: 1E-6, np.complex64: 1E-6}\n self.assertAllClose(out1, out2, rtol=tol)\n self.assertAllClose(out1, out3, rtol=tol)\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}_n_batch={}_n_dense={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),\n jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),\n n_batch, n_dense),\n \"lhs_shape\": lhs_shape, \"lhs_dtype\": lhs_dtype,\n \"rhs_shape\": rhs_shape, \"rhs_dtype\": rhs_dtype,\n \"n_batch\": n_batch, \"n_dense\": n_dense,\n }\n # TODO(jakevdp): add broadcasted shapes (from bcoo_mul_dense) once sparse-sparse mul\n # supports inputs of differing rank.\n for lhs_shape, rhs_shape in [[(3,), (1,)], [(3,), (3,)],\n [(3, 4), (1, 1)], [(3, 4), (1, 4)], [(3, 4), (3, 1)], [(3, 4), (3, 4)],\n [(3, 4, 5), (1, 4, 5)], [(3, 4, 5), (3, 1, 1)], [(3, 4, 5), (1, 4, 1)]]\n # TODO(jakevdp): add tests for batch & dense dimensions.\n for n_batch in range(len(lhs_shape) + 1)\n for n_dense in range(len(lhs_shape) + 1 - n_batch)\n for lhs_dtype in all_dtypes\n for rhs_dtype in all_dtypes))\n def test_bcoo_mul_sparse(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, n_batch, n_dense):\n rng = rand_sparse(self.rng())\n lhs = jnp.array(rng(lhs_shape, lhs_dtype))\n rhs = jnp.array(rng(rhs_shape, rhs_dtype))\n\n sp = lambda x: sparse.BCOO.fromdense(x, n_batch=n_batch, n_dense=n_dense)\n\n out1 = lhs * rhs\n out2 = (sp(lhs) * sp(rhs)).todense()\n\n tol = {np.float64: 1E-13, np.complex128: 1E-13,\n np.float32: 1E-6, np.complex64: 1E-6}\n self.assertAllClose(out1, out2, rtol=tol)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_n_batch={}_n_dense={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), n_batch, n_dense),\n \"shape\": shape, \"dtype\": dtype, \"n_batch\": n_batch, \"n_dense\": n_dense}\n for shape in [(), (3,), (3, 5), (3, 5, 4)]\n for dtype in all_dtypes\n for n_batch in range(len(shape) + 1)\n for n_dense in range(len(shape) + 1 - n_batch)))\n def test_bcoo_broadcast_in_dim(self, shape, dtype, n_batch, n_dense):\n rng = rand_sparse(self.rng())\n x = jnp.array(rng(shape, dtype))\n xsp = sparse.BCOO.fromdense(x, n_batch=n_batch, n_dense=n_dense)\n\n self.assertArraysEqual(xsp[None].todense(), x[None])\n if len(shape) >= 1:\n self.assertArraysEqual(xsp[:, None].todense(), x[:, None])\n self.assertArraysEqual(xsp[:, None, None].todense(), x[:, None, None])\n if len(shape) >= 2:\n self.assertArraysEqual(xsp[:, :, None].todense(), x[:, :, None])\n self.assertArraysEqual(xsp[:, None, :, None].todense(), x[:, None, :, None])\n\n def test_bcoo_vmap_shape(self, shape=(2, 3, 4, 5), dtype=np.float32):\n # This test checks that BCOO shape metadata interacts correctly with vmap.\n rng = rand_sparse(self.rng())\n M = rng(shape, dtype)\n\n def make_bcoo(M):\n return sparse.BCOO.fromdense(M, nse=np.prod(M.shape[:-1], dtype=int), n_dense=1)\n\n for _ in range(3):\n make_bcoo = jax.vmap(make_bcoo)\n Msp = make_bcoo(M)\n self.assertEqual(Msp.shape, M.shape)\n self.assertArraysEqual(Msp.todense(), M)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_nbatch={}_ndense={}\".format(\n 
jtu.format_shape_dtype_string(shape, dtype), n_batch, n_dense),\n \"shape\": shape, \"dtype\": dtype, \"n_batch\": n_batch, \"n_dense\": n_dense}\n for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]\n for dtype in jtu.dtypes.floating + jtu.dtypes.complex\n for n_batch in range(len(shape) + 1)\n for n_dense in range(len(shape) + 1 - n_batch)))\n def test_bcoo_unbatch(self, shape, dtype, n_batch, n_dense):\n rng_sparse = rand_sparse(self.rng())\n M1 = sparse.BCOO.fromdense(rng_sparse(shape, dtype), n_batch=n_batch, n_dense=n_dense)\n M2 = M1._unbatch()\n self.assertEqual(M2.n_batch, 0)\n self.assertEqual(M1.n_dense, M2.n_dense)\n self.assertEqual(M1.shape, M2.shape)\n self.assertEqual(M1.dtype, M2.dtype)\n self.assertArraysEqual(M1.todense(), M2.todense())\n\n def test_bcoo_bad_fillvals(self):\n # Extra values have 100 rather than zero. This lets us check that logic is\n # properly ignoring these indices.\n data = jnp.array([1, 2, 3, 100, 100])\n indices = jnp.array([1, 2, 3, 5, 5])[:, None]\n x_sp = sparse.BCOO((data, indices), shape=(5,))\n x_de = x_sp.todense()\n\n data = jnp.array([3, 2, 100, 100])\n indices = jnp.array([2, 3, 5, 5])[:, None]\n y_sp = sparse.BCOO((data, indices), shape=(5,))\n y_de = y_sp.todense()\n\n self.assertArraysEqual(x_de, jnp.array([0, 1, 2, 3, 0]))\n self.assertArraysEqual(y_de, jnp.array([0, 0, 3, 2, 0]))\n\n self.assertArraysEqual(x_sp.sum_duplicates().todense(), x_de)\n self.assertArraysEqual(y_sp.sum_duplicates().todense(), y_de)\n\n # reduce_sum:\n self.assertArraysEqual(x_sp.sum(), x_de.sum())\n\n # bcoo_dot_general\n self.assertArraysEqual(x_sp @ y_de, x_de @ y_de)\n\n # bcoo_spdot_general\n self.assertArraysEqual((x_sp @ y_sp).todense(), x_de @ y_de)\n self.assertArraysEqual((y_sp @ x_sp).todense(), y_de @ x_de)\n\n\nclass SparseGradTest(jtu.JaxTestCase):\n def test_sparse_grad(self):\n rng_sparse = rand_sparse(self.rng())\n rng = jtu.rand_default(self.rng())\n\n y = rng(5, \"float32\")\n X = rng_sparse((10, 5), \"float32\")\n Xsp = sparse.BCOO.fromdense(X)\n\n def f(X, y):\n return jnp.sum(X @ y)\n\n grad_dense = jax.grad(f, argnums=0)(X, y)\n grad_sparse = sparse.grad(f, argnums=0)(Xsp, y)\n\n # extract sparse gradient from dense gradient\n indices = tuple(Xsp.indices.T)\n grad_sparse_from_dense = jnp.zeros_like(grad_dense).at[indices].set(grad_dense[indices])\n\n self.assertArraysEqual(grad_sparse.todense(), grad_sparse_from_dense)\n\n\nclass SparseObjectTest(jtu.JaxTestCase):\n def test_repr(self):\n M = sparse.BCOO.fromdense(jnp.arange(5, dtype='float32'))\n self.assertEqual(repr(M), \"BCOO(float32[5], nse=4)\")\n\n M_invalid = sparse.BCOO(([], []), shape=(100,))\n self.assertEqual(repr(M_invalid), \"BCOO(<invalid>)\")\n\n @parameterized.named_parameters(\n {\"testcase_name\": \"_{}{}\".format(cls.__name__, shape), \"cls\": cls, \"shape\": shape}\n for cls in [sparse.CSR, sparse.CSC, sparse.COO, sparse.BCOO]\n for shape in ([2, 5], [5, 3]))\n def test_empty(self, cls, shape):\n sparse_format = cls.__name__.lower()\n M = sparse.empty(shape, sparse_format=sparse_format)\n self.assertIsInstance(M, cls)\n self.assertEqual(M.nse, 0)\n self.assertArraysEqual(M.todense(), jnp.empty(shape))\n\n @parameterized.named_parameters(\n {\"testcase_name\": \"_{}\".format(Obj.__name__), \"Obj\": Obj}\n for Obj in [sparse.CSR, sparse.CSC, sparse.COO, sparse.BCOO])\n def test_block_until_ready(self, Obj, shape=(5, 8), dtype=np.float32):\n rng = rand_sparse(self.rng(), post=Obj.fromdense)\n M = rng(shape, dtype)\n self.assertEqual(M.shape, 
M.block_until_ready().shape)\n self.assertArraysEqual(M.data, M.block_until_ready().data)\n self.assertArraysEqual(M.todense(), M.block_until_ready().todense())\n\n @parameterized.named_parameters(\n {\"testcase_name\": \"_{}\".format(Obj.__name__), \"Obj\": Obj}\n for Obj in [jnp.array, sparse.CSR, sparse.CSC, sparse.COO, sparse.BCOO])\n def test_todense(self, Obj, shape=(5, 8), dtype=np.float32):\n rng = rand_sparse(self.rng())\n M_dense = rng(shape, dtype)\n M = jnp.array(M_dense) if Obj is jnp.array else Obj.fromdense(M_dense)\n self.assertArraysEqual(sparse.todense(M), M_dense)\n self.assertArraysEqual(jit(sparse.todense)(M), M_dense)\n\n @parameterized.named_parameters(\n {\"testcase_name\": \"_{}\".format(Obj.__name__), \"Obj\": Obj}\n for Obj in [jnp.array, sparse.BCOO])\n def test_todense_batching(self, Obj, shape=(5, 8), dtype=np.float32):\n rng = rand_sparse(self.rng())\n M_dense = rng(shape, dtype)\n if Obj is sparse.BCOO:\n M = sparse.BCOO.fromdense(M_dense, n_batch=1)\n else:\n M = jnp.asarray(M_dense)\n self.assertArraysEqual(vmap(sparse.todense)(M), M_dense)\n self.assertArraysEqual(jit(vmap(sparse.todense))(M), M_dense)\n\n @parameterized.named_parameters(\n {\"testcase_name\": \"_{}\".format(Obj.__name__), \"Obj\": Obj}\n for Obj in [jnp.array, sparse.BCOO])\n def test_todense_ad(self, Obj, shape=(3,), dtype=np.float32):\n M_dense = jnp.array([1., 2., 3.])\n M = M_dense if Obj is jnp.array else Obj.fromdense(M_dense)\n bufs, tree = tree_util.tree_flatten(M)\n jac = jnp.eye(M.shape[0], dtype=M.dtype)\n jac1 = jax.jacfwd(lambda *bufs: sparse.todense_p.bind(*bufs, tree=tree))(*bufs)\n jac2 = jax.jacrev(lambda *bufs: sparse.todense_p.bind(*bufs, tree=tree))(*bufs)\n self.assertArraysEqual(jac1, jac2)\n self.assertArraysEqual(jac, jac2)\n\n @parameterized.named_parameters(\n {\"testcase_name\": \"_{}\".format(Obj.__name__), \"Obj\": Obj}\n for Obj in [sparse.CSR, sparse.CSC, sparse.COO, sparse.BCOO])\n def test_attrs(self, Obj, shape=(5, 8), dtype=np.float16):\n rng = rand_sparse(self.rng(), post=Obj.fromdense)\n M = rng(shape, dtype)\n\n assert isinstance(M, Obj)\n assert M.shape == shape\n assert M.dtype == dtype\n assert M.nse == (M.todense() != 0).sum()\n assert M.data.dtype == dtype\n\n if isinstance(M, sparse.CSR):\n assert len(M.data) == len(M.indices)\n assert len(M.indptr) == M.shape[0] + 1\n elif isinstance(M, sparse.CSC):\n assert len(M.data) == len(M.indices)\n assert len(M.indptr) == M.shape[1] + 1\n elif isinstance(M, sparse.COO):\n assert len(M.data) == len(M.row) == len(M.col)\n elif isinstance(M, sparse.BCOO):\n assert M.data.shape[M.n_batch] == M.indices.shape[-2]\n assert M.indices.shape[-1] == M.n_sparse\n else:\n raise ValueError(\"Obj={Obj} not expected.\")\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": \"_{}_Obj={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), Obj.__name__),\n \"shape\": shape, \"dtype\": dtype, \"Obj\": Obj}\n for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]\n for dtype in jtu.dtypes.floating + jtu.dtypes.complex)\n for Obj in [sparse.CSR, sparse.CSC, sparse.COO, sparse.BCOO]))\n def test_dense_round_trip(self, shape, dtype, Obj):\n rng = rand_sparse(self.rng())\n M = rng(shape, dtype)\n Msparse = Obj.fromdense(M)\n self.assertArraysEqual(M, Msparse.todense())\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": \"_{}_Obj={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), Obj.__name__),\n 
\"shape\": shape, \"dtype\": dtype, \"Obj\": Obj}\n for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]\n for dtype in jtu.dtypes.floating + jtu.dtypes.complex)\n for Obj in [sparse.CSR, sparse.CSC, sparse.COO, sparse.BCOO]))\n def test_transpose(self, shape, dtype, Obj):\n rng = rand_sparse(self.rng())\n M = rng(shape, dtype)\n Msparse = Obj.fromdense(M)\n self.assertArraysEqual(M.T, Msparse.T.todense())\n\n @unittest.skipIf(jtu.device_under_test() == \"tpu\", \"TPU has insufficient precision\")\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": \"_{}_Obj={}_bshape={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), Obj.__name__, bshape),\n \"shape\": shape, \"dtype\": dtype, \"Obj\": Obj, \"bshape\": bshape}\n for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]\n for bshape in [shape[-1:] + s for s in [(), (3,), (4,)]]\n for dtype in jtu.dtypes.floating + jtu.dtypes.complex)\n for Obj in [sparse.CSR, sparse.CSC, sparse.COO, sparse.BCOO]))\n def test_matmul(self, shape, dtype, Obj, bshape):\n rng = rand_sparse(self.rng(), post=jnp.array)\n rng_b = jtu.rand_default(self.rng())\n M = rng(shape, dtype)\n Msp = Obj.fromdense(M)\n\n # Test matching type\n x = rng_b(bshape, dtype)\n x = jnp.asarray(x)\n self.assertAllClose(M @ x, Msp @ x, rtol=MATMUL_TOL)\n\n # Test mismatched type\n x = rng_b(bshape, np.int32)\n x = jnp.asarray(x)\n self.assertAllClose(M @ x, Msp @ x, rtol=MATMUL_TOL)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}({})\".format(\n input_type.__name__,\n jtu.format_shape_dtype_string(shape, dtype)),\n \"input_type\": input_type, \"shape\": shape, \"dtype\": dtype}\n for input_type in [scipy.sparse.coo_matrix, scipy.sparse.csr_matrix, scipy.sparse.csc_matrix]\n for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]\n for dtype in jtu.dtypes.floating + jtu.dtypes.complex))\n def test_bcoo_from_scipy_sparse(self, input_type, shape, dtype):\n rng = rand_sparse(self.rng())\n M = rng(shape, dtype)\n M_sparse = input_type(M)\n M_bcoo = sparse.BCOO.from_scipy_sparse(M_sparse)\n self.assertArraysEqual(M, M_bcoo.todense())\n\n def test_bcoo_methods(self):\n M = jnp.arange(12).reshape(3, 4)\n Msp = sparse.BCOO.fromdense(M)\n\n self.assertArraysEqual(-M, (-Msp).todense())\n\n self.assertArraysEqual(2 * M, (2 * Msp).todense())\n self.assertArraysEqual(M * 2, (Msp * 2).todense())\n\n self.assertArraysEqual(M + M, (Msp + Msp).todense())\n\n self.assertArraysEqual(M.sum(0), Msp.sum(0).todense())\n self.assertArraysEqual(M.sum(1), Msp.sum(1).todense())\n self.assertArraysEqual(M.sum(), Msp.sum())\n\nclass SparseRandomTest(jtu.JaxTestCase):\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_nbatch={}_ndense={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), n_batch, n_dense),\n \"shape\": shape, \"dtype\": dtype, \"n_batch\": n_batch, \"n_dense\": n_dense}\n for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]\n for dtype in jtu.dtypes.floating\n for n_batch in range(len(shape) + 1)\n for n_dense in range(len(shape) + 1 - n_batch)))\n def test_random_bcoo(self, shape, dtype, n_batch, n_dense):\n key = jax.random.PRNGKey(1701)\n mat = sparse.random_bcoo(key, shape=shape, dtype=dtype, n_batch=n_batch, n_dense=n_dense)\n\n mat_dense = mat.todense()\n self.assertEqual(mat_dense.shape, shape)\n self.assertEqual(mat_dense.dtype, dtype)\n\n n_sparse = len(shape) - n_batch - n_dense\n batch_shape, sparse_shape, dense_shape = split_list(shape, [n_batch, n_sparse])\n\n 
approx_expected_num_nonzero = (\n np.ceil(0.2 * np.prod(sparse_shape))\n * np.prod(batch_shape) * np.prod(dense_shape))\n num_nonzero = (mat_dense != 0).sum()\n self.assertAlmostEqual(num_nonzero, approx_expected_num_nonzero, delta=2)\n\n\nif __name__ == \"__main__\":\n absltest.main(testLoader=jtu.JaxTestLoader())\n"
] | [
[
"numpy.issubdtype",
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
parsons-kyle-89/pandas | [
"08c920eab602dac261b8fe55ffe439593c095e12",
"08c920eab602dac261b8fe55ffe439593c095e12"
] | [
"pandas/core/arrays/datetimes.py",
"pandas/core/indexes/accessors.py"
] | [
"# -*- coding: utf-8 -*-\nfrom datetime import datetime, time\nimport warnings\n\nimport numpy as np\nfrom pytz import utc\n\nfrom pandas._libs import lib, tslib\nfrom pandas._libs.tslibs import (\n NaT, Timestamp, ccalendar, conversion, fields, iNaT, normalize_date,\n resolution as libresolution, timezones)\nimport pandas.compat as compat\nfrom pandas.errors import PerformanceWarning\nfrom pandas.util._decorators import Appender\n\nfrom pandas.core.dtypes.common import (\n _INT64_DTYPE, _NS_DTYPE, is_categorical_dtype, is_datetime64_dtype,\n is_datetime64tz_dtype, is_extension_type, is_float_dtype, is_int64_dtype,\n is_object_dtype, is_period_dtype, is_string_dtype, is_timedelta64_dtype)\nfrom pandas.core.dtypes.dtypes import DatetimeTZDtype\nfrom pandas.core.dtypes.generic import ABCIndexClass, ABCSeries\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas.core import ops\nfrom pandas.core.algorithms import checked_add_with_arr\nfrom pandas.core.arrays import datetimelike as dtl\nfrom pandas.core.arrays._ranges import generate_regular_range\nimport pandas.core.common as com\n\nfrom pandas.tseries.frequencies import get_period_alias, to_offset\nfrom pandas.tseries.offsets import Day, Tick\n\n_midnight = time(0, 0)\n\n\ndef _to_m8(key, tz=None):\n \"\"\"\n Timestamp-like => dt64\n \"\"\"\n if not isinstance(key, Timestamp):\n # this also converts strings\n key = Timestamp(key)\n if key.tzinfo is not None and tz is not None:\n # Don't tz_localize(None) if key is already tz-aware\n key = key.tz_convert(tz)\n else:\n key = key.tz_localize(tz)\n\n return np.int64(conversion.pydt_to_i8(key)).view(_NS_DTYPE)\n\n\ndef _field_accessor(name, field, docstring=None):\n def f(self):\n values = self.asi8\n if self.tz is not None and not timezones.is_utc(self.tz):\n values = self._local_timestamps()\n\n if field in self._bool_ops:\n if field.endswith(('start', 'end')):\n freq = self.freq\n month_kw = 12\n if freq:\n kwds = freq.kwds\n month_kw = kwds.get('startingMonth', kwds.get('month', 12))\n\n result = fields.get_start_end_field(values, field,\n self.freqstr, month_kw)\n else:\n result = fields.get_date_field(values, field)\n\n # these return a boolean by-definition\n return result\n\n if field in self._object_ops:\n result = fields.get_date_name_field(values, field)\n result = self._maybe_mask_results(result, fill_value=None)\n\n else:\n result = fields.get_date_field(values, field)\n result = self._maybe_mask_results(result, fill_value=None,\n convert='float64')\n\n return result\n\n f.__name__ = name\n f.__doc__ = \"\\n{}\\n\".format(docstring)\n return property(f)\n\n\ndef _dt_array_cmp(cls, op):\n \"\"\"\n Wrap comparison operations to convert datetime-like to datetime64\n \"\"\"\n opname = '__{name}__'.format(name=op.__name__)\n nat_result = True if opname == '__ne__' else False\n\n def wrapper(self, other):\n meth = getattr(dtl.DatetimeLikeArrayMixin, opname)\n\n other = lib.item_from_zerodim(other)\n\n if isinstance(other, (datetime, np.datetime64, compat.string_types)):\n if isinstance(other, (datetime, np.datetime64)):\n # GH#18435 strings get a pass from tzawareness compat\n self._assert_tzawareness_compat(other)\n\n try:\n other = _to_m8(other, tz=self.tz)\n except ValueError:\n # string that cannot be parsed to Timestamp\n return ops.invalid_comparison(self, other, op)\n\n result = op(self.asi8, other.view('i8'))\n if isna(other):\n result.fill(nat_result)\n elif lib.is_scalar(other) or np.ndim(other) == 0:\n return ops.invalid_comparison(self, other, op)\n elif len(other) != 
len(self):\n raise ValueError(\"Lengths must match\")\n else:\n if isinstance(other, list):\n try:\n other = type(self)._from_sequence(other)\n except ValueError:\n other = np.array(other, dtype=np.object_)\n elif not isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries,\n DatetimeArrayMixin)):\n # Following Timestamp convention, __eq__ is all-False\n # and __ne__ is all True, others raise TypeError.\n return ops.invalid_comparison(self, other, op)\n\n if is_object_dtype(other):\n result = op(self.astype('O'), np.array(other))\n o_mask = isna(other)\n elif not (is_datetime64_dtype(other) or\n is_datetime64tz_dtype(other)):\n # e.g. is_timedelta64_dtype(other)\n return ops.invalid_comparison(self, other, op)\n else:\n self._assert_tzawareness_compat(other)\n if not hasattr(other, 'asi8'):\n # ndarray, Series\n other = type(self)(other)\n result = meth(self, other)\n o_mask = other._isnan\n\n result = com.values_from_object(result)\n\n # Make sure to pass an array to result[...]; indexing with\n # Series breaks with older version of numpy\n o_mask = np.array(o_mask)\n if o_mask.any():\n result[o_mask] = nat_result\n\n if self._hasnans:\n result[self._isnan] = nat_result\n\n return result\n\n return compat.set_function_name(wrapper, opname, cls)\n\n\nclass DatetimeArrayMixin(dtl.DatetimeLikeArrayMixin,\n dtl.TimelikeOps,\n dtl.DatelikeOps):\n \"\"\"\n Assumes that subclass __new__/__init__ defines:\n tz\n _freq\n _data\n \"\"\"\n _typ = \"datetimearray\"\n\n # define my properties & methods for delegation\n _bool_ops = ['is_month_start', 'is_month_end',\n 'is_quarter_start', 'is_quarter_end', 'is_year_start',\n 'is_year_end', 'is_leap_year']\n _object_ops = ['weekday_name', 'freq', 'tz']\n _field_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',\n 'weekofyear', 'week', 'weekday', 'dayofweek',\n 'dayofyear', 'quarter', 'days_in_month',\n 'daysinmonth', 'microsecond',\n 'nanosecond']\n _other_ops = ['date', 'time', 'timetz']\n _datetimelike_ops = _field_ops + _object_ops + _bool_ops + _other_ops\n _datetimelike_methods = ['to_period', 'tz_localize',\n 'tz_convert',\n 'normalize', 'strftime', 'round', 'floor',\n 'ceil', 'month_name', 'day_name']\n\n # dummy attribute so that datetime.__eq__(DatetimeArray) defers\n # by returning NotImplemented\n timetuple = None\n\n # Needed so that Timestamp.__richcmp__(DateTimeArray) operates pointwise\n ndim = 1\n\n # ensure that operations with numpy arrays defer to our implementation\n __array_priority__ = 1000\n\n # -----------------------------------------------------------------\n # Constructors\n\n _attributes = [\"freq\", \"tz\"]\n _tz = None\n _freq = None\n\n @classmethod\n def _simple_new(cls, values, freq=None, tz=None):\n \"\"\"\n we require the we have a dtype compat for the values\n if we are passed a non-dtype compat, then coerce using the constructor\n \"\"\"\n assert isinstance(values, np.ndarray), type(values)\n if values.dtype == 'i8':\n # for compat with datetime/timedelta/period shared methods,\n # we can sometimes get here with int64 values. 
These represent\n # nanosecond UTC (or tz-naive) unix timestamps\n values = values.view('M8[ns]')\n\n assert values.dtype == 'M8[ns]', values.dtype\n\n result = object.__new__(cls)\n result._data = values\n result._freq = freq\n tz = timezones.maybe_get_tz(tz)\n result._tz = timezones.tz_standardize(tz)\n return result\n\n def __new__(cls, values, freq=None, tz=None, dtype=None, copy=False,\n dayfirst=False, yearfirst=False, ambiguous='raise'):\n return cls._from_sequence(\n values, freq=freq, tz=tz, dtype=dtype, copy=copy,\n dayfirst=dayfirst, yearfirst=yearfirst, ambiguous=ambiguous)\n\n @classmethod\n def _from_sequence(cls, data, dtype=None, copy=False,\n tz=None, freq=None,\n dayfirst=False, yearfirst=False, ambiguous='raise'):\n\n freq, freq_infer = dtl.maybe_infer_freq(freq)\n\n subarr, tz, inferred_freq = sequence_to_dt64ns(\n data, dtype=dtype, copy=copy, tz=tz,\n dayfirst=dayfirst, yearfirst=yearfirst, ambiguous=ambiguous)\n\n freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq,\n freq_infer)\n\n result = cls._simple_new(subarr, freq=freq, tz=tz)\n\n if inferred_freq is None and freq is not None:\n # this condition precludes `freq_infer`\n cls._validate_frequency(result, freq, ambiguous=ambiguous)\n\n elif freq_infer:\n result.freq = to_offset(result.inferred_freq)\n\n return result\n\n @classmethod\n def _generate_range(cls, start, end, periods, freq, tz=None,\n normalize=False, ambiguous='raise',\n nonexistent='raise', closed=None):\n\n periods = dtl.validate_periods(periods)\n if freq is None and any(x is None for x in [periods, start, end]):\n raise ValueError('Must provide freq argument if no data is '\n 'supplied')\n\n if com.count_not_none(start, end, periods, freq) != 3:\n raise ValueError('Of the four parameters: start, end, periods, '\n 'and freq, exactly three must be specified')\n freq = to_offset(freq)\n\n if start is not None:\n start = Timestamp(start)\n\n if end is not None:\n end = Timestamp(end)\n\n if start is None and end is None:\n if closed is not None:\n raise ValueError(\"Closed has to be None if not both of start\"\n \"and end are defined\")\n if start is NaT or end is NaT:\n raise ValueError(\"Neither `start` nor `end` can be NaT\")\n\n left_closed, right_closed = dtl.validate_endpoints(closed)\n\n start, end, _normalized = _maybe_normalize_endpoints(start, end,\n normalize)\n\n tz = _infer_tz_from_endpoints(start, end, tz)\n\n if tz is not None:\n # Localize the start and end arguments\n start = _maybe_localize_point(\n start, getattr(start, 'tz', None), start, freq, tz\n )\n end = _maybe_localize_point(\n end, getattr(end, 'tz', None), end, freq, tz\n )\n if freq is not None:\n # We break Day arithmetic (fixed 24 hour) here and opt for\n # Day to mean calendar day (23/24/25 hour). 
Therefore, strip\n # tz info from start and day to avoid DST arithmetic\n if isinstance(freq, Day):\n if start is not None:\n start = start.tz_localize(None)\n if end is not None:\n end = end.tz_localize(None)\n # TODO: consider re-implementing _cached_range; GH#17914\n values, _tz = generate_regular_range(start, end, periods, freq)\n index = cls._simple_new(values, freq=freq, tz=_tz)\n\n if tz is not None and index.tz is None:\n arr = conversion.tz_localize_to_utc(\n index.asi8,\n tz, ambiguous=ambiguous, nonexistent=nonexistent)\n\n index = cls(arr)\n\n # index is localized datetime64 array -> have to convert\n # start/end as well to compare\n if start is not None:\n start = start.tz_localize(tz).asm8\n if end is not None:\n end = end.tz_localize(tz).asm8\n else:\n # Create a linearly spaced date_range in local time\n # Nanosecond-granularity timestamps aren't always correctly\n # representable with doubles, so we limit the range that we\n # pass to np.linspace as much as possible\n arr = np.linspace(\n 0, end.value - start.value,\n periods, dtype='int64') + start.value\n index = cls._simple_new(\n arr.astype('M8[ns]', copy=False), freq=None, tz=tz\n )\n\n if not left_closed and len(index) and index[0] == start:\n index = index[1:]\n if not right_closed and len(index) and index[-1] == end:\n index = index[:-1]\n\n return cls._simple_new(index.asi8, freq=freq, tz=tz)\n\n # -----------------------------------------------------------------\n # Descriptive Properties\n\n @property\n def _box_func(self):\n return lambda x: Timestamp(x, freq=self.freq, tz=self.tz)\n\n @property\n def dtype(self):\n # type: () -> Union[np.dtype, DatetimeTZDtype]\n \"\"\"\n The dtype for the DatetimeArray.\n\n Returns\n -------\n numpy.dtype or DatetimeTZDtype\n If the values are tz-naive, then ``np.dtype('datetime64[ns]')``\n is returned.\n\n If the values are tz-aware, then the ``DatetimeTZDtype``\n is returned.\n \"\"\"\n if self.tz is None:\n return _NS_DTYPE\n return DatetimeTZDtype('ns', self.tz)\n\n @property\n def tz(self):\n \"\"\"\n Return timezone, if any.\n\n Returns\n -------\n datetime.tzinfo, pytz.tzinfo.BaseTZInfo, dateutil.tz.tz.tzfile, or None\n Returns None when the array is tz-naive.\n \"\"\"\n # GH 18595\n return self._tz\n\n @tz.setter\n def tz(self, value):\n # GH 3746: Prevent localizing or converting the index by setting tz\n raise AttributeError(\"Cannot directly set timezone. 
Use tz_localize() \"\n \"or tz_convert() as appropriate\")\n\n @property\n def tzinfo(self):\n \"\"\"\n Alias for tz attribute\n \"\"\"\n return self.tz\n\n @property # NB: override with cache_readonly in immutable subclasses\n def _timezone(self):\n \"\"\"\n Comparable timezone both for pytz / dateutil\n \"\"\"\n return timezones.get_timezone(self.tzinfo)\n\n @property\n def offset(self):\n \"\"\"\n get/set the frequency of the instance\n \"\"\"\n msg = ('{cls}.offset has been deprecated and will be removed '\n 'in a future version; use {cls}.freq instead.'\n .format(cls=type(self).__name__))\n warnings.warn(msg, FutureWarning, stacklevel=2)\n return self.freq\n\n @offset.setter\n def offset(self, value):\n \"\"\"\n get/set the frequency of the instance\n \"\"\"\n msg = ('{cls}.offset has been deprecated and will be removed '\n 'in a future version; use {cls}.freq instead.'\n .format(cls=type(self).__name__))\n warnings.warn(msg, FutureWarning, stacklevel=2)\n self.freq = value\n\n @property # NB: override with cache_readonly in immutable subclasses\n def is_normalized(self):\n \"\"\"\n Returns True if all of the dates are at midnight (\"no time\")\n \"\"\"\n return conversion.is_date_array_normalized(self.asi8, self.tz)\n\n @property # NB: override with cache_readonly in immutable subclasses\n def _resolution(self):\n return libresolution.resolution(self.asi8, self.tz)\n\n # ----------------------------------------------------------------\n # Array-Like / EA-Interface Methods\n\n def __array__(self, dtype=None):\n if is_object_dtype(dtype):\n return np.array(list(self), dtype=object)\n elif is_int64_dtype(dtype):\n return self.asi8\n\n # TODO: warn that conversion may be lossy?\n return self._data.view(np.ndarray) # follow Index.__array__\n\n def __iter__(self):\n \"\"\"\n Return an iterator over the boxed values\n\n Yields\n -------\n tstamp : Timestamp\n \"\"\"\n\n # convert in chunks of 10k for efficiency\n data = self.asi8\n length = len(self)\n chunksize = 10000\n chunks = int(length / chunksize) + 1\n for i in range(chunks):\n start_i = i * chunksize\n end_i = min((i + 1) * chunksize, length)\n converted = tslib.ints_to_pydatetime(data[start_i:end_i],\n tz=self.tz, freq=self.freq,\n box=\"timestamp\")\n for v in converted:\n yield v\n\n # ----------------------------------------------------------------\n # ExtensionArray Interface\n\n @property\n def _ndarray_values(self):\n return self._data\n\n @Appender(dtl.DatetimeLikeArrayMixin._validate_fill_value.__doc__)\n def _validate_fill_value(self, fill_value):\n if isna(fill_value):\n fill_value = iNaT\n elif isinstance(fill_value, (datetime, np.datetime64)):\n self._assert_tzawareness_compat(fill_value)\n fill_value = Timestamp(fill_value).value\n else:\n raise ValueError(\"'fill_value' should be a Timestamp. 
\"\n \"Got '{got}'.\".format(got=fill_value))\n return fill_value\n\n # -----------------------------------------------------------------\n # Rendering Methods\n\n def _format_native_types(self, na_rep=u'NaT', date_format=None, **kwargs):\n from pandas.io.formats.format import _get_format_datetime64_from_values\n fmt = _get_format_datetime64_from_values(self, date_format)\n\n return tslib.format_array_from_datetime(self.asi8,\n tz=self.tz,\n format=fmt,\n na_rep=na_rep)\n\n # -----------------------------------------------------------------\n # Comparison Methods\n\n _create_comparison_method = classmethod(_dt_array_cmp)\n\n def _has_same_tz(self, other):\n zzone = self._timezone\n\n # vzone sholdn't be None if value is non-datetime like\n if isinstance(other, np.datetime64):\n # convert to Timestamp as np.datetime64 doesn't have tz attr\n other = Timestamp(other)\n vzone = timezones.get_timezone(getattr(other, 'tzinfo', '__no_tz__'))\n return zzone == vzone\n\n def _assert_tzawareness_compat(self, other):\n # adapted from _Timestamp._assert_tzawareness_compat\n other_tz = getattr(other, 'tzinfo', None)\n if is_datetime64tz_dtype(other):\n # Get tzinfo from Series dtype\n other_tz = other.dtype.tz\n if other is NaT:\n # pd.NaT quacks both aware and naive\n pass\n elif self.tz is None:\n if other_tz is not None:\n raise TypeError('Cannot compare tz-naive and tz-aware '\n 'datetime-like objects.')\n elif other_tz is None:\n raise TypeError('Cannot compare tz-naive and tz-aware '\n 'datetime-like objects')\n\n # -----------------------------------------------------------------\n # Arithmetic Methods\n\n def _sub_datetime_arraylike(self, other):\n \"\"\"subtract DatetimeArray/Index or ndarray[datetime64]\"\"\"\n if len(self) != len(other):\n raise ValueError(\"cannot add indices of unequal length\")\n\n if isinstance(other, np.ndarray):\n assert is_datetime64_dtype(other)\n other = type(self)(other)\n\n if not self._has_same_tz(other):\n # require tz compat\n raise TypeError(\"{cls} subtraction must have the same \"\n \"timezones or no timezones\"\n .format(cls=type(self).__name__))\n\n self_i8 = self.asi8\n other_i8 = other.asi8\n new_values = checked_add_with_arr(self_i8, -other_i8,\n arr_mask=self._isnan)\n if self._hasnans or other._hasnans:\n mask = (self._isnan) | (other._isnan)\n new_values[mask] = iNaT\n return new_values.view('timedelta64[ns]')\n\n def _add_offset(self, offset):\n assert not isinstance(offset, Tick)\n try:\n if self.tz is not None:\n values = self.tz_localize(None)\n else:\n values = self\n result = offset.apply_index(values)\n if self.tz is not None:\n result = result.tz_localize(self.tz)\n\n except NotImplementedError:\n warnings.warn(\"Non-vectorized DateOffset being applied to Series \"\n \"or DatetimeIndex\", PerformanceWarning)\n result = self.astype('O') + offset\n\n return type(self)._from_sequence(result, freq='infer')\n\n def _sub_datetimelike_scalar(self, other):\n # subtract a datetime from myself, yielding a ndarray[timedelta64[ns]]\n assert isinstance(other, (datetime, np.datetime64))\n assert other is not NaT\n other = Timestamp(other)\n if other is NaT:\n return self - NaT\n\n if not self._has_same_tz(other):\n # require tz compat\n raise TypeError(\"Timestamp subtraction must have the same \"\n \"timezones or no timezones\")\n\n i8 = self.asi8\n result = checked_add_with_arr(i8, -other.value,\n arr_mask=self._isnan)\n result = self._maybe_mask_results(result)\n return result.view('timedelta64[ns]')\n\n def _add_delta(self, delta):\n \"\"\"\n Add a 
timedelta-like, Tick, or TimedeltaIndex-like object\n to self, yielding a new DatetimeArray\n\n Parameters\n ----------\n other : {timedelta, np.timedelta64, Tick,\n TimedeltaIndex, ndarray[timedelta64]}\n\n Returns\n -------\n result : DatetimeArray\n \"\"\"\n new_values = super(DatetimeArrayMixin, self)._add_delta(delta)\n return type(self)._from_sequence(new_values, tz=self.tz, freq='infer')\n\n # -----------------------------------------------------------------\n # Timezone Conversion and Localization Methods\n\n def _local_timestamps(self):\n \"\"\"\n Convert to an i8 (unix-like nanosecond timestamp) representation\n while keeping the local timezone and not using UTC.\n This is used to calculate time-of-day information as if the timestamps\n were timezone-naive.\n \"\"\"\n return conversion.tz_convert(self.asi8, utc, self.tz)\n\n def tz_convert(self, tz):\n \"\"\"\n Convert tz-aware Datetime Array/Index from one time zone to another.\n\n Parameters\n ----------\n tz : string, pytz.timezone, dateutil.tz.tzfile or None\n Time zone for time. Corresponding timestamps would be converted\n to this time zone of the Datetime Array/Index. A `tz` of None will\n convert to UTC and remove the timezone information.\n\n Returns\n -------\n normalized : same type as self\n\n Raises\n ------\n TypeError\n If Datetime Array/Index is tz-naive.\n\n See Also\n --------\n DatetimeIndex.tz : A timezone that has a variable offset from UTC.\n DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a\n given time zone, or remove timezone from a tz-aware DatetimeIndex.\n\n Examples\n --------\n With the `tz` parameter, we can change the DatetimeIndex\n to other time zones:\n\n >>> dti = pd.DatetimeIndex(start='2014-08-01 09:00',\n ... freq='H', periods=3, tz='Europe/Berlin')\n\n >>> dti\n DatetimeIndex(['2014-08-01 09:00:00+02:00',\n '2014-08-01 10:00:00+02:00',\n '2014-08-01 11:00:00+02:00'],\n dtype='datetime64[ns, Europe/Berlin]', freq='H')\n\n >>> dti.tz_convert('US/Central')\n DatetimeIndex(['2014-08-01 02:00:00-05:00',\n '2014-08-01 03:00:00-05:00',\n '2014-08-01 04:00:00-05:00'],\n dtype='datetime64[ns, US/Central]', freq='H')\n\n With the ``tz=None``, we can remove the timezone (after converting\n to UTC if necessary):\n\n >>> dti = pd.DatetimeIndex(start='2014-08-01 09:00',freq='H',\n ... periods=3, tz='Europe/Berlin')\n\n >>> dti\n DatetimeIndex(['2014-08-01 09:00:00+02:00',\n '2014-08-01 10:00:00+02:00',\n '2014-08-01 11:00:00+02:00'],\n dtype='datetime64[ns, Europe/Berlin]', freq='H')\n\n >>> dti.tz_convert(None)\n DatetimeIndex(['2014-08-01 07:00:00',\n '2014-08-01 08:00:00',\n '2014-08-01 09:00:00'],\n dtype='datetime64[ns]', freq='H')\n \"\"\"\n tz = timezones.maybe_get_tz(tz)\n\n if self.tz is None:\n # tz naive, use tz_localize\n raise TypeError('Cannot convert tz-naive timestamps, use '\n 'tz_localize to localize')\n\n # No conversion since timestamps are all UTC to begin with\n return self._simple_new(self.asi8, tz=tz, freq=self.freq)\n\n def tz_localize(self, tz, ambiguous='raise', nonexistent='raise',\n errors=None):\n \"\"\"\n Localize tz-naive Datetime Array/Index to tz-aware\n Datetime Array/Index.\n\n This method takes a time zone (tz) naive Datetime Array/Index object\n and makes this time zone aware. It does not move the time to another\n time zone.\n Time zone localization helps to switch from time zone aware to time\n zone unaware objects.\n\n Parameters\n ----------\n tz : string, pytz.timezone, dateutil.tz.tzfile or None\n Time zone to convert timestamps to. 
Passing ``None`` will\n remove the time zone information preserving local time.\n ambiguous : 'infer', 'NaT', bool array, default 'raise'\n When clocks moved backward due to DST, ambiguous times may arise.\n For example in Central European Time (UTC+01), when going from\n 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at\n 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the\n `ambiguous` parameter dictates how ambiguous times should be\n handled.\n\n - 'infer' will attempt to infer fall dst-transition hours based on\n order\n - bool-ndarray where True signifies a DST time, False signifies a\n non-DST time (note that this flag is only applicable for\n ambiguous times)\n - 'NaT' will return NaT where there are ambiguous times\n - 'raise' will raise an AmbiguousTimeError if there are ambiguous\n times\n\n nonexistent : 'shift', 'NaT' default 'raise'\n A nonexistent time does not exist in a particular timezone\n where clocks moved forward due to DST.\n\n - 'shift' will shift the nonexistent times forward to the closest\n existing time\n - 'NaT' will return NaT where there are nonexistent times\n - 'raise' will raise an NonExistentTimeError if there are\n nonexistent times\n\n .. versionadded:: 0.24.0\n\n errors : {'raise', 'coerce'}, default None\n\n - 'raise' will raise a NonExistentTimeError if a timestamp is not\n valid in the specified time zone (e.g. due to a transition from\n or to DST time). Use ``nonexistent='raise'`` instead.\n - 'coerce' will return NaT if the timestamp can not be converted\n to the specified time zone. Use ``nonexistent='NaT'`` instead.\n\n .. deprecated:: 0.24.0\n\n Returns\n -------\n result : same type as self\n Array/Index converted to the specified time zone.\n\n Raises\n ------\n TypeError\n If the Datetime Array/Index is tz-aware and tz is not None.\n\n See Also\n --------\n DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from\n one time zone to another.\n\n Examples\n --------\n >>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3)\n >>> tz_naive\n DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',\n '2018-03-03 09:00:00'],\n dtype='datetime64[ns]', freq='D')\n\n Localize DatetimeIndex in US/Eastern time zone:\n\n >>> tz_aware = tz_naive.tz_localize(tz='US/Eastern')\n >>> tz_aware\n DatetimeIndex(['2018-03-01 09:00:00-05:00',\n '2018-03-02 09:00:00-05:00',\n '2018-03-03 09:00:00-05:00'],\n dtype='datetime64[ns, US/Eastern]', freq='D')\n\n With the ``tz=None``, we can remove the time zone information\n while keeping the local time (not converted to UTC):\n\n >>> tz_aware.tz_localize(None)\n DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',\n '2018-03-03 09:00:00'],\n dtype='datetime64[ns]', freq='D')\n\n Be careful with DST changes. When there is sequential data, pandas can\n infer the DST time:\n >>> s = pd.to_datetime(pd.Series([\n ... '2018-10-28 01:30:00',\n ... '2018-10-28 02:00:00',\n ... '2018-10-28 02:30:00',\n ... '2018-10-28 02:00:00',\n ... '2018-10-28 02:30:00',\n ... '2018-10-28 03:00:00',\n ... '2018-10-28 03:30:00']))\n >>> s.dt.tz_localize('CET', ambiguous='infer')\n 2018-10-28 01:30:00+02:00 0\n 2018-10-28 02:00:00+02:00 1\n 2018-10-28 02:30:00+02:00 2\n 2018-10-28 02:00:00+01:00 3\n 2018-10-28 02:30:00+01:00 4\n 2018-10-28 03:00:00+01:00 5\n 2018-10-28 03:30:00+01:00 6\n dtype: int64\n\n In some cases, inferring the DST is impossible. In such cases, you can\n pass an ndarray to the ambiguous parameter to set the DST explicitly\n\n >>> s = pd.to_datetime(pd.Series([\n ... 
'2018-10-28 01:20:00',\n ... '2018-10-28 02:36:00',\n ... '2018-10-28 03:46:00']))\n >>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False]))\n 0 2018-10-28 01:20:00+02:00\n 1 2018-10-28 02:36:00+02:00\n 2 2018-10-28 03:46:00+01:00\n dtype: datetime64[ns, CET]\n \"\"\"\n if errors is not None:\n warnings.warn(\"The errors argument is deprecated and will be \"\n \"removed in a future release. Use \"\n \"nonexistent='NaT' or nonexistent='raise' \"\n \"instead.\", FutureWarning)\n if errors == 'coerce':\n nonexistent = 'NaT'\n elif errors == 'raise':\n nonexistent = 'raise'\n else:\n raise ValueError(\"The errors argument must be either 'coerce' \"\n \"or 'raise'.\")\n\n if nonexistent not in ('raise', 'NaT', 'shift'):\n raise ValueError(\"The nonexistent argument must be one of 'raise',\"\n \" 'NaT' or 'shift'\")\n\n if self.tz is not None:\n if tz is None:\n new_dates = conversion.tz_convert(self.asi8, timezones.UTC,\n self.tz)\n else:\n raise TypeError(\"Already tz-aware, use tz_convert to convert.\")\n else:\n tz = timezones.maybe_get_tz(tz)\n # Convert to UTC\n\n new_dates = conversion.tz_localize_to_utc(\n self.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent,\n )\n new_dates = new_dates.view(_NS_DTYPE)\n return self._simple_new(new_dates, tz=tz, freq=self.freq)\n\n # ----------------------------------------------------------------\n # Conversion Methods - Vectorized analogues of Timestamp methods\n\n def to_pydatetime(self):\n \"\"\"\n Return Datetime Array/Index as object ndarray of datetime.datetime\n objects\n\n Returns\n -------\n datetimes : ndarray\n \"\"\"\n return tslib.ints_to_pydatetime(self.asi8, tz=self.tz)\n\n def normalize(self):\n \"\"\"\n Convert times to midnight.\n\n The time component of the date-time is converted to midnight i.e.\n 00:00:00. This is useful in cases, when the time does not matter.\n Length is unaltered. The timezones are unaffected.\n\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on Datetime Array/Index.\n\n Returns\n -------\n DatetimeArray, DatetimeIndex or Series\n The same type as the original data. Series will have the same\n name and index. DatetimeIndex will have the same name.\n\n See Also\n --------\n floor : Floor the datetimes to the specified freq.\n ceil : Ceil the datetimes to the specified freq.\n round : Round the datetimes to the specified freq.\n\n Examples\n --------\n >>> idx = pd.DatetimeIndex(start='2014-08-01 10:00', freq='H',\n ... 
periods=3, tz='Asia/Calcutta')\n >>> idx\n DatetimeIndex(['2014-08-01 10:00:00+05:30',\n '2014-08-01 11:00:00+05:30',\n '2014-08-01 12:00:00+05:30'],\n dtype='datetime64[ns, Asia/Calcutta]', freq='H')\n >>> idx.normalize()\n DatetimeIndex(['2014-08-01 00:00:00+05:30',\n '2014-08-01 00:00:00+05:30',\n '2014-08-01 00:00:00+05:30'],\n dtype='datetime64[ns, Asia/Calcutta]', freq=None)\n \"\"\"\n if self.tz is None or timezones.is_utc(self.tz):\n not_null = ~self.isna()\n DAY_NS = ccalendar.DAY_SECONDS * 1000000000\n new_values = self.asi8.copy()\n adjustment = (new_values[not_null] % DAY_NS)\n new_values[not_null] = new_values[not_null] - adjustment\n else:\n new_values = conversion.normalize_i8_timestamps(self.asi8, self.tz)\n return type(self)._from_sequence(new_values,\n freq='infer').tz_localize(self.tz)\n\n def to_period(self, freq=None):\n \"\"\"\n Cast to PeriodArray/Index at a particular frequency.\n\n Converts DatetimeArray/Index to PeriodArray/Index.\n\n Parameters\n ----------\n freq : string or Offset, optional\n One of pandas' :ref:`offset strings <timeseries.offset_aliases>`\n or an Offset object. Will be inferred by default.\n\n Returns\n -------\n PeriodArray/Index\n\n Raises\n ------\n ValueError\n When converting a DatetimeArray/Index with non-regular values,\n so that a frequency cannot be inferred.\n\n See Also\n --------\n PeriodIndex: Immutable ndarray holding ordinal values.\n DatetimeIndex.to_pydatetime: Return DatetimeIndex as object.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"y\": [1,2,3]},\n ... index=pd.to_datetime([\"2000-03-31 00:00:00\",\n ... \"2000-05-31 00:00:00\",\n ... \"2000-08-31 00:00:00\"]))\n >>> df.index.to_period(\"M\")\n PeriodIndex(['2000-03', '2000-05', '2000-08'],\n dtype='period[M]', freq='M')\n\n Infer the daily frequency\n\n >>> idx = pd.date_range(\"2017-01-01\", periods=2)\n >>> idx.to_period()\n PeriodIndex(['2017-01-01', '2017-01-02'],\n dtype='period[D]', freq='D')\n \"\"\"\n from pandas.core.arrays import PeriodArray\n\n if self.tz is not None:\n warnings.warn(\"Converting to PeriodArray/Index representation \"\n \"will drop timezone information.\", UserWarning)\n\n if freq is None:\n freq = self.freqstr or self.inferred_freq\n\n if freq is None:\n raise ValueError(\"You must pass a freq argument as \"\n \"current index has none.\")\n\n freq = get_period_alias(freq)\n\n return PeriodArray._from_datetime64(self._data, freq, tz=self.tz)\n\n def to_perioddelta(self, freq):\n \"\"\"\n Calculate TimedeltaArray of difference between index\n values and index converted to PeriodArray at specified\n freq. Used for vectorized offsets\n\n Parameters\n ----------\n freq : Period frequency\n\n Returns\n -------\n TimedeltaArray/Index\n \"\"\"\n # TODO: consider privatizing (discussion in GH#23113)\n from pandas.core.arrays.timedeltas import TimedeltaArrayMixin\n i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8\n m8delta = i8delta.view('m8[ns]')\n return TimedeltaArrayMixin(m8delta)\n\n # -----------------------------------------------------------------\n # Properties - Vectorized Timestamp Properties/Methods\n\n def month_name(self, locale=None):\n \"\"\"\n Return the month names of the DateTimeIndex with specified locale.\n\n .. 
versionadded:: 0.23.0\n\n Parameters\n ----------\n locale : str, optional\n Locale determining the language in which to return the month name.\n Default is English locale.\n\n Returns\n -------\n Index\n Index of month names.\n\n Examples\n --------\n >>> idx = pd.DatetimeIndex(start='2018-01', freq='M', periods=3)\n >>> idx\n DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],\n dtype='datetime64[ns]', freq='M')\n >>> idx.month_name()\n Index(['January', 'February', 'March'], dtype='object')\n \"\"\"\n if self.tz is not None and not timezones.is_utc(self.tz):\n values = self._local_timestamps()\n else:\n values = self.asi8\n\n result = fields.get_date_name_field(values, 'month_name',\n locale=locale)\n result = self._maybe_mask_results(result, fill_value=None)\n return result\n\n def day_name(self, locale=None):\n \"\"\"\n Return the day names of the DateTimeIndex with specified locale.\n\n .. versionadded:: 0.23.0\n\n Parameters\n ----------\n locale : str, optional\n Locale determining the language in which to return the day name.\n Default is English locale.\n\n Returns\n -------\n Index\n Index of day names.\n\n Examples\n --------\n >>> idx = pd.DatetimeIndex(start='2018-01-01', freq='D', periods=3)\n >>> idx\n DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],\n dtype='datetime64[ns]', freq='D')\n >>> idx.day_name()\n Index(['Monday', 'Tuesday', 'Wednesday'], dtype='object')\n \"\"\"\n if self.tz is not None and not timezones.is_utc(self.tz):\n values = self._local_timestamps()\n else:\n values = self.asi8\n\n result = fields.get_date_name_field(values, 'day_name',\n locale=locale)\n result = self._maybe_mask_results(result, fill_value=None)\n return result\n\n @property\n def time(self):\n \"\"\"\n Returns numpy array of datetime.time. The time part of the Timestamps.\n \"\"\"\n # If the Timestamps have a timezone that is not UTC,\n # convert them into their i8 representation while\n # keeping their timezone and not using UTC\n if self.tz is not None and not timezones.is_utc(self.tz):\n timestamps = self._local_timestamps()\n else:\n timestamps = self.asi8\n\n return tslib.ints_to_pydatetime(timestamps, box=\"time\")\n\n @property\n def timetz(self):\n \"\"\"\n Returns numpy array of datetime.time also containing timezone\n information. The time part of the Timestamps.\n \"\"\"\n return tslib.ints_to_pydatetime(self.asi8, self.tz, box=\"time\")\n\n @property\n def date(self):\n \"\"\"\n Returns numpy array of python datetime.date objects (namely, the date\n part of Timestamps without timezone information).\n \"\"\"\n # If the Timestamps have a timezone that is not UTC,\n # convert them into their i8 representation while\n # keeping their timezone and not using UTC\n if self.tz is not None and not timezones.is_utc(self.tz):\n timestamps = self._local_timestamps()\n else:\n timestamps = self.asi8\n\n return tslib.ints_to_pydatetime(timestamps, box=\"date\")\n\n year = _field_accessor('year', 'Y', \"The year of the datetime.\")\n month = _field_accessor('month', 'M',\n \"The month as January=1, December=12. 
\")\n day = _field_accessor('day', 'D', \"The days of the datetime.\")\n hour = _field_accessor('hour', 'h', \"The hours of the datetime.\")\n minute = _field_accessor('minute', 'm', \"The minutes of the datetime.\")\n second = _field_accessor('second', 's', \"The seconds of the datetime.\")\n microsecond = _field_accessor('microsecond', 'us',\n \"The microseconds of the datetime.\")\n nanosecond = _field_accessor('nanosecond', 'ns',\n \"The nanoseconds of the datetime.\")\n weekofyear = _field_accessor('weekofyear', 'woy',\n \"The week ordinal of the year.\")\n week = weekofyear\n _dayofweek_doc = \"\"\"\n The day of the week with Monday=0, Sunday=6.\n\n Return the day of the week. It is assumed the week starts on\n Monday, which is denoted by 0 and ends on Sunday which is denoted\n by 6. This method is available on both Series with datetime\n values (using the `dt` accessor) or DatetimeIndex.\n\n Returns\n -------\n Series or Index\n Containing integers indicating the day number.\n\n See Also\n --------\n Series.dt.dayofweek : Alias.\n Series.dt.weekday : Alias.\n Series.dt.day_name : Returns the name of the day of the week.\n\n Examples\n --------\n >>> s = pd.date_range('2016-12-31', '2017-01-08', freq='D').to_series()\n >>> s.dt.dayofweek\n 2016-12-31 5\n 2017-01-01 6\n 2017-01-02 0\n 2017-01-03 1\n 2017-01-04 2\n 2017-01-05 3\n 2017-01-06 4\n 2017-01-07 5\n 2017-01-08 6\n Freq: D, dtype: int64\n \"\"\"\n dayofweek = _field_accessor('dayofweek', 'dow', _dayofweek_doc)\n weekday = dayofweek\n\n weekday_name = _field_accessor(\n 'weekday_name',\n 'weekday_name',\n \"The name of day in a week (ex: Friday)\\n\\n.. deprecated:: 0.23.0\")\n\n dayofyear = _field_accessor('dayofyear', 'doy',\n \"The ordinal day of the year.\")\n quarter = _field_accessor('quarter', 'q', \"The quarter of the date.\")\n days_in_month = _field_accessor(\n 'days_in_month',\n 'dim',\n \"The number of days in the month.\")\n daysinmonth = days_in_month\n _is_month_doc = \"\"\"\n Indicates whether the date is the {first_or_last} day of the month.\n\n Returns\n -------\n Series or array\n For Series, returns a Series with boolean values.\n For DatetimeIndex, returns a boolean array.\n\n See Also\n --------\n is_month_start : Return a boolean indicating whether the date\n is the first day of the month.\n is_month_end : Return a boolean indicating whether the date\n is the last day of the month.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> s = pd.Series(pd.date_range(\"2018-02-27\", periods=3))\n >>> s\n 0 2018-02-27\n 1 2018-02-28\n 2 2018-03-01\n dtype: datetime64[ns]\n >>> s.dt.is_month_start\n 0 False\n 1 False\n 2 True\n dtype: bool\n >>> s.dt.is_month_end\n 0 False\n 1 True\n 2 False\n dtype: bool\n\n >>> idx = pd.date_range(\"2018-02-27\", periods=3)\n >>> idx.is_month_start\n array([False, False, True])\n >>> idx.is_month_end\n array([False, True, False])\n \"\"\"\n is_month_start = _field_accessor(\n 'is_month_start',\n 'is_month_start',\n _is_month_doc.format(first_or_last='first'))\n\n is_month_end = _field_accessor(\n 'is_month_end',\n 'is_month_end',\n _is_month_doc.format(first_or_last='last'))\n\n is_quarter_start = _field_accessor(\n 'is_quarter_start',\n 'is_quarter_start',\n \"\"\"\n Indicator for whether the date is the first day of a quarter.\n\n Returns\n -------\n is_quarter_start : Series or DatetimeIndex\n The same type as the original data with boolean values. 
Series will\n have the same name and index. DatetimeIndex will have the same\n name.\n\n See Also\n --------\n quarter : Return the quarter of the date.\n is_quarter_end : Similar property for indicating the quarter start.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> df = pd.DataFrame({'dates': pd.date_range(\"2017-03-30\",\n ... periods=4)})\n >>> df.assign(quarter=df.dates.dt.quarter,\n ... is_quarter_start=df.dates.dt.is_quarter_start)\n dates quarter is_quarter_start\n 0 2017-03-30 1 False\n 1 2017-03-31 1 False\n 2 2017-04-01 2 True\n 3 2017-04-02 2 False\n\n >>> idx = pd.date_range('2017-03-30', periods=4)\n >>> idx\n DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],\n dtype='datetime64[ns]', freq='D')\n\n >>> idx.is_quarter_start\n array([False, False, True, False])\n \"\"\")\n is_quarter_end = _field_accessor(\n 'is_quarter_end',\n 'is_quarter_end',\n \"\"\"\n Indicator for whether the date is the last day of a quarter.\n\n Returns\n -------\n is_quarter_end : Series or DatetimeIndex\n The same type as the original data with boolean values. Series will\n have the same name and index. DatetimeIndex will have the same\n name.\n\n See Also\n --------\n quarter : Return the quarter of the date.\n is_quarter_start : Similar property indicating the quarter start.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> df = pd.DataFrame({'dates': pd.date_range(\"2017-03-30\",\n ... periods=4)})\n >>> df.assign(quarter=df.dates.dt.quarter,\n ... is_quarter_end=df.dates.dt.is_quarter_end)\n dates quarter is_quarter_end\n 0 2017-03-30 1 False\n 1 2017-03-31 1 True\n 2 2017-04-01 2 False\n 3 2017-04-02 2 False\n\n >>> idx = pd.date_range('2017-03-30', periods=4)\n >>> idx\n DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],\n dtype='datetime64[ns]', freq='D')\n\n >>> idx.is_quarter_end\n array([False, True, False, False])\n \"\"\")\n is_year_start = _field_accessor(\n 'is_year_start',\n 'is_year_start',\n \"\"\"\n Indicate whether the date is the first day of a year.\n\n Returns\n -------\n Series or DatetimeIndex\n The same type as the original data with boolean values. Series will\n have the same name and index. DatetimeIndex will have the same\n name.\n\n See Also\n --------\n is_year_end : Similar property indicating the last day of the year.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> dates = pd.Series(pd.date_range(\"2017-12-30\", periods=3))\n >>> dates\n 0 2017-12-30\n 1 2017-12-31\n 2 2018-01-01\n dtype: datetime64[ns]\n\n >>> dates.dt.is_year_start\n 0 False\n 1 False\n 2 True\n dtype: bool\n\n >>> idx = pd.date_range(\"2017-12-30\", periods=3)\n >>> idx\n DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],\n dtype='datetime64[ns]', freq='D')\n\n >>> idx.is_year_start\n array([False, False, True])\n \"\"\")\n is_year_end = _field_accessor(\n 'is_year_end',\n 'is_year_end',\n \"\"\"\n Indicate whether the date is the last day of the year.\n\n Returns\n -------\n Series or DatetimeIndex\n The same type as the original data with boolean values. Series will\n have the same name and index. 
DatetimeIndex will have the same\n name.\n\n See Also\n --------\n is_year_start : Similar property indicating the start of the year.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> dates = pd.Series(pd.date_range(\"2017-12-30\", periods=3))\n >>> dates\n 0 2017-12-30\n 1 2017-12-31\n 2 2018-01-01\n dtype: datetime64[ns]\n\n >>> dates.dt.is_year_end\n 0 False\n 1 True\n 2 False\n dtype: bool\n\n >>> idx = pd.date_range(\"2017-12-30\", periods=3)\n >>> idx\n DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],\n dtype='datetime64[ns]', freq='D')\n\n >>> idx.is_year_end\n array([False, True, False])\n \"\"\")\n is_leap_year = _field_accessor(\n 'is_leap_year',\n 'is_leap_year',\n \"\"\"\n Boolean indicator if the date belongs to a leap year.\n\n A leap year is a year, which has 366 days (instead of 365) including\n 29th of February as an intercalary day.\n Leap years are years which are multiples of four with the exception\n of years divisible by 100 but not by 400.\n\n Returns\n -------\n Series or ndarray\n Booleans indicating if dates belong to a leap year.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> idx = pd.date_range(\"2012-01-01\", \"2015-01-01\", freq=\"Y\")\n >>> idx\n DatetimeIndex(['2012-12-31', '2013-12-31', '2014-12-31'],\n dtype='datetime64[ns]', freq='A-DEC')\n >>> idx.is_leap_year\n array([ True, False, False], dtype=bool)\n\n >>> dates = pd.Series(idx)\n >>> dates_series\n 0 2012-12-31\n 1 2013-12-31\n 2 2014-12-31\n dtype: datetime64[ns]\n >>> dates_series.dt.is_leap_year\n 0 True\n 1 False\n 2 False\n dtype: bool\n \"\"\")\n\n def to_julian_date(self):\n \"\"\"\n Convert Datetime Array to float64 ndarray of Julian Dates.\n 0 Julian date is noon January 1, 4713 BC.\n http://en.wikipedia.org/wiki/Julian_day\n \"\"\"\n\n # http://mysite.verizon.net/aesir_research/date/jdalg2.htm\n year = np.asarray(self.year)\n month = np.asarray(self.month)\n day = np.asarray(self.day)\n testarr = month < 3\n year[testarr] -= 1\n month[testarr] += 12\n return (day +\n np.fix((153 * month - 457) / 5) +\n 365 * year +\n np.floor(year / 4) -\n np.floor(year / 100) +\n np.floor(year / 400) +\n 1721118.5 +\n (self.hour +\n self.minute / 60.0 +\n self.second / 3600.0 +\n self.microsecond / 3600.0 / 1e+6 +\n self.nanosecond / 3600.0 / 1e+9\n ) / 24.0)\n\n\nDatetimeArrayMixin._add_comparison_ops()\n\n\n# -------------------------------------------------------------------\n# Constructor Helpers\n\ndef sequence_to_dt64ns(data, dtype=None, copy=False,\n tz=None,\n dayfirst=False, yearfirst=False, ambiguous='raise'):\n \"\"\"\n Parameters\n ----------\n data : list-like\n dtype : dtype, str, or None, default None\n copy : bool, default False\n tz : tzinfo, str, or None, default None\n dayfirst : bool, default False\n yearfirst : bool, default False\n ambiguous : str, bool, or arraylike, default 'raise'\n See pandas._libs.tslibs.conversion.tz_localize_to_utc\n\n Returns\n -------\n result : numpy.ndarray\n The sequence converted to a numpy array with dtype ``datetime64[ns]``.\n tz : tzinfo or None\n Either the user-provided tzinfo or one inferred from the data.\n inferred_freq : Tick or None\n The inferred frequency of the sequence.\n\n Raises\n ------\n TypeError : PeriodDType data is passed\n \"\"\"\n\n inferred_freq = None\n\n if not hasattr(data, \"dtype\"):\n # e.g. 
list, tuple\n if np.ndim(data) == 0:\n # i.e. generator\n data = list(data)\n data = np.asarray(data)\n copy = False\n elif isinstance(data, ABCSeries):\n data = data._values\n\n if hasattr(data, \"freq\"):\n # i.e. DatetimeArray/Index\n inferred_freq = data.freq\n\n # if dtype has an embedded tz, capture it\n tz = validate_tz_from_dtype(dtype, tz)\n\n # By this point we are assured to have either a numpy array or Index\n data, copy = maybe_convert_dtype(data, copy)\n\n if is_object_dtype(data) or is_string_dtype(data):\n # TODO: We do not have tests specific to string-dtypes,\n # also complex or categorical or other extension\n copy = False\n if lib.infer_dtype(data) == 'integer':\n data = data.astype(np.int64)\n else:\n # data comes back here as either i8 to denote UTC timestamps\n # or M8[ns] to denote wall times\n data, inferred_tz = objects_to_datetime64ns(\n data, dayfirst=dayfirst, yearfirst=yearfirst)\n tz = maybe_infer_tz(tz, inferred_tz)\n\n if is_datetime64tz_dtype(data):\n tz = maybe_infer_tz(tz, data.tz)\n result = data._data\n\n elif is_datetime64_dtype(data):\n # tz-naive DatetimeArray/Index or ndarray[datetime64]\n data = getattr(data, \"_data\", data)\n if data.dtype != _NS_DTYPE:\n data = conversion.ensure_datetime64ns(data)\n\n if tz is not None:\n # Convert tz-naive to UTC\n tz = timezones.maybe_get_tz(tz)\n data = conversion.tz_localize_to_utc(data.view('i8'), tz,\n ambiguous=ambiguous)\n data = data.view(_NS_DTYPE)\n\n assert data.dtype == _NS_DTYPE, data.dtype\n result = data\n\n else:\n # must be integer dtype otherwise\n # assume this data are epoch timestamps\n if data.dtype != _INT64_DTYPE:\n data = data.astype(np.int64, copy=False)\n result = data.view(_NS_DTYPE)\n\n if copy:\n # TODO: should this be deepcopy?\n result = result.copy()\n\n assert isinstance(result, np.ndarray), type(result)\n assert result.dtype == 'M8[ns]', result.dtype\n\n # We have to call this again after possibly inferring a tz above\n validate_tz_from_dtype(dtype, tz)\n\n return result, tz, inferred_freq\n\n\ndef objects_to_datetime64ns(data, dayfirst, yearfirst,\n utc=False, errors=\"raise\",\n require_iso8601=False, allow_object=False):\n \"\"\"\n Convert data to array of timestamps.\n\n Parameters\n ----------\n data : np.ndarray[object]\n dayfirst : bool\n yearfirst : bool\n utc : bool, default False\n Whether to convert timezone-aware timestamps to UTC\n errors : {'raise', 'ignore', 'coerce'}\n allow_object : bool\n Whether to return an object-dtype ndarray instead of raising if the\n data contains more than one timezone.\n\n Returns\n -------\n result : ndarray\n np.int64 dtype if returned values represent UTC timestamps\n np.datetime64[ns] if returned values represent wall times\n object if mixed timezones\n inferred_tz : tzinfo or None\n\n Raises\n ------\n ValueError : if data cannot be converted to datetimes\n \"\"\"\n assert errors in [\"raise\", \"ignore\", \"coerce\"]\n\n # if str-dtype, convert\n data = np.array(data, copy=False, dtype=np.object_)\n\n try:\n result, tz_parsed = tslib.array_to_datetime(\n data,\n errors=errors,\n utc=utc,\n dayfirst=dayfirst,\n yearfirst=yearfirst,\n require_iso8601=require_iso8601\n )\n except ValueError as e:\n try:\n values, tz_parsed = conversion.datetime_to_datetime64(data)\n # If tzaware, these values represent unix timestamps, so we\n # return them as i8 to distinguish from wall times\n return values.view('i8'), tz_parsed\n except (ValueError, TypeError):\n raise e\n\n if tz_parsed is not None:\n # We can take a shortcut since the 
datetime64 numpy array\n # is in UTC\n # Return i8 values to denote unix timestamps\n return result.view('i8'), tz_parsed\n elif is_datetime64_dtype(result):\n # returning M8[ns] denotes wall-times; since tz is None\n # the distinction is a thin one\n return result, tz_parsed\n elif is_object_dtype(result):\n # GH#23675 when called via `pd.to_datetime`, returning an object-dtype\n # array is allowed. When called via `pd.DatetimeIndex`, we can\n # only accept datetime64 dtype, so raise TypeError if object-dtype\n # is returned, as that indicates the values can be recognized as\n # datetimes but they have conflicting timezones/awareness\n if allow_object:\n return result, tz_parsed\n raise TypeError(result)\n else: # pragma: no cover\n # GH#23675 this TypeError should never be hit, whereas the TypeError\n # in the object-dtype branch above is reachable.\n raise TypeError(result)\n\n\ndef maybe_convert_dtype(data, copy):\n \"\"\"\n Convert data based on dtype conventions, issuing deprecation warnings\n or errors where appropriate.\n\n Parameters\n ----------\n data : np.ndarray or pd.Index\n copy : bool\n\n Returns\n -------\n data : np.ndarray or pd.Index\n copy : bool\n\n Raises\n ------\n TypeError : PeriodDType data is passed\n \"\"\"\n if is_float_dtype(data):\n # Note: we must cast to datetime64[ns] here in order to treat these\n # as wall-times instead of UTC timestamps.\n data = data.astype(_NS_DTYPE)\n copy = False\n # TODO: deprecate this behavior to instead treat symmetrically\n # with integer dtypes. See discussion in GH#23675\n\n elif is_timedelta64_dtype(data):\n warnings.warn(\"Passing timedelta64-dtype data is deprecated, will \"\n \"raise a TypeError in a future version\",\n FutureWarning, stacklevel=5)\n data = data.view(_NS_DTYPE)\n\n elif is_period_dtype(data):\n # Note: without explicitly raising here, PeriondIndex\n # test_setops.test_join_does_not_recur fails\n raise TypeError(\"Passing PeriodDtype data is invalid. \"\n \"Use `data.to_timestamp()` instead\")\n\n elif is_categorical_dtype(data):\n # GH#18664 preserve tz in going DTI->Categorical->DTI\n # TODO: cases where we need to do another pass through this func,\n # e.g. 
the categories are timedelta64s\n data = data.categories.take(data.codes, fill_value=NaT)\n copy = False\n\n elif is_extension_type(data) and not is_datetime64tz_dtype(data):\n # Includes categorical\n # TODO: We have no tests for these\n data = np.array(data, dtype=np.object_)\n copy = False\n\n return data, copy\n\n\n# -------------------------------------------------------------------\n# Validation and Inference\n\ndef maybe_infer_tz(tz, inferred_tz):\n \"\"\"\n If a timezone is inferred from data, check that it is compatible with\n the user-provided timezone, if any.\n\n Parameters\n ----------\n tz : tzinfo or None\n inferred_tz : tzinfo or None\n\n Returns\n -------\n tz : tzinfo or None\n\n Raises\n ------\n TypeError : if both timezones are present but do not match\n \"\"\"\n if tz is None:\n tz = inferred_tz\n elif inferred_tz is None:\n pass\n elif not timezones.tz_compare(tz, inferred_tz):\n raise TypeError('data is already tz-aware {inferred_tz}, unable to '\n 'set specified tz: {tz}'\n .format(inferred_tz=inferred_tz, tz=tz))\n return tz\n\n\ndef validate_tz_from_dtype(dtype, tz):\n \"\"\"\n If the given dtype is a DatetimeTZDtype, extract the implied\n tzinfo object from it and check that it does not conflict with the given\n tz.\n\n Parameters\n ----------\n dtype : dtype, str\n tz : None, tzinfo\n\n Returns\n -------\n tz : consensus tzinfo\n\n Raises\n ------\n ValueError : on tzinfo mismatch\n \"\"\"\n if dtype is not None:\n if isinstance(dtype, compat.string_types):\n try:\n dtype = DatetimeTZDtype.construct_from_string(dtype)\n except TypeError:\n # Things like `datetime64[ns]`, which is OK for the\n # constructors, but also nonsense, which should be validated\n # but not by us. We *do* allow non-existent tz errors to\n # go through\n pass\n dtz = getattr(dtype, 'tz', None)\n if dtz is not None:\n if tz is not None and not timezones.tz_compare(tz, dtz):\n raise ValueError(\"cannot supply both a tz and a dtype\"\n \" with a tz\")\n tz = dtz\n\n if tz is not None and is_datetime64_dtype(dtype):\n # We also need to check for the case where the user passed a\n # tz-naive dtype (i.e. datetime64[ns])\n if tz is not None and not timezones.tz_compare(tz, dtz):\n raise ValueError(\"cannot supply both a tz and a \"\n \"timezone-naive dtype (i.e. datetime64[ns]\")\n\n return tz\n\n\ndef _infer_tz_from_endpoints(start, end, tz):\n \"\"\"\n If a timezone is not explicitly given via `tz`, see if one can\n be inferred from the `start` and `end` endpoints. 
If more than one\n of these inputs provides a timezone, require that they all agree.\n\n Parameters\n ----------\n start : Timestamp\n end : Timestamp\n tz : tzinfo or None\n\n Returns\n -------\n tz : tzinfo or None\n\n Raises\n ------\n TypeError : if start and end timezones do not agree\n \"\"\"\n try:\n inferred_tz = timezones.infer_tzinfo(start, end)\n except Exception:\n raise TypeError('Start and end cannot both be tz-aware with '\n 'different timezones')\n\n inferred_tz = timezones.maybe_get_tz(inferred_tz)\n tz = timezones.maybe_get_tz(tz)\n\n if tz is not None and inferred_tz is not None:\n if not timezones.tz_compare(inferred_tz, tz):\n raise AssertionError(\"Inferred time zone not equal to passed \"\n \"time zone\")\n\n elif inferred_tz is not None:\n tz = inferred_tz\n\n return tz\n\n\ndef _maybe_normalize_endpoints(start, end, normalize):\n _normalized = True\n\n if start is not None:\n if normalize:\n start = normalize_date(start)\n _normalized = True\n else:\n _normalized = _normalized and start.time() == _midnight\n\n if end is not None:\n if normalize:\n end = normalize_date(end)\n _normalized = True\n else:\n _normalized = _normalized and end.time() == _midnight\n\n return start, end, _normalized\n\n\ndef _maybe_localize_point(ts, is_none, is_not_none, freq, tz):\n \"\"\"\n Localize a start or end Timestamp to the timezone of the corresponding\n start or end Timestamp\n\n Parameters\n ----------\n ts : start or end Timestamp to potentially localize\n is_none : argument that should be None\n is_not_none : argument that should not be None\n freq : Tick, DateOffset, or None\n tz : str, timezone object or None\n\n Returns\n -------\n ts : Timestamp\n \"\"\"\n # Make sure start and end are timezone localized if:\n # 1) freq = a Timedelta-like frequency (Tick)\n # 2) freq = None i.e. generating a linspaced range\n if isinstance(freq, Tick) or freq is None:\n localize_args = {'tz': tz, 'ambiguous': False}\n else:\n localize_args = {'tz': None}\n if is_none is None and is_not_none is not None:\n ts = ts.tz_localize(**localize_args)\n return ts\n",
"\"\"\"\ndatetimelike delegation\n\"\"\"\nimport numpy as np\n\nfrom pandas.core.dtypes.common import (\n is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype,\n is_datetime_arraylike, is_integer_dtype, is_list_like, is_period_arraylike,\n is_timedelta64_dtype)\nfrom pandas.core.dtypes.generic import ABCSeries\n\nfrom pandas.core.accessor import PandasDelegate, delegate_names\nfrom pandas.core.algorithms import take_1d\nfrom pandas.core.base import NoNewAttributesMixin, PandasObject\nfrom pandas.core.indexes.datetimes import DatetimeIndex\nfrom pandas.core.indexes.period import PeriodArray\nfrom pandas.core.indexes.timedeltas import TimedeltaIndex\n\n\nclass Properties(PandasDelegate, PandasObject, NoNewAttributesMixin):\n\n def __init__(self, data, orig):\n if not isinstance(data, ABCSeries):\n raise TypeError(\"cannot convert an object of type {0} to a \"\n \"datetimelike index\".format(type(data)))\n\n self._parent = data\n self.orig = orig\n self.name = getattr(data, 'name', None)\n self._freeze()\n\n def _get_values(self):\n data = self._parent\n if is_datetime64_dtype(data.dtype):\n return DatetimeIndex(data, copy=False, name=self.name)\n\n elif is_datetime64tz_dtype(data.dtype):\n return DatetimeIndex(data, copy=False, name=self.name)\n\n elif is_timedelta64_dtype(data.dtype):\n return TimedeltaIndex(data, copy=False, name=self.name)\n\n else:\n if is_period_arraylike(data):\n # TODO: use to_period_array\n return PeriodArray(data, copy=False)\n if is_datetime_arraylike(data):\n return DatetimeIndex(data, copy=False, name=self.name)\n\n raise TypeError(\"cannot convert an object of type {0} to a \"\n \"datetimelike index\".format(type(data)))\n\n def _delegate_property_get(self, name):\n from pandas import Series\n values = self._get_values()\n\n result = getattr(values, name)\n\n # maybe need to upcast (ints)\n if isinstance(result, np.ndarray):\n if is_integer_dtype(result):\n result = result.astype('int64')\n elif not is_list_like(result):\n return result\n\n result = np.asarray(result)\n\n # blow up if we operate on categories\n if self.orig is not None:\n result = take_1d(result, self.orig.cat.codes)\n index = self.orig.index\n else:\n index = self._parent.index\n # return the result as a Series, which is by definition a copy\n result = Series(result, index=index, name=self.name)\n\n # setting this object will show a SettingWithCopyWarning/Error\n result._is_copy = (\"modifications to a property of a datetimelike \"\n \"object are not supported and are discarded. \"\n \"Change values on the original.\")\n\n return result\n\n def _delegate_property_set(self, name, value, *args, **kwargs):\n raise ValueError(\"modifications to a property of a datetimelike \"\n \"object are not supported. Change values on the \"\n \"original.\")\n\n def _delegate_method(self, name, *args, **kwargs):\n from pandas import Series\n values = self._get_values()\n\n method = getattr(values, name)\n result = method(*args, **kwargs)\n\n if not is_list_like(result):\n return result\n\n result = Series(result, index=self._parent.index, name=self.name)\n\n # setting this object will show a SettingWithCopyWarning/Error\n result._is_copy = (\"modifications to a method of a datetimelike \"\n \"object are not supported and are discarded. 
\"\n \"Change values on the original.\")\n\n return result\n\n\n@delegate_names(delegate=DatetimeIndex,\n accessors=DatetimeIndex._datetimelike_ops,\n typ=\"property\")\n@delegate_names(delegate=DatetimeIndex,\n accessors=DatetimeIndex._datetimelike_methods,\n typ=\"method\")\nclass DatetimeProperties(Properties):\n \"\"\"\n Accessor object for datetimelike properties of the Series values.\n\n Examples\n --------\n >>> s.dt.hour\n >>> s.dt.second\n >>> s.dt.quarter\n\n Returns a Series indexed like the original Series.\n Raises TypeError if the Series does not contain datetimelike values.\n \"\"\"\n\n def to_pydatetime(self):\n \"\"\"\n Return the data as an array of native Python datetime objects.\n\n Timezone information is retained if present.\n\n .. warning::\n\n Python's datetime uses microsecond resolution, which is lower than\n pandas (nanosecond). The values are truncated.\n\n Returns\n -------\n numpy.ndarray\n object dtype array containing native Python datetime objects.\n\n See Also\n --------\n datetime.datetime : Standard library value for a datetime.\n\n Examples\n --------\n >>> s = pd.Series(pd.date_range('20180310', periods=2))\n >>> s\n 0 2018-03-10\n 1 2018-03-11\n dtype: datetime64[ns]\n\n >>> s.dt.to_pydatetime()\n array([datetime.datetime(2018, 3, 10, 0, 0),\n datetime.datetime(2018, 3, 11, 0, 0)], dtype=object)\n\n pandas' nanosecond precision is truncated to microseconds.\n\n >>> s = pd.Series(pd.date_range('20180310', periods=2, freq='ns'))\n >>> s\n 0 2018-03-10 00:00:00.000000000\n 1 2018-03-10 00:00:00.000000001\n dtype: datetime64[ns]\n\n >>> s.dt.to_pydatetime()\n array([datetime.datetime(2018, 3, 10, 0, 0),\n datetime.datetime(2018, 3, 10, 0, 0)], dtype=object)\n \"\"\"\n return self._get_values().to_pydatetime()\n\n @property\n def freq(self):\n return self._get_values().inferred_freq\n\n\n@delegate_names(delegate=TimedeltaIndex,\n accessors=TimedeltaIndex._datetimelike_ops,\n typ=\"property\")\n@delegate_names(delegate=TimedeltaIndex,\n accessors=TimedeltaIndex._datetimelike_methods,\n typ=\"method\")\nclass TimedeltaProperties(Properties):\n \"\"\"\n Accessor object for datetimelike properties of the Series values.\n\n Examples\n --------\n >>> s.dt.hours\n >>> s.dt.seconds\n\n Returns a Series indexed like the original Series.\n Raises TypeError if the Series does not contain datetimelike values.\n \"\"\"\n\n def to_pytimedelta(self):\n \"\"\"\n Return an array of native `datetime.timedelta` objects.\n\n Python's standard `datetime` library uses a different representation\n timedelta's. 
This method converts a Series of pandas Timedeltas\n to `datetime.timedelta` format with the same length as the original\n Series.\n\n Returns\n -------\n a : numpy.ndarray\n 1D array containing data with `datetime.timedelta` type.\n\n See Also\n --------\n datetime.timedelta\n\n Examples\n --------\n >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d'))\n >>> s\n 0 0 days\n 1 1 days\n 2 2 days\n 3 3 days\n 4 4 days\n dtype: timedelta64[ns]\n\n >>> s.dt.to_pytimedelta()\n array([datetime.timedelta(0), datetime.timedelta(1),\n datetime.timedelta(2), datetime.timedelta(3),\n datetime.timedelta(4)], dtype=object)\n \"\"\"\n return self._get_values().to_pytimedelta()\n\n @property\n def components(self):\n \"\"\"\n Return a Dataframe of the components of the Timedeltas.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='s'))\n >>> s\n 0 00:00:00\n 1 00:00:01\n 2 00:00:02\n 3 00:00:03\n 4 00:00:04\n dtype: timedelta64[ns]\n >>> s.dt.components\n days hours minutes seconds milliseconds microseconds nanoseconds\n 0 0 0 0 0 0 0 0\n 1 0 0 0 1 0 0 0\n 2 0 0 0 2 0 0 0\n 3 0 0 0 3 0 0 0\n 4 0 0 0 4 0 0 0\n \"\"\" # noqa: E501\n return self._get_values().components.set_index(self._parent.index)\n\n @property\n def freq(self):\n return self._get_values().inferred_freq\n\n\n@delegate_names(delegate=PeriodArray,\n accessors=PeriodArray._datetimelike_ops,\n typ=\"property\")\n@delegate_names(delegate=PeriodArray,\n accessors=PeriodArray._datetimelike_methods,\n typ=\"method\")\nclass PeriodProperties(Properties):\n \"\"\"\n Accessor object for datetimelike properties of the Series values.\n\n Examples\n --------\n >>> s.dt.hour\n >>> s.dt.second\n >>> s.dt.quarter\n\n Returns a Series indexed like the original Series.\n Raises TypeError if the Series does not contain datetimelike values.\n \"\"\"\n\n\nclass CombinedDatetimelikeProperties(DatetimeProperties,\n TimedeltaProperties, PeriodProperties):\n\n def __new__(cls, data):\n # CombinedDatetimelikeProperties isn't really instantiated. Instead\n # we need to choose which parent (datetime or timedelta) is\n # appropriate. Since we're checking the dtypes anyway, we'll just\n # do all the validation here.\n from pandas import Series\n\n if not isinstance(data, Series):\n raise TypeError(\"cannot convert an object of type {0} to a \"\n \"datetimelike index\".format(type(data)))\n\n orig = data if is_categorical_dtype(data) else None\n if orig is not None:\n data = Series(orig.values.categories,\n name=orig.name,\n copy=False)\n\n try:\n if is_datetime64_dtype(data.dtype):\n return DatetimeProperties(data, orig)\n elif is_datetime64tz_dtype(data.dtype):\n return DatetimeProperties(data, orig)\n elif is_timedelta64_dtype(data.dtype):\n return TimedeltaProperties(data, orig)\n elif is_period_arraylike(data):\n return PeriodProperties(data, orig)\n elif is_datetime_arraylike(data):\n return DatetimeProperties(data, orig)\n except Exception:\n pass # we raise an attribute error anyway\n\n raise AttributeError(\"Can only use .dt accessor with datetimelike \"\n \"values\")\n"
] | [
[
"pandas._libs.tslibs.conversion.datetime_to_datetime64",
"pandas.tseries.frequencies.to_offset",
"pandas._libs.tslibs.Timestamp",
"pandas._libs.tslibs.timezones.maybe_get_tz",
"numpy.linspace",
"numpy.asarray",
"pandas._libs.tslibs.conversion.pydt_to_i8",
"pandas._libs.tslibs.fields.get_start_end_field",
"pandas.core.dtypes.dtypes.DatetimeTZDtype",
"pandas._libs.lib.is_scalar",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas._libs.tslibs.fields.get_date_field",
"pandas._libs.tslibs.conversion.ensure_datetime64ns",
"pandas.core.dtypes.common.is_datetime64_dtype",
"pandas._libs.tslibs.timezones.infer_tzinfo",
"pandas._libs.tslib.ints_to_pydatetime",
"numpy.fix",
"pandas.core.common.values_from_object",
"pandas._libs.tslibs.fields.get_date_name_field",
"pandas.core.arrays.datetimelike.validate_inferred_freq",
"pandas.core.arrays.PeriodArray._from_datetime64",
"pandas._libs.tslibs.conversion.is_date_array_normalized",
"pandas.core.arrays._ranges.generate_regular_range",
"pandas.tseries.frequencies.get_period_alias",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.compat.set_function_name",
"pandas.core.dtypes.common.is_string_dtype",
"pandas._libs.tslibs.conversion.normalize_i8_timestamps",
"pandas._libs.tslibs.timezones.tz_compare",
"pandas.core.arrays.datetimelike.maybe_infer_freq",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.util._decorators.Appender",
"pandas._libs.tslibs.resolution.resolution",
"pandas.core.algorithms.checked_add_with_arr",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas.core.arrays.datetimelike.validate_endpoints",
"pandas._libs.tslibs.timezones.tz_standardize",
"pandas._libs.tslib.format_array_from_datetime",
"pandas.core.dtypes.common.is_period_dtype",
"pandas.core.dtypes.dtypes.DatetimeTZDtype.construct_from_string",
"numpy.ndim",
"numpy.floor",
"pandas._libs.tslibs.normalize_date",
"numpy.array",
"pandas.core.ops.invalid_comparison",
"pandas.core.common.count_not_none",
"pandas.core.arrays.timedeltas.TimedeltaArrayMixin",
"pandas._libs.tslib.array_to_datetime",
"pandas.io.formats.format._get_format_datetime64_from_values",
"pandas._libs.tslibs.conversion.tz_convert",
"pandas._libs.tslibs.timezones.is_utc",
"pandas.core.dtypes.common.is_extension_type",
"pandas._libs.tslibs.timezones.get_timezone",
"pandas.core.dtypes.common.is_object_dtype",
"pandas._libs.tslibs.conversion.tz_localize_to_utc",
"pandas.core.dtypes.missing.isna",
"pandas._libs.lib.infer_dtype",
"pandas._libs.lib.item_from_zerodim",
"pandas.core.arrays.datetimelike.validate_periods",
"pandas.core.dtypes.common.is_int64_dtype"
],
[
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.core.indexes.datetimes.DatetimeIndex",
"pandas.core.dtypes.common.is_list_like",
"pandas.Series",
"pandas.core.indexes.period.PeriodArray",
"numpy.asarray",
"pandas.core.algorithms.take_1d",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas.core.dtypes.common.is_datetime_arraylike",
"pandas.core.accessor.delegate_names",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas.core.indexes.timedeltas.TimedeltaIndex",
"pandas.core.dtypes.common.is_period_arraylike",
"pandas.core.dtypes.common.is_datetime64_dtype",
"pandas.core.dtypes.common.is_categorical_dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.24"
],
"scipy": [],
"tensorflow": []
}
] |
Sibasish-Padhy/Algo-ScriptML | [
"c0d80a2968ffac2c8d8e3f891144dd91da353f5a"
] | [
"code_for_batchgradient_descent/venv/Lib/site-packages/matplotlib/font_manager.py"
] | [
"\"\"\"\nA module for finding, managing, and using fonts across platforms.\n\nThis module provides a single `FontManager` instance that can\nbe shared across backends and platforms. The `findfont`\nfunction returns the best TrueType (TTF) font file in the local or\nsystem font path that matches the specified `FontProperties`\ninstance. The `FontManager` also handles Adobe Font Metrics\n(AFM) font files for use by the PostScript backend.\n\nThe design is based on the `W3C Cascading Style Sheet, Level 1 (CSS1)\nfont specification <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_.\nFuture versions may implement the Level 2 or 2.1 specifications.\n\"\"\"\n\n# KNOWN ISSUES\n#\n# - documentation\n# - font variant is untested\n# - font stretch is incomplete\n# - font size is incomplete\n# - default font algorithm needs improvement and testing\n# - setWeights function needs improvement\n# - 'light' is an invalid weight value, remove it.\n\nfrom functools import lru_cache\nimport json\nimport logging\nfrom numbers import Number\nimport os\nfrom pathlib import Path\nimport re\nimport subprocess\nimport sys\ntry:\n import threading\n from threading import Timer\nexcept ImportError:\n import dummy_threading as threading\n from dummy_threading import Timer\n\nimport matplotlib as mpl\nfrom matplotlib import _api, afm, cbook, ft2font, rcParams\nfrom matplotlib.fontconfig_pattern import (\n parse_fontconfig_pattern, generate_fontconfig_pattern)\nfrom matplotlib.rcsetup import _validators\n\n_log = logging.getLogger(__name__)\n\nfont_scalings = {\n 'xx-small': 0.579,\n 'x-small': 0.694,\n 'small': 0.833,\n 'medium': 1.0,\n 'large': 1.200,\n 'x-large': 1.440,\n 'xx-large': 1.728,\n 'larger': 1.2,\n 'smaller': 0.833,\n None: 1.0,\n}\nstretch_dict = {\n 'ultra-condensed': 100,\n 'extra-condensed': 200,\n 'condensed': 300,\n 'semi-condensed': 400,\n 'normal': 500,\n 'semi-expanded': 600,\n 'semi-extended': 600,\n 'expanded': 700,\n 'extended': 700,\n 'extra-expanded': 800,\n 'extra-extended': 800,\n 'ultra-expanded': 900,\n 'ultra-extended': 900,\n}\nweight_dict = {\n 'ultralight': 100,\n 'light': 200,\n 'normal': 400,\n 'regular': 400,\n 'book': 400,\n 'medium': 500,\n 'roman': 500,\n 'semibold': 600,\n 'demibold': 600,\n 'demi': 600,\n 'bold': 700,\n 'heavy': 800,\n 'extra bold': 800,\n 'black': 900,\n}\n_weight_regexes = [\n # From fontconfig's FcFreeTypeQueryFaceInternal; not the same as\n # weight_dict!\n (\"thin\", 100),\n (\"extralight\", 200),\n (\"ultralight\", 200),\n (\"demilight\", 350),\n (\"semilight\", 350),\n (\"light\", 300), # Needs to come *after* demi/semilight!\n (\"book\", 380),\n (\"regular\", 400),\n (\"normal\", 400),\n (\"medium\", 500),\n (\"demibold\", 600),\n (\"demi\", 600),\n (\"semibold\", 600),\n (\"extrabold\", 800),\n (\"superbold\", 800),\n (\"ultrabold\", 800),\n (\"bold\", 700), # Needs to come *after* extra/super/ultrabold!\n (\"ultrablack\", 1000),\n (\"superblack\", 1000),\n (\"extrablack\", 1000),\n (r\"\\bultra\", 1000),\n (\"black\", 900), # Needs to come *after* ultra/super/extrablack!\n (\"heavy\", 900),\n]\nfont_family_aliases = {\n 'serif',\n 'sans-serif',\n 'sans serif',\n 'cursive',\n 'fantasy',\n 'monospace',\n 'sans',\n}\n\n\n# OS Font paths\nMSFolders = \\\n r'Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders'\nMSFontDirectories = [\n r'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Fonts',\n r'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Fonts']\nMSUserFontDirectories = [\n str(Path.home() / 'AppData/Local/Microsoft/Windows/Fonts'),\n 
str(Path.home() / 'AppData/Roaming/Microsoft/Windows/Fonts'),\n]\nX11FontDirectories = [\n # an old standard installation point\n \"/usr/X11R6/lib/X11/fonts/TTF/\",\n \"/usr/X11/lib/X11/fonts\",\n # here is the new standard location for fonts\n \"/usr/share/fonts/\",\n # documented as a good place to install new fonts\n \"/usr/local/share/fonts/\",\n # common application, not really useful\n \"/usr/lib/openoffice/share/fonts/truetype/\",\n # user fonts\n str((Path(os.environ.get('XDG_DATA_HOME') or Path.home() / \".local/share\"))\n / \"fonts\"),\n str(Path.home() / \".fonts\"),\n]\nOSXFontDirectories = [\n \"/Library/Fonts/\",\n \"/Network/Library/Fonts/\",\n \"/System/Library/Fonts/\",\n # fonts installed via MacPorts\n \"/opt/local/share/fonts\",\n # user fonts\n str(Path.home() / \"Library/Fonts\"),\n]\n\n\n@lru_cache(64)\ndef _cached_realpath(path):\n return os.path.realpath(path)\n\n\ndef get_fontext_synonyms(fontext):\n \"\"\"\n Return a list of file extensions extensions that are synonyms for\n the given file extension *fileext*.\n \"\"\"\n return {\n 'afm': ['afm'],\n 'otf': ['otf', 'ttc', 'ttf'],\n 'ttc': ['otf', 'ttc', 'ttf'],\n 'ttf': ['otf', 'ttc', 'ttf'],\n }[fontext]\n\n\ndef list_fonts(directory, extensions):\n \"\"\"\n Return a list of all fonts matching any of the extensions, found\n recursively under the directory.\n \"\"\"\n extensions = [\".\" + ext for ext in extensions]\n return [os.path.join(dirpath, filename)\n # os.walk ignores access errors, unlike Path.glob.\n for dirpath, _, filenames in os.walk(directory)\n for filename in filenames\n if Path(filename).suffix.lower() in extensions]\n\n\ndef win32FontDirectory():\n r\"\"\"\n Return the user-specified font directory for Win32. This is\n looked up from the registry key ::\n\n \\\\HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\\Fonts\n\n If the key is not found, ``%WINDIR%\\Fonts`` will be returned.\n \"\"\"\n import winreg\n try:\n with winreg.OpenKey(winreg.HKEY_CURRENT_USER, MSFolders) as user:\n return winreg.QueryValueEx(user, 'Fonts')[0]\n except OSError:\n return os.path.join(os.environ['WINDIR'], 'Fonts')\n\n\ndef _win32RegistryFonts(reg_domain, base_dir):\n r\"\"\"\n Search for fonts in the Windows registry.\n\n Parameters\n ----------\n reg_domain : int\n The top level registry domain (e.g. HKEY_LOCAL_MACHINE).\n\n base_dir : str\n The path to the folder where the font files are usually located (e.g.\n C:\\Windows\\Fonts). 
If only the filename of the font is stored in the\n registry, the absolute path is built relative to this base directory.\n\n Returns\n -------\n `set`\n `pathlib.Path` objects with the absolute path to the font files found.\n\n \"\"\"\n import winreg\n items = set()\n\n for reg_path in MSFontDirectories:\n try:\n with winreg.OpenKey(reg_domain, reg_path) as local:\n for j in range(winreg.QueryInfoKey(local)[1]):\n # value may contain the filename of the font or its\n # absolute path.\n key, value, tp = winreg.EnumValue(local, j)\n if not isinstance(value, str):\n continue\n\n # Work around for https://bugs.python.org/issue25778, which\n # is fixed in Py>=3.6.1.\n value = value.split(\"\\0\", 1)[0]\n\n try:\n # If value contains already an absolute path, then it\n # is not changed further.\n path = Path(base_dir, value).resolve()\n except RuntimeError:\n # Don't fail with invalid entries.\n continue\n\n items.add(path)\n except (OSError, MemoryError):\n continue\n\n return items\n\n\ndef win32InstalledFonts(directory=None, fontext='ttf'):\n \"\"\"\n Search for fonts in the specified font directory, or use the\n system directories if none given. Additionally, it is searched for user\n fonts installed. A list of TrueType font filenames are returned by default,\n or AFM fonts if *fontext* == 'afm'.\n \"\"\"\n import winreg\n\n if directory is None:\n directory = win32FontDirectory()\n\n fontext = ['.' + ext for ext in get_fontext_synonyms(fontext)]\n\n items = set()\n\n # System fonts\n items.update(_win32RegistryFonts(winreg.HKEY_LOCAL_MACHINE, directory))\n\n # User fonts\n for userdir in MSUserFontDirectories:\n items.update(_win32RegistryFonts(winreg.HKEY_CURRENT_USER, userdir))\n\n # Keep only paths with matching file extension.\n return [str(path) for path in items if path.suffix.lower() in fontext]\n\n\n@lru_cache()\ndef _call_fc_list():\n \"\"\"Cache and list the font filenames known to `fc-list`.\"\"\"\n try:\n if b'--format' not in subprocess.check_output(['fc-list', '--help']):\n _log.warning( # fontconfig 2.7 implemented --format.\n 'Matplotlib needs fontconfig>=2.7 to query system fonts.')\n return []\n out = subprocess.check_output(['fc-list', '--format=%{file}\\\\n'])\n except (OSError, subprocess.CalledProcessError):\n return []\n return [os.fsdecode(fname) for fname in out.split(b'\\n')]\n\n\ndef get_fontconfig_fonts(fontext='ttf'):\n \"\"\"List font filenames known to `fc-list` having the given extension.\"\"\"\n fontext = ['.' + ext for ext in get_fontext_synonyms(fontext)]\n return [fname for fname in _call_fc_list()\n if Path(fname).suffix.lower() in fontext]\n\n\ndef findSystemFonts(fontpaths=None, fontext='ttf'):\n \"\"\"\n Search for fonts in the specified font paths. If no paths are\n given, will use a standard set of system paths, as well as the\n list of fonts tracked by fontconfig if fontconfig is installed and\n available. 
A list of TrueType fonts are returned by default with\n AFM fonts as an option.\n \"\"\"\n fontfiles = set()\n fontexts = get_fontext_synonyms(fontext)\n\n if fontpaths is None:\n if sys.platform == 'win32':\n fontpaths = MSUserFontDirectories + [win32FontDirectory()]\n # now get all installed fonts directly...\n fontfiles.update(win32InstalledFonts(fontext=fontext))\n else:\n fontpaths = X11FontDirectories\n if sys.platform == 'darwin':\n fontpaths = [*X11FontDirectories, *OSXFontDirectories]\n fontfiles.update(get_fontconfig_fonts(fontext))\n\n elif isinstance(fontpaths, str):\n fontpaths = [fontpaths]\n\n for path in fontpaths:\n fontfiles.update(map(os.path.abspath, list_fonts(path, fontexts)))\n\n return [fname for fname in fontfiles if os.path.exists(fname)]\n\n\nclass FontEntry:\n \"\"\"\n A class for storing Font properties. It is used when populating\n the font lookup dictionary.\n \"\"\"\n def __init__(self,\n fname ='',\n name ='',\n style ='normal',\n variant='normal',\n weight ='normal',\n stretch='normal',\n size ='medium',\n ):\n self.fname = fname\n self.name = name\n self.style = style\n self.variant = variant\n self.weight = weight\n self.stretch = stretch\n try:\n self.size = str(float(size))\n except ValueError:\n self.size = size\n\n def __repr__(self):\n return \"<Font '%s' (%s) %s %s %s %s>\" % (\n self.name, os.path.basename(self.fname), self.style, self.variant,\n self.weight, self.stretch)\n\n\ndef ttfFontProperty(font):\n \"\"\"\n Extract information from a TrueType font file.\n\n Parameters\n ----------\n font : `.FT2Font`\n The TrueType font file from which information will be extracted.\n\n Returns\n -------\n `FontEntry`\n The extracted font properties.\n\n \"\"\"\n name = font.family_name\n\n # Styles are: italic, oblique, and normal (default)\n\n sfnt = font.get_sfnt()\n mac_key = (1, # platform: macintosh\n 0, # id: roman\n 0) # langid: english\n ms_key = (3, # platform: microsoft\n 1, # id: unicode_cs\n 0x0409) # langid: english_united_states\n\n # These tables are actually mac_roman-encoded, but mac_roman support may be\n # missing in some alternative Python implementations and we are only going\n # to look for ASCII substrings, where any ASCII-compatible encoding works\n # - or big-endian UTF-16, since important Microsoft fonts use that.\n sfnt2 = (sfnt.get((*mac_key, 2), b'').decode('latin-1').lower() or\n sfnt.get((*ms_key, 2), b'').decode('utf_16_be').lower())\n sfnt4 = (sfnt.get((*mac_key, 4), b'').decode('latin-1').lower() or\n sfnt.get((*ms_key, 4), b'').decode('utf_16_be').lower())\n\n if sfnt4.find('oblique') >= 0:\n style = 'oblique'\n elif sfnt4.find('italic') >= 0:\n style = 'italic'\n elif sfnt2.find('regular') >= 0:\n style = 'normal'\n elif font.style_flags & ft2font.ITALIC:\n style = 'italic'\n else:\n style = 'normal'\n\n # Variants are: small-caps and normal (default)\n\n # !!!! 
Untested\n if name.lower() in ['capitals', 'small-caps']:\n variant = 'small-caps'\n else:\n variant = 'normal'\n\n # The weight-guessing algorithm is directly translated from fontconfig\n # 2.13.1's FcFreeTypeQueryFaceInternal (fcfreetype.c).\n wws_subfamily = 22\n typographic_subfamily = 16\n font_subfamily = 2\n styles = [\n sfnt.get((*mac_key, wws_subfamily), b'').decode('latin-1'),\n sfnt.get((*mac_key, typographic_subfamily), b'').decode('latin-1'),\n sfnt.get((*mac_key, font_subfamily), b'').decode('latin-1'),\n sfnt.get((*ms_key, wws_subfamily), b'').decode('utf-16-be'),\n sfnt.get((*ms_key, typographic_subfamily), b'').decode('utf-16-be'),\n sfnt.get((*ms_key, font_subfamily), b'').decode('utf-16-be'),\n ]\n styles = [*filter(None, styles)] or [font.style_name]\n\n def get_weight(): # From fontconfig's FcFreeTypeQueryFaceInternal.\n # OS/2 table weight.\n os2 = font.get_sfnt_table(\"OS/2\")\n if os2 and os2[\"version\"] != 0xffff:\n return os2[\"usWeightClass\"]\n # PostScript font info weight.\n try:\n ps_font_info_weight = (\n font.get_ps_font_info()[\"weight\"].replace(\" \", \"\") or \"\")\n except ValueError:\n pass\n else:\n for regex, weight in _weight_regexes:\n if re.fullmatch(regex, ps_font_info_weight, re.I):\n return weight\n # Style name weight.\n for style in styles:\n style = style.replace(\" \", \"\")\n for regex, weight in _weight_regexes:\n if re.search(regex, style, re.I):\n return weight\n if font.style_flags & ft2font.BOLD:\n return 700 # \"bold\"\n return 500 # \"medium\", not \"regular\"!\n\n weight = int(get_weight())\n\n # Stretch can be absolute and relative\n # Absolute stretches are: ultra-condensed, extra-condensed, condensed,\n # semi-condensed, normal, semi-expanded, expanded, extra-expanded,\n # and ultra-expanded.\n # Relative stretches are: wider, narrower\n # Child value is: inherit\n\n if any(word in sfnt4 for word in ['narrow', 'condensed', 'cond']):\n stretch = 'condensed'\n elif 'demi cond' in sfnt4:\n stretch = 'semi-condensed'\n elif any(word in sfnt4 for word in ['wide', 'expanded', 'extended']):\n stretch = 'expanded'\n else:\n stretch = 'normal'\n\n # Sizes can be absolute and relative.\n # Absolute sizes are: xx-small, x-small, small, medium, large, x-large,\n # and xx-large.\n # Relative sizes are: larger, smaller\n # Length value is an absolute font size, e.g., 12pt\n # Percentage values are in 'em's. Most robust specification.\n\n if not font.scalable:\n raise NotImplementedError(\"Non-scalable fonts are not supported\")\n size = 'scalable'\n\n return FontEntry(font.fname, name, style, variant, weight, stretch, size)\n\n\ndef afmFontProperty(fontpath, font):\n \"\"\"\n Extract information from an AFM font file.\n\n Parameters\n ----------\n font : `.AFM`\n The AFM font file from which information will be extracted.\n\n Returns\n -------\n `FontEntry`\n The extracted font properties.\n \"\"\"\n\n name = font.get_familyname()\n fontname = font.get_fontname().lower()\n\n # Styles are: italic, oblique, and normal (default)\n\n if font.get_angle() != 0 or 'italic' in name.lower():\n style = 'italic'\n elif 'oblique' in name.lower():\n style = 'oblique'\n else:\n style = 'normal'\n\n # Variants are: small-caps and normal (default)\n\n # !!!! 
Untested\n if name.lower() in ['capitals', 'small-caps']:\n variant = 'small-caps'\n else:\n variant = 'normal'\n\n weight = font.get_weight().lower()\n if weight not in weight_dict:\n weight = 'normal'\n\n # Stretch can be absolute and relative\n # Absolute stretches are: ultra-condensed, extra-condensed, condensed,\n # semi-condensed, normal, semi-expanded, expanded, extra-expanded,\n # and ultra-expanded.\n # Relative stretches are: wider, narrower\n # Child value is: inherit\n if 'demi cond' in fontname:\n stretch = 'semi-condensed'\n elif any(word in fontname for word in ['narrow', 'cond']):\n stretch = 'condensed'\n elif any(word in fontname for word in ['wide', 'expanded', 'extended']):\n stretch = 'expanded'\n else:\n stretch = 'normal'\n\n # Sizes can be absolute and relative.\n # Absolute sizes are: xx-small, x-small, small, medium, large, x-large,\n # and xx-large.\n # Relative sizes are: larger, smaller\n # Length value is an absolute font size, e.g., 12pt\n # Percentage values are in 'em's. Most robust specification.\n\n # All AFM fonts are apparently scalable.\n\n size = 'scalable'\n\n return FontEntry(fontpath, name, style, variant, weight, stretch, size)\n\n\nclass FontProperties:\n \"\"\"\n A class for storing and manipulating font properties.\n\n The font properties are the six properties described in the\n `W3C Cascading Style Sheet, Level 1\n <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ font\n specification and *math_fontfamily* for math fonts:\n\n - family: A list of font names in decreasing order of priority.\n The items may include a generic font family name, either\n 'sans-serif' (default), 'serif', 'cursive', 'fantasy', or 'monospace'.\n In that case, the actual font to be used will be looked up\n from the associated rcParam.\n\n - style: Either 'normal' (default), 'italic' or 'oblique'.\n\n - variant: Either 'normal' (default) or 'small-caps'.\n\n - stretch: A numeric value in the range 0-1000 or one of\n 'ultra-condensed', 'extra-condensed', 'condensed',\n 'semi-condensed', 'normal' (default), 'semi-expanded', 'expanded',\n 'extra-expanded' or 'ultra-expanded'.\n\n - weight: A numeric value in the range 0-1000 or one of\n 'ultralight', 'light', 'normal' (default), 'regular', 'book', 'medium',\n 'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy',\n 'extra bold', 'black'.\n\n - size: Either an relative value of 'xx-small', 'x-small',\n 'small', 'medium', 'large', 'x-large', 'xx-large' or an\n absolute font size, e.g., 10 (default).\n\n - math_fontfamily: The family of fonts used to render math text; overrides\n :rc:`mathtext.fontset`. Supported values are the same as the ones\n supported by :rc:`mathtext.fontset`: 'dejavusans', 'dejavuserif', 'cm',\n 'stix', 'stixsans' and 'custom'.\n\n Alternatively, a font may be specified using the absolute path to a font\n file, by using the *fname* kwarg. However, in this case, it is typically\n simpler to just pass the path (as a `pathlib.Path`, not a `str`) to the\n *font* kwarg of the `.Text` object.\n\n The preferred usage of font sizes is to use the relative values,\n e.g., 'large', instead of absolute font sizes, e.g., 12. This\n approach allows all text sizes to be made larger or smaller based\n on the font manager's default font size.\n\n This class will also accept a fontconfig_ pattern_, if it is the only\n argument provided. This support does not depend on fontconfig; we are\n merely borrowing its pattern syntax for use here.\n\n .. _fontconfig: https://www.freedesktop.org/wiki/Software/fontconfig/\n .. 
_pattern:\n https://www.freedesktop.org/software/fontconfig/fontconfig-user.html\n\n Note that Matplotlib's internal font manager and fontconfig use a\n different algorithm to lookup fonts, so the results of the same pattern\n may be different in Matplotlib than in other applications that use\n fontconfig.\n \"\"\"\n\n def __init__(self,\n family = None,\n style = None,\n variant= None,\n weight = None,\n stretch= None,\n size = None,\n fname = None, # if set, it's a hardcoded filename to use\n math_fontfamily = None,\n ):\n self._family = _normalize_font_family(rcParams['font.family'])\n self._slant = rcParams['font.style']\n self._variant = rcParams['font.variant']\n self._weight = rcParams['font.weight']\n self._stretch = rcParams['font.stretch']\n self._size = rcParams['font.size']\n self._file = None\n self._math_fontfamily = None\n\n if isinstance(family, str):\n # Treat family as a fontconfig pattern if it is the only\n # parameter provided.\n if (style is None and variant is None and weight is None and\n stretch is None and size is None and fname is None):\n self.set_fontconfig_pattern(family)\n return\n\n self.set_family(family)\n self.set_style(style)\n self.set_variant(variant)\n self.set_weight(weight)\n self.set_stretch(stretch)\n self.set_file(fname)\n self.set_size(size)\n self.set_math_fontfamily(math_fontfamily)\n\n @classmethod\n def _from_any(cls, arg):\n \"\"\"\n Generic constructor which can build a `.FontProperties` from any of the\n following:\n\n - a `.FontProperties`: it is passed through as is;\n - `None`: a `.FontProperties` using rc values is used;\n - an `os.PathLike`: it is used as path to the font file;\n - a `str`: it is parsed as a fontconfig pattern;\n - a `dict`: it is passed as ``**kwargs`` to `.FontProperties`.\n \"\"\"\n if isinstance(arg, cls):\n return arg\n elif arg is None:\n return cls()\n elif isinstance(arg, os.PathLike):\n return cls(fname=arg)\n elif isinstance(arg, str):\n return cls(arg)\n else:\n return cls(**arg)\n\n def __hash__(self):\n l = (tuple(self.get_family()),\n self.get_slant(),\n self.get_variant(),\n self.get_weight(),\n self.get_stretch(),\n self.get_size_in_points(),\n self.get_file(),\n self.get_math_fontfamily())\n return hash(l)\n\n def __eq__(self, other):\n return hash(self) == hash(other)\n\n def __str__(self):\n return self.get_fontconfig_pattern()\n\n def get_family(self):\n \"\"\"\n Return a list of font names that comprise the font family.\n \"\"\"\n return self._family\n\n def get_name(self):\n \"\"\"\n Return the name of the font that best matches the font properties.\n \"\"\"\n return get_font(findfont(self)).family_name\n\n def get_style(self):\n \"\"\"\n Return the font style. Values are: 'normal', 'italic' or 'oblique'.\n \"\"\"\n return self._slant\n get_slant = get_style\n\n def get_variant(self):\n \"\"\"\n Return the font variant. Values are: 'normal' or 'small-caps'.\n \"\"\"\n return self._variant\n\n def get_weight(self):\n \"\"\"\n Set the font weight. Options are: A numeric value in the\n range 0-1000 or one of 'light', 'normal', 'regular', 'book',\n 'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold',\n 'heavy', 'extra bold', 'black'\n \"\"\"\n return self._weight\n\n def get_stretch(self):\n \"\"\"\n Return the font stretch or width. 
Options are: 'ultra-condensed',\n 'extra-condensed', 'condensed', 'semi-condensed', 'normal',\n 'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'.\n \"\"\"\n return self._stretch\n\n def get_size(self):\n \"\"\"\n Return the font size.\n \"\"\"\n return self._size\n\n def get_size_in_points(self):\n return self._size\n\n def get_file(self):\n \"\"\"\n Return the filename of the associated font.\n \"\"\"\n return self._file\n\n def get_fontconfig_pattern(self):\n \"\"\"\n Get a fontconfig_ pattern_ suitable for looking up the font as\n specified with fontconfig's ``fc-match`` utility.\n\n This support does not depend on fontconfig; we are merely borrowing its\n pattern syntax for use here.\n \"\"\"\n return generate_fontconfig_pattern(self)\n\n def set_family(self, family):\n \"\"\"\n Change the font family. May be either an alias (generic name\n is CSS parlance), such as: 'serif', 'sans-serif', 'cursive',\n 'fantasy', or 'monospace', a real font name or a list of real\n font names. Real font names are not supported when\n :rc:`text.usetex` is `True`.\n \"\"\"\n if family is None:\n family = rcParams['font.family']\n self._family = _normalize_font_family(family)\n set_name = set_family\n\n def set_style(self, style):\n \"\"\"\n Set the font style. Values are: 'normal', 'italic' or 'oblique'.\n \"\"\"\n if style is None:\n style = rcParams['font.style']\n _api.check_in_list(['normal', 'italic', 'oblique'], style=style)\n self._slant = style\n set_slant = set_style\n\n def set_variant(self, variant):\n \"\"\"\n Set the font variant. Values are: 'normal' or 'small-caps'.\n \"\"\"\n if variant is None:\n variant = rcParams['font.variant']\n _api.check_in_list(['normal', 'small-caps'], variant=variant)\n self._variant = variant\n\n def set_weight(self, weight):\n \"\"\"\n Set the font weight. May be either a numeric value in the\n range 0-1000 or one of 'ultralight', 'light', 'normal',\n 'regular', 'book', 'medium', 'roman', 'semibold', 'demibold',\n 'demi', 'bold', 'heavy', 'extra bold', 'black'\n \"\"\"\n if weight is None:\n weight = rcParams['font.weight']\n try:\n weight = int(weight)\n if weight < 0 or weight > 1000:\n raise ValueError()\n except ValueError:\n if weight not in weight_dict:\n raise ValueError(\"weight is invalid\")\n self._weight = weight\n\n def set_stretch(self, stretch):\n \"\"\"\n Set the font stretch or width. Options are: 'ultra-condensed',\n 'extra-condensed', 'condensed', 'semi-condensed', 'normal',\n 'semi-expanded', 'expanded', 'extra-expanded' or\n 'ultra-expanded', or a numeric value in the range 0-1000.\n \"\"\"\n if stretch is None:\n stretch = rcParams['font.stretch']\n try:\n stretch = int(stretch)\n if stretch < 0 or stretch > 1000:\n raise ValueError()\n except ValueError as err:\n if stretch not in stretch_dict:\n raise ValueError(\"stretch is invalid\") from err\n self._stretch = stretch\n\n def set_size(self, size):\n \"\"\"\n Set the font size. Either an relative value of 'xx-small',\n 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'\n or an absolute font size, e.g., 12.\n \"\"\"\n if size is None:\n size = rcParams['font.size']\n try:\n size = float(size)\n except ValueError:\n try:\n scale = font_scalings[size]\n except KeyError as err:\n raise ValueError(\n \"Size is invalid. Valid font size are \"\n + \", \".join(map(str, font_scalings))) from err\n else:\n size = scale * FontManager.get_default_size()\n if size < 1.0:\n _log.info('Fontsize %1.2f < 1.0 pt not allowed by FreeType. 
'\n 'Setting fontsize = 1 pt', size)\n size = 1.0\n self._size = size\n\n def set_file(self, file):\n \"\"\"\n Set the filename of the fontfile to use. In this case, all\n other properties will be ignored.\n \"\"\"\n self._file = os.fspath(file) if file is not None else None\n\n def set_fontconfig_pattern(self, pattern):\n \"\"\"\n Set the properties by parsing a fontconfig_ *pattern*.\n\n This support does not depend on fontconfig; we are merely borrowing its\n pattern syntax for use here.\n \"\"\"\n for key, val in parse_fontconfig_pattern(pattern).items():\n if type(val) == list:\n getattr(self, \"set_\" + key)(val[0])\n else:\n getattr(self, \"set_\" + key)(val)\n\n def get_math_fontfamily(self):\n \"\"\"\n Return the name of the font family used for math text.\n\n The default font is :rc:`mathtext.fontset`.\n \"\"\"\n return self._math_fontfamily\n\n def set_math_fontfamily(self, fontfamily):\n \"\"\"\n Set the font family for text in math mode.\n\n If not set explicitly, :rc:`mathtext.fontset` will be used.\n\n Parameters\n ----------\n fontfamily : str\n The name of the font family.\n\n Available font families are defined in the\n matplotlibrc.template file\n :ref:`here <customizing-with-matplotlibrc-files>`\n\n See Also\n --------\n .text.Text.get_math_fontfamily\n \"\"\"\n if fontfamily is None:\n fontfamily = rcParams['mathtext.fontset']\n else:\n valid_fonts = _validators['mathtext.fontset'].valid.values()\n # _check_in_list() Validates the parameter math_fontfamily as\n # if it were passed to rcParams['mathtext.fontset']\n _api.check_in_list(valid_fonts, math_fontfamily=fontfamily)\n self._math_fontfamily = fontfamily\n\n def copy(self):\n \"\"\"Return a copy of self.\"\"\"\n new = type(self)()\n vars(new).update(vars(self))\n return new\n\n\nclass _JSONEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, FontManager):\n return dict(o.__dict__, __class__='FontManager')\n elif isinstance(o, FontEntry):\n d = dict(o.__dict__, __class__='FontEntry')\n try:\n # Cache paths of fonts shipped with Matplotlib relative to the\n # Matplotlib data path, which helps in the presence of venvs.\n d[\"fname\"] = str(\n Path(d[\"fname\"]).relative_to(mpl.get_data_path()))\n except ValueError:\n pass\n return d\n else:\n return super().default(o)\n\n\ndef _json_decode(o):\n cls = o.pop('__class__', None)\n if cls is None:\n return o\n elif cls == 'FontManager':\n r = FontManager.__new__(FontManager)\n r.__dict__.update(o)\n return r\n elif cls == 'FontEntry':\n r = FontEntry.__new__(FontEntry)\n r.__dict__.update(o)\n if not os.path.isabs(r.fname):\n r.fname = os.path.join(mpl.get_data_path(), r.fname)\n return r\n else:\n raise ValueError(\"Don't know how to deserialize __class__=%s\" % cls)\n\n\ndef json_dump(data, filename):\n \"\"\"\n Dump `FontManager` *data* as JSON to the file named *filename*.\n\n See Also\n --------\n json_load\n\n Notes\n -----\n File paths that are children of the Matplotlib data path (typically, fonts\n shipped with Matplotlib) are stored relative to that data path (to remain\n valid across virtualenvs).\n\n This function temporarily locks the output file to prevent multiple\n processes from overwriting one another's output.\n \"\"\"\n with cbook._lock_path(filename), open(filename, 'w') as fh:\n try:\n json.dump(data, fh, cls=_JSONEncoder, indent=2)\n except OSError as e:\n _log.warning('Could not save font_manager cache {}'.format(e))\n\n\ndef json_load(filename):\n \"\"\"\n Load a `FontManager` from the JSON file named *filename*.\n\n See Also\n 
--------\n json_dump\n \"\"\"\n with open(filename, 'r') as fh:\n return json.load(fh, object_hook=_json_decode)\n\n\ndef _normalize_font_family(family):\n if isinstance(family, str):\n family = [family]\n return family\n\n\nclass FontManager:\n \"\"\"\n On import, the `FontManager` singleton instance creates a list of ttf and\n afm fonts and caches their `FontProperties`. The `FontManager.findfont`\n method does a nearest neighbor search to find the font that most closely\n matches the specification. If no good enough match is found, the default\n font is returned.\n \"\"\"\n # Increment this version number whenever the font cache data\n # format or behavior has changed and requires a existing font\n # cache files to be rebuilt.\n __version__ = 330\n\n def __init__(self, size=None, weight='normal'):\n self._version = self.__version__\n\n self.__default_weight = weight\n self.default_size = size\n\n paths = [cbook._get_data_path('fonts', subdir)\n for subdir in ['ttf', 'afm', 'pdfcorefonts']]\n # Create list of font paths\n for pathname in ['TTFPATH', 'AFMPATH']:\n if pathname in os.environ:\n ttfpath = os.environ[pathname]\n if ttfpath.find(';') >= 0: # win32 style\n paths.extend(ttfpath.split(';'))\n elif ttfpath.find(':') >= 0: # unix style\n paths.extend(ttfpath.split(':'))\n else:\n paths.append(ttfpath)\n _api.warn_deprecated(\n \"3.3\", name=pathname, obj_type=\"environment variable\",\n alternative=\"FontManager.addfont()\")\n _log.debug('font search path %s', str(paths))\n # Load TrueType fonts and create font dictionary.\n\n self.defaultFamily = {\n 'ttf': 'DejaVu Sans',\n 'afm': 'Helvetica'}\n\n self.afmlist = []\n self.ttflist = []\n\n # Delay the warning by 5s.\n timer = Timer(5, lambda: _log.warning(\n 'Matplotlib is building the font cache; this may take a moment.'))\n timer.start()\n try:\n for fontext in [\"afm\", \"ttf\"]:\n for path in [*findSystemFonts(paths, fontext=fontext),\n *findSystemFonts(fontext=fontext)]:\n try:\n self.addfont(path)\n except OSError as exc:\n _log.info(\"Failed to open font file %s: %s\", path, exc)\n except Exception as exc:\n _log.info(\"Failed to extract font properties from %s: \"\n \"%s\", path, exc)\n finally:\n timer.cancel()\n\n def addfont(self, path):\n \"\"\"\n Cache the properties of the font at *path* to make it available to the\n `FontManager`. The type of font is inferred from the path suffix.\n\n Parameters\n ----------\n path : str or path-like\n \"\"\"\n if Path(path).suffix.lower() == \".afm\":\n with open(path, \"rb\") as fh:\n font = afm.AFM(fh)\n prop = afmFontProperty(path, font)\n self.afmlist.append(prop)\n else:\n font = ft2font.FT2Font(path)\n prop = ttfFontProperty(font)\n self.ttflist.append(prop)\n\n @property\n def defaultFont(self):\n # Lazily evaluated (findfont then caches the result) to avoid including\n # the venv path in the json serialization.\n return {ext: self.findfont(family, fontext=ext)\n for ext, family in self.defaultFamily.items()}\n\n def get_default_weight(self):\n \"\"\"\n Return the default font weight.\n \"\"\"\n return self.__default_weight\n\n @staticmethod\n def get_default_size():\n \"\"\"\n Return the default font size.\n \"\"\"\n return rcParams['font.size']\n\n def set_default_weight(self, weight):\n \"\"\"\n Set the default font weight. The initial value is 'normal'.\n \"\"\"\n self.__default_weight = weight\n\n @staticmethod\n def _expand_aliases(family):\n if family in ('sans', 'sans serif'):\n family = 'sans-serif'\n return rcParams['font.' 
+ family]\n\n # Each of the scoring functions below should return a value between\n # 0.0 (perfect match) and 1.0 (terrible match)\n def score_family(self, families, family2):\n \"\"\"\n Return a match score between the list of font families in\n *families* and the font family name *family2*.\n\n An exact match at the head of the list returns 0.0.\n\n A match further down the list will return between 0 and 1.\n\n No match will return 1.0.\n \"\"\"\n if not isinstance(families, (list, tuple)):\n families = [families]\n elif len(families) == 0:\n return 1.0\n family2 = family2.lower()\n step = 1 / len(families)\n for i, family1 in enumerate(families):\n family1 = family1.lower()\n if family1 in font_family_aliases:\n options = [*map(str.lower, self._expand_aliases(family1))]\n if family2 in options:\n idx = options.index(family2)\n return (i + (idx / len(options))) * step\n elif family1 == family2:\n # The score should be weighted by where in the\n # list the font was found.\n return i * step\n return 1.0\n\n def score_style(self, style1, style2):\n \"\"\"\n Return a match score between *style1* and *style2*.\n\n An exact match returns 0.0.\n\n A match between 'italic' and 'oblique' returns 0.1.\n\n No match returns 1.0.\n \"\"\"\n if style1 == style2:\n return 0.0\n elif (style1 in ('italic', 'oblique')\n and style2 in ('italic', 'oblique')):\n return 0.1\n return 1.0\n\n def score_variant(self, variant1, variant2):\n \"\"\"\n Return a match score between *variant1* and *variant2*.\n\n An exact match returns 0.0, otherwise 1.0.\n \"\"\"\n if variant1 == variant2:\n return 0.0\n else:\n return 1.0\n\n def score_stretch(self, stretch1, stretch2):\n \"\"\"\n Return a match score between *stretch1* and *stretch2*.\n\n The result is the absolute value of the difference between the\n CSS numeric values of *stretch1* and *stretch2*, normalized\n between 0.0 and 1.0.\n \"\"\"\n try:\n stretchval1 = int(stretch1)\n except ValueError:\n stretchval1 = stretch_dict.get(stretch1, 500)\n try:\n stretchval2 = int(stretch2)\n except ValueError:\n stretchval2 = stretch_dict.get(stretch2, 500)\n return abs(stretchval1 - stretchval2) / 1000.0\n\n def score_weight(self, weight1, weight2):\n \"\"\"\n Return a match score between *weight1* and *weight2*.\n\n The result is 0.0 if both weight1 and weight 2 are given as strings\n and have the same value.\n\n Otherwise, the result is the absolute value of the difference between\n the CSS numeric values of *weight1* and *weight2*, normalized between\n 0.05 and 1.0.\n \"\"\"\n # exact match of the weight names, e.g. 
weight1 == weight2 == \"regular\"\n if cbook._str_equal(weight1, weight2):\n return 0.0\n w1 = weight1 if isinstance(weight1, Number) else weight_dict[weight1]\n w2 = weight2 if isinstance(weight2, Number) else weight_dict[weight2]\n return 0.95 * (abs(w1 - w2) / 1000) + 0.05\n\n def score_size(self, size1, size2):\n \"\"\"\n Return a match score between *size1* and *size2*.\n\n If *size2* (the size specified in the font file) is 'scalable', this\n function always returns 0.0, since any font size can be generated.\n\n Otherwise, the result is the absolute distance between *size1* and\n *size2*, normalized so that the usual range of font sizes (6pt -\n 72pt) will lie between 0.0 and 1.0.\n \"\"\"\n if size2 == 'scalable':\n return 0.0\n # Size value should have already been\n try:\n sizeval1 = float(size1)\n except ValueError:\n sizeval1 = self.default_size * font_scalings[size1]\n try:\n sizeval2 = float(size2)\n except ValueError:\n return 1.0\n return abs(sizeval1 - sizeval2) / 72\n\n def findfont(self, prop, fontext='ttf', directory=None,\n fallback_to_default=True, rebuild_if_missing=True):\n \"\"\"\n Find a font that most closely matches the given font properties.\n\n Parameters\n ----------\n prop : str or `~matplotlib.font_manager.FontProperties`\n The font properties to search for. This can be either a\n `.FontProperties` object or a string defining a\n `fontconfig patterns`_.\n\n fontext : {'ttf', 'afm'}, default: 'ttf'\n The extension of the font file:\n\n - 'ttf': TrueType and OpenType fonts (.ttf, .ttc, .otf)\n - 'afm': Adobe Font Metrics (.afm)\n\n directory : str, optional\n If given, only search this directory and its subdirectories.\n\n fallback_to_default : bool\n If True, will fallback to the default font family (usually\n \"DejaVu Sans\" or \"Helvetica\") if the first lookup hard-fails.\n\n rebuild_if_missing : bool\n Whether to rebuild the font cache and search again if the first\n match appears to point to a nonexisting font (i.e., the font cache\n contains outdated entries).\n\n Returns\n -------\n str\n The filename of the best matching font.\n\n Notes\n -----\n This performs a nearest neighbor search. Each font is given a\n similarity score to the target font properties. The first font with\n the highest score is returned. If no matches below a certain\n threshold are found, the default font (usually DejaVu Sans) is\n returned.\n\n The result is cached, so subsequent lookups don't have to\n perform the O(n) nearest neighbor search.\n\n See the `W3C Cascading Style Sheet, Level 1\n <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ documentation\n for a description of the font finding algorithm.\n\n .. 
_fontconfig patterns:\n https://www.freedesktop.org/software/fontconfig/fontconfig-user.html\n \"\"\"\n # Pass the relevant rcParams (and the font manager, as `self`) to\n # _findfont_cached so to prevent using a stale cache entry after an\n # rcParam was changed.\n rc_params = tuple(tuple(rcParams[key]) for key in [\n \"font.serif\", \"font.sans-serif\", \"font.cursive\", \"font.fantasy\",\n \"font.monospace\"])\n return self._findfont_cached(\n prop, fontext, directory, fallback_to_default, rebuild_if_missing,\n rc_params)\n\n @lru_cache()\n def _findfont_cached(self, prop, fontext, directory, fallback_to_default,\n rebuild_if_missing, rc_params):\n\n prop = FontProperties._from_any(prop)\n\n fname = prop.get_file()\n if fname is not None:\n return fname\n\n if fontext == 'afm':\n fontlist = self.afmlist\n else:\n fontlist = self.ttflist\n\n best_score = 1e64\n best_font = None\n\n _log.debug('findfont: Matching %s.', prop)\n for font in fontlist:\n if (directory is not None and\n Path(directory) not in Path(font.fname).parents):\n continue\n # Matching family should have top priority, so multiply it by 10.\n score = (self.score_family(prop.get_family(), font.name) * 10\n + self.score_style(prop.get_style(), font.style)\n + self.score_variant(prop.get_variant(), font.variant)\n + self.score_weight(prop.get_weight(), font.weight)\n + self.score_stretch(prop.get_stretch(), font.stretch)\n + self.score_size(prop.get_size(), font.size))\n _log.debug('findfont: score(%s) = %s', font, score)\n if score < best_score:\n best_score = score\n best_font = font\n if score == 0:\n break\n\n if best_font is None or best_score >= 10.0:\n if fallback_to_default:\n _log.warning(\n 'findfont: Font family %s not found. Falling back to %s.',\n prop.get_family(), self.defaultFamily[fontext])\n for family in map(str.lower, prop.get_family()):\n if family in font_family_aliases:\n _log.warning(\n \"findfont: Generic family %r not found because \"\n \"none of the following families were found: %s\",\n family, \", \".join(self._expand_aliases(family)))\n default_prop = prop.copy()\n default_prop.set_family(self.defaultFamily[fontext])\n return self.findfont(default_prop, fontext, directory,\n fallback_to_default=False)\n else:\n raise ValueError(f\"Failed to find font {prop}, and fallback \"\n f\"to the default font was disabled\")\n else:\n _log.debug('findfont: Matching %s to %s (%r) with score of %f.',\n prop, best_font.name, best_font.fname, best_score)\n result = best_font.fname\n\n if not os.path.isfile(result):\n if rebuild_if_missing:\n _log.info(\n 'findfont: Found a missing font file. Rebuilding cache.')\n new_fm = _load_fontmanager(try_read_cache=False)\n # Replace self by the new fontmanager, because users may have\n # a reference to this specific instance.\n # TODO: _load_fontmanager should really be (used by) a method\n # modifying the instance in place.\n vars(self).update(vars(new_fm))\n return self.findfont(\n prop, fontext, directory, rebuild_if_missing=False)\n else:\n raise ValueError(\"No valid font could be found\")\n\n return _cached_realpath(result)\n\n\n@lru_cache()\ndef is_opentype_cff_font(filename):\n \"\"\"\n Return whether the given font is a Postscript Compact Font Format Font\n embedded in an OpenType wrapper. 
Used by the PostScript and PDF backends\n that can not subset these fonts.\n \"\"\"\n if os.path.splitext(filename)[1].lower() == '.otf':\n with open(filename, 'rb') as fd:\n return fd.read(4) == b\"OTTO\"\n else:\n return False\n\n\n@lru_cache(64)\ndef _get_font(filename, hinting_factor, *, _kerning_factor, thread_id):\n return ft2font.FT2Font(\n filename, hinting_factor, _kerning_factor=_kerning_factor)\n\n\n# FT2Font objects cannot be used across fork()s because they reference the same\n# FT_Library object. While invalidating *all* existing FT2Fonts after a fork\n# would be too complicated to be worth it, the main way FT2Fonts get reused is\n# via the cache of _get_font, which we can empty upon forking (in Py3.7+).\nif hasattr(os, \"register_at_fork\"):\n os.register_at_fork(after_in_child=_get_font.cache_clear)\n\n\ndef get_font(filename, hinting_factor=None):\n # Resolving the path avoids embedding the font twice in pdf/ps output if a\n # single font is selected using two different relative paths.\n filename = _cached_realpath(filename)\n if hinting_factor is None:\n hinting_factor = rcParams['text.hinting_factor']\n # also key on the thread ID to prevent segfaults with multi-threading\n return _get_font(filename, hinting_factor,\n _kerning_factor=rcParams['text.kerning_factor'],\n thread_id=threading.get_ident())\n\n\ndef _load_fontmanager(*, try_read_cache=True):\n fm_path = Path(\n mpl.get_cachedir(), f\"fontlist-v{FontManager.__version__}.json\")\n if try_read_cache:\n try:\n fm = json_load(fm_path)\n except Exception as exc:\n pass\n else:\n if getattr(fm, \"_version\", object()) == FontManager.__version__:\n _log.debug(\"Using fontManager instance from %s\", fm_path)\n return fm\n fm = FontManager()\n json_dump(fm, fm_path)\n _log.info(\"generated new fontManager\")\n return fm\n\n\nfontManager = _load_fontmanager()\nfindfont = fontManager.findfont\n"
] | [
[
"matplotlib._api.check_in_list",
"matplotlib._api.warn_deprecated",
"matplotlib.get_cachedir",
"matplotlib.fontconfig_pattern.parse_fontconfig_pattern",
"matplotlib.cbook._get_data_path",
"matplotlib.cbook._lock_path",
"matplotlib.ft2font.FT2Font",
"matplotlib.get_data_path",
"matplotlib.fontconfig_pattern.generate_fontconfig_pattern",
"matplotlib.cbook._str_equal",
"matplotlib.afm.AFM"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NiklasSabel/data_mining_1 | [
"c6a572df10c4a92b941c284ccf2ea7c3fd679f02"
] | [
"src/data/make_dataset.py"
] | [
"import pandas as pd\n\n\ndef merge_data(train_values, train_target, test_values):\n \"\"\"\n Function to import all data and concatenate it into one dataframe.\n :return: one dataframe only with train and test features together with train labels\n \"\"\"\n\n data = train_values.join(train_target)\n\n return pd.concat([data, test_values])\n"
] | [
[
"pandas.concat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Thimira/Build-Deeper | [
"7259ac8f4dbf2c5fc6c5b45449d286d53e121636"
] | [
"Chapter 5/lenet_mnist_keras.py"
] | [
"# How to use\n#\n# Train the model and save the model weights\n# python lenet_mnist_keras.py --train-model 1 --save-trained 1\n#\n# Train the model and save the model weights to a give directory\n# python lenet_mnist_keras.py --train-model 1 --save-trained 1 --weights data/lenet_weights.hdf5\n#\n# Evaluate the model from pre-trained model weights\n# python lenet_mnist_keras.py\n#\n# Evaluate the model from pre-trained model weights from a give directory\n# python lenet_mnist_keras.py --weights data/lenet_weights.hdf5\n\n# import the necessary packages\nfrom keras.datasets import mnist\nfrom keras.optimizers import SGD\nfrom keras.utils import np_utils\n\n# imports used to build the deep learning model\nfrom keras.models import Sequential\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.convolutional import MaxPooling2D\nfrom keras.layers.core import Activation\nfrom keras.layers.core import Flatten\nfrom keras.layers.core import Dense\n\nimport numpy as np\nimport argparse\nimport cv2\nimport matplotlib.pyplot as plt\n\n# Setup the argument parser to parse out command line arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-t\", \"--train-model\", type=int, default=-1,\n help=\"(optional) Whether the model should be trained on the MNIST dataset. Defaults to no\")\nap.add_argument(\"-s\", \"--save-trained\", type=int, default=-1,\n help=\"(optional) Whether the trained models weights should be saved.\" +\n \"Overwrites existing weights file with the same name. Use with caution. Defaults to no\")\nap.add_argument(\"-w\", \"--weights\", type=str, default=\"data/lenet_weights.hdf5\",\n help=\"(optional) Path to the weights file. Defaults to 'data/lenet_weights.hdf5'\")\nargs = vars(ap.parse_args())\n\n\ndef build_lenet(width, height, depth, classes, weightsPath=None):\n # Initialize the model\n model = Sequential()\n\n # The first set of CONV => RELU => POOL layers\n model.add(Conv2D(20, (5, 5), padding=\"same\",\n input_shape=(height, width, depth)))\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n\n # The second set of CONV => RELU => POOL layers\n model.add(Conv2D(50, (5, 5), padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n\n # The set of FC => RELU layers\n model.add(Flatten())\n model.add(Dense(500))\n model.add(Activation(\"relu\"))\n\n # The softmax classifier\n model.add(Dense(classes))\n model.add(Activation(\"softmax\"))\n\n # If a weights path is supplied, then load the weights\n if weightsPath is not None:\n model.load_weights(weightsPath)\n\n # Return the constructed network architecture\n return model\n\n\ndef graph_training_history(history):\n plt.figure(1)\n\n # summarize history for accuracy\n\n plt.subplot(211)\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n\n # summarize history for loss\n\n plt.subplot(212)\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n\n plt.show()\n\n\n# Get the MNIST dataset from Keras datasets\n# If this is the first time you are fetching the dataset, it will be downloaded\n# File size will be ~10MB, and will placed at ~/.keras/datasets/mnist.npz\nprint(\"[INFO] Loading the MNIST dataset...\")\n(trainData, 
trainLabels), (testData, testLabels) = mnist.load_data()\n# The data is already in the form of numpy arrays,\n# and already split to training and testing datasets\n\n# Reshape the data matrix from (samples, height, width) to (samples, height, width, depth)\n# Depth (i.e. channels) is 1 since MNIST only has grayscale images\ntrainData = trainData[:, :, :, np.newaxis]\ntestData = testData[:, :, :, np.newaxis]\n\n# Rescale the data from values between [0 - 255] to [0 - 1.0]\ntrainData = trainData / 255.0\ntestData = testData / 255.0\n\n# The labels comes as a single digit, indicating the class.\n# But we need a categorical vector as the label. So we transform it.\n# So that,\n# '0' will become [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n# '1' will become [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]\n# '2' will become [0, 0, 1, 0, 0, 0, 0, 0, 0, 0]\n# and so on...\ntrainLabels = np_utils.to_categorical(trainLabels, 10)\ntestLabels = np_utils.to_categorical(testLabels, 10)\n\n# Build and Compile the model\nprint(\"[INFO] Building and compiling the LeNet model...\")\nopt = SGD(lr=0.01)\nmodel = build_lenet(width=28, height=28, depth=1, classes=10,\n weightsPath=args[\"weights\"] if args[\"train_model\"] <= 0 else None)\nmodel.compile(loss=\"categorical_crossentropy\",\n optimizer=opt, metrics=[\"accuracy\"])\n\n# Check the argument whether to train the model\nif args[\"train_model\"] > 0:\n print(\"[INFO] Training the model...\")\n\n history = model.fit(trainData, trainLabels,\n batch_size=128,\n epochs=20,\n validation_data=(testData, testLabels),\n verbose=1)\n\n # Use the test data to evaluate the model\n print(\"[INFO] Evaluating the model...\")\n\n (loss, accuracy) = model.evaluate(\n testData, testLabels, batch_size=128, verbose=1)\n\n print(\"[INFO] accuracy: {:.2f}%\".format(accuracy * 100))\n\n # Visualize the training history\n graph_training_history(history)\n\n# Check the argument on whether to save the model weights to file\nif args[\"save_trained\"] > 0:\n print(\"[INFO] Saving the model weights to file...\")\n model.save_weights(args[\"weights\"], overwrite=True)\n\n# Training of the model is now complete\n\n# Randomly select a few samples from the test dataset to evaluate\nfor i in np.random.choice(np.arange(0, len(testLabels)), size=(10,)):\n # Use the model to classify the digit\n probs = model.predict(testData[np.newaxis, i])\n prediction = probs.argmax(axis=1)\n\n # Convert the digit data to a color image\n image = (testData[i] * 255).astype(\"uint8\")\n image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n\n # The images are in 28x28 size. Much too small to see properly\n # So, we resize them to 280x280 for viewing\n image = cv2.resize(image, (280, 280), interpolation=cv2.INTER_LINEAR)\n\n # Add the predicted value on to the image\n cv2.putText(image, str(prediction[0]), (20, 40),\n cv2.FONT_HERSHEY_DUPLEX, 1.5, (0, 255, 0), 1)\n\n # Show the image and prediction\n print(\"[INFO] Predicted: {}, Actual: {}\".format(\n prediction[0], np.argmax(testLabels[i])))\n cv2.imshow(\"Digit\", image)\n cv2.waitKey(0)\n\ncv2.destroyAllWindows()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"numpy.argmax",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gosccm/Learning | [
"49ad38597d114ce22b00375ebbea37c401557447",
"49ad38597d114ce22b00375ebbea37c401557447"
] | [
"quantecon/markov/random.py",
"quantecon/tests/test_ricatti.py"
] | [
"\"\"\"\nFilename: random.py\n\nAuthor: Daisuke Oyama\n\nGenerate MarkovChain and DiscreteDP instances randomly.\n\n\"\"\"\nimport numpy as np\nimport scipy.sparse\nfrom numba import jit\n\nfrom .core import MarkovChain\nfrom .ddp import DiscreteDP\nfrom .utilities import sa_indices\nfrom ..util import check_random_state\nfrom ..random import probvec, sample_without_replacement\n\n\ndef random_markov_chain(n, k=None, sparse=False, random_state=None):\n \"\"\"\n Return a randomly sampled MarkovChain instance with n states, where\n each state has k states with positive transition probability.\n\n Parameters\n ----------\n n : scalar(int)\n Number of states.\n\n k : scalar(int), optional(default=None)\n Number of states that may be reached from each state with\n positive probability. Set to n if not specified.\n\n sparse : bool, optional(default=False)\n Whether to store the transition probability matrix in sparse\n matrix form.\n\n random_state : int or np.random.RandomState, optional\n Random seed (integer) or np.random.RandomState instance to set\n the initial state of the random number generator for\n reproducibility. If None, a randomly initialized RandomState is\n used.\n\n Returns\n -------\n mc : MarkovChain\n\n Examples\n --------\n >>> mc = qe.markov.random_markov_chain(3, random_state=1234)\n >>> mc.P\n array([[ 0.19151945, 0.43058932, 0.37789123],\n [ 0.43772774, 0.34763084, 0.21464142],\n [ 0.27259261, 0.5073832 , 0.22002419]])\n >>> mc = qe.markov.random_markov_chain(3, k=2, random_state=1234)\n >>> mc.P\n array([[ 0.19151945, 0.80848055, 0. ],\n [ 0. , 0.62210877, 0.37789123],\n [ 0.56227226, 0. , 0.43772774]])\n\n \"\"\"\n P = random_stochastic_matrix(n, k, sparse, format='csr',\n random_state=random_state)\n mc = MarkovChain(P)\n return mc\n\n\ndef random_stochastic_matrix(n, k=None, sparse=False, format='csr',\n random_state=None):\n \"\"\"\n Return a randomly sampled n x n stochastic matrix with k nonzero\n entries for each row.\n\n Parameters\n ----------\n n : scalar(int)\n Number of states.\n\n k : scalar(int), optional(default=None)\n Number of nonzero entries in each row of the matrix. Set to n if\n not specified.\n\n sparse : bool, optional(default=False)\n Whether to generate the matrix in sparse matrix form.\n\n format : str, optional(default='csr')\n Sparse matrix format, str in {'bsr', 'csr', 'csc', 'coo', 'lil',\n 'dia', 'dok'}. Relevant only when sparse=True.\n\n random_state : int or np.random.RandomState, optional\n Random seed (integer) or np.random.RandomState instance to set\n the initial state of the random number generator for\n reproducibility. 
If None, a randomly initialized RandomState is\n used.\n\n Returns\n -------\n P : numpy ndarray or scipy sparse matrix (float, ndim=2)\n Stochastic matrix.\n\n See also\n --------\n random_markov_chain : Return a random MarkovChain instance.\n\n \"\"\"\n P = _random_stochastic_matrix(m=n, n=n, k=k, sparse=sparse, format=format,\n random_state=random_state)\n return P\n\n\ndef _random_stochastic_matrix(m, n, k=None, sparse=False, format='csr',\n random_state=None):\n \"\"\"\n Generate a \"non-square stochastic matrix\" of shape (m, n), which\n contains as rows m probability vectors of length n with k nonzero\n entries.\n\n For other parameters, see `random_stochastic_matrix`.\n\n \"\"\"\n if k is None:\n k = n\n # m prob vectors of dimension k, shape (m, k)\n probvecs = probvec(m, k, random_state=random_state)\n\n if k == n:\n P = probvecs\n if sparse:\n return scipy.sparse.coo_matrix(P).asformat(format)\n else:\n return P\n\n # if k < n:\n rows = np.repeat(np.arange(m), k)\n cols = \\\n sample_without_replacement(\n n, k, num_trials=m, random_state=random_state\n ).ravel()\n data = probvecs.ravel()\n\n if sparse:\n P = scipy.sparse.coo_matrix((data, (rows, cols)), shape=(m, n))\n return P.asformat(format)\n else:\n P = np.zeros((m, n))\n P[rows, cols] = data\n return P\n\n\ndef random_discrete_dp(num_states, num_actions, beta=None,\n k=None, scale=1, sparse=False, sa_pair=False,\n random_state=None):\n \"\"\"\n Generate a DiscreteDP randomly. The reward values are drawn from the\n normal distribution with mean 0 and standard deviation `scale`.\n\n Parameters\n ----------\n num_states : scalar(int)\n Number of states.\n\n num_actions : scalar(int)\n Number of actions.\n\n beta : scalar(float), optional(default=None)\n Discount factor. Randomly chosen from [0, 1) if not specified.\n\n k : scalar(int), optional(default=None)\n Number of possible next states for each state-action pair. Equal\n to `num_states` if not specified.\n\n scale : scalar(float), optional(default=1)\n Standard deviation of the normal distribution for the reward\n values.\n\n sparse : bool, optional(default=False)\n Whether to store the transition probability array in sparse\n matrix form.\n\n sa_pair : bool, optional(default=False)\n Whether to represent the data in the state-action pairs\n formulation. (If `sparse=True`, automatically set `True`.)\n\n random_state : int or np.random.RandomState, optional\n Random seed (integer) or np.random.RandomState instance to set\n the initial state of the random number generator for\n reproducibility. If None, a randomly initialized RandomState is\n used.\n\n Returns\n -------\n ddp : DiscreteDP\n An instance of DiscreteDP.\n\n \"\"\"\n if sparse:\n sa_pair = True\n\n # Number of state-action pairs\n L = num_states * num_actions\n\n random_state = check_random_state(random_state)\n R = scale * random_state.randn(L)\n Q = _random_stochastic_matrix(L, num_states, k=k,\n sparse=sparse, format='csr',\n random_state=random_state)\n if beta is None:\n beta = random_state.random_sample()\n\n if sa_pair:\n s_indices, a_indices = sa_indices(num_states, num_actions)\n else:\n s_indices, a_indices = None, None\n R.shape = (num_states, num_actions)\n Q.shape = (num_states, num_actions, num_states)\n\n ddp = DiscreteDP(R, Q, beta, s_indices, a_indices)\n return ddp\n",
"\"\"\"\nFilename: test_ricatti.py\nAuthors: Chase Coleman, John Stachurski\nDate: 07/22/2014\n\nTests for solve_discrete_riccati in matrix_eqn.py file\n\n\"\"\"\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom nose.tools import raises\nfrom quantecon.matrix_eqn import solve_discrete_riccati\n\n\ndef dare_golden_num_float(method):\n val = solve_discrete_riccati(1.0, 1.0, 1.0, 1.0, method=method)\n gold_ratio = (1 + np.sqrt(5)) / 2.\n assert_allclose(val, gold_ratio)\n\n\ndef dare_golden_num_2d(method):\n A, B, R, Q = np.eye(2), np.eye(2), np.eye(2), np.eye(2)\n gold_diag = np.eye(2) * (1 + np.sqrt(5)) / 2.\n val = solve_discrete_riccati(A, B, R, Q, method=method)\n assert_allclose(val, gold_diag)\n\n\ndef dare_tjm_1(method):\n A = [[0.0, 0.1, 0.0],\n [0.0, 0.0, 0.1],\n [0.0, 0.0, 0.0]]\n B = [[1.0, 0.0],\n [0.0, 0.0],\n [0.0, 1.0]]\n Q = [[10**5, 0.0, 0.0],\n [0.0, 10**3, 0.0],\n [0.0, 0.0, -10.0]]\n R = [[0.0, 0.0],\n [0.0, 1.0]]\n X = solve_discrete_riccati(A, B, Q, R, method=method)\n Y = np.diag((1e5, 1e3, 0.0))\n assert_allclose(X, Y, atol=1e-07)\n\n\ndef dare_tjm_2(method):\n A = [[0, -1],\n [0, 2]]\n B = [[1, 0],\n [1, 1]]\n Q = [[1, 0],\n [0, 0]]\n R = [[4, 2],\n [2, 1]]\n X = solve_discrete_riccati(A, B, Q, R, method=method)\n Y = np.zeros((2, 2))\n Y[0, 0] = 1\n assert_allclose(X, Y, atol=1e-07)\n\n\ndef dare_tjm_3(method):\n r = 0.5\n I = np.identity(2)\n A = [[2 + r**2, 0],\n [0, 0]]\n A = np.array(A)\n B = I\n R = [[1, r],\n [r, r*r]]\n Q = I - np.dot(A.T, A) + np.dot(A.T, np.linalg.solve(R + I, A))\n X = solve_discrete_riccati(A, B, Q, R, method=method)\n Y = np.identity(2)\n assert_allclose(X, Y, atol=1e-07)\n\n\n_test_funcs = [\n dare_golden_num_float, dare_golden_num_2d,\n dare_tjm_1, dare_tjm_2, dare_tjm_3\n]\n\n\ndef test_solve_discrete_riccati():\n def _test_factory(func, method):\n func(method)\n\n for method in ['doubling', 'qz']:\n for func in _test_funcs:\n yield _test_factory, func, method\n\n\n@raises(ValueError)\ndef test_solve_discrete_riccati_invalid_method():\n method = 'invalid_method'\n _test_funcs[0](method)\n"
] | [
[
"numpy.arange",
"numpy.zeros"
],
[
"numpy.diag",
"numpy.dot",
"numpy.linalg.solve",
"numpy.sqrt",
"numpy.eye",
"numpy.identity",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hardik-prajapati/mne-python | [
"7410696b8897c8782ae293e1c453a43b20197acd"
] | [
"mne/preprocessing/ica.py"
] | [
"# -*- coding: utf-8 -*-\n#\n# Authors: Denis A. Engemann <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Juergen Dammers <[email protected]>\n#\n# License: BSD (3-clause)\n\nfrom inspect import isfunction\nfrom collections import namedtuple\nfrom copy import deepcopy\nfrom numbers import Integral\nfrom time import time\n\nimport os\nimport json\n\nimport numpy as np\nfrom scipy import linalg\n\nfrom .ecg import (qrs_detector, _get_ecg_channel_index, _make_ecg,\n create_ecg_epochs)\nfrom .eog import _find_eog_events, _get_eog_channel_index\nfrom .infomax_ import infomax\n\nfrom ..cov import compute_whitener\nfrom .. import Covariance, Evoked\nfrom ..io.pick import (pick_types, pick_channels, pick_info,\n _picks_to_idx, _DATA_CH_TYPES_SPLIT)\nfrom ..io.write import (write_double_matrix, write_string,\n write_name_list, write_int, start_block,\n end_block)\nfrom ..io.tree import dir_tree_find\nfrom ..io.open import fiff_open\nfrom ..io.tag import read_tag\nfrom ..io.meas_info import write_meas_info, read_meas_info\nfrom ..io.constants import FIFF\nfrom ..io.base import BaseRaw\nfrom ..epochs import BaseEpochs\nfrom ..viz import (plot_ica_components, plot_ica_scores,\n plot_ica_sources, plot_ica_overlay)\nfrom ..viz.ica import plot_ica_properties\nfrom ..viz.topomap import _plot_corrmap\n\nfrom ..channels.channels import _contains_ch_type, ContainsMixin\nfrom ..io.write import start_file, end_file, write_id\nfrom ..utils import (check_version, logger, check_fname, verbose,\n _reject_data_segments, check_random_state, _validate_type,\n compute_corr, _get_inst_data, _ensure_int,\n copy_function_doc_to_method_doc, _pl, warn, Bunch,\n _check_preload, _check_compensation_grade, fill_doc,\n _check_option, _PCA)\nfrom ..utils.check import _check_all_same_channel_names\n\nfrom ..fixes import _get_args\nfrom ..filter import filter_data\nfrom .bads import find_outliers\nfrom .ctps_ import ctps\nfrom ..io.pick import channel_type, pick_channels_regexp\n\n\n__all__ = ('ICA', 'ica_find_ecg_events', 'ica_find_eog_events',\n 'get_score_funcs', 'read_ica', 'run_ica')\n\n\ndef _make_xy_sfunc(func, ndim_output=False):\n \"\"\"Aux function.\"\"\"\n if ndim_output:\n def sfunc(x, y):\n return np.array([func(a, y.ravel()) for a in x])[:, 0]\n else:\n def sfunc(x, y):\n return np.array([func(a, y.ravel()) for a in x])\n sfunc.__name__ = '.'.join(['score_func', func.__module__, func.__name__])\n sfunc.__doc__ = func.__doc__\n return sfunc\n\n\n# makes score funcs attr accessible for users\ndef get_score_funcs():\n \"\"\"Get the score functions.\n\n Returns\n -------\n score_funcs : dict\n The score functions.\n \"\"\"\n from scipy import stats\n from scipy.spatial import distance\n score_funcs = Bunch()\n xy_arg_dist_funcs = [(n, f) for n, f in vars(distance).items()\n if isfunction(f) and not n.startswith('_')]\n xy_arg_stats_funcs = [(n, f) for n, f in vars(stats).items()\n if isfunction(f) and not n.startswith('_')]\n score_funcs.update({n: _make_xy_sfunc(f)\n for n, f in xy_arg_dist_funcs\n if _get_args(f) == ['u', 'v']})\n score_funcs.update({n: _make_xy_sfunc(f, ndim_output=True)\n for n, f in xy_arg_stats_funcs\n if _get_args(f) == ['x', 'y']})\n return score_funcs\n\n\ndef _check_for_unsupported_ica_channels(picks, info, allow_ref_meg=False):\n \"\"\"Check for channels in picks that are not considered valid channels.\n\n Accepted channels are the data channels\n ('seeg','ecog','eeg', 'hbo', 'hbr', 'mag', and 'grad'), 'eog' and 'ref_meg'\n This prevents the program from crashing without\n 
feedback when a bad channel is provided to ICA whitening.\n \"\"\"\n types = _DATA_CH_TYPES_SPLIT + ('eog',)\n types += ('ref_meg',) if allow_ref_meg else ()\n chs = list({channel_type(info, j) for j in picks})\n check = all([ch in types for ch in chs])\n if not check:\n raise ValueError('Invalid channel type%s passed for ICA: %s.'\n 'Only the following types are supported: %s'\n .format(_pl(chs), chs, types))\n\n\n@fill_doc\nclass ICA(ContainsMixin):\n u\"\"\"M/EEG signal decomposition using Independent Component Analysis (ICA).\n\n This object estimates independent components from :class:`mne.io.Raw`,\n :class:`mne.Epochs`, or :class:`mne.Evoked` objects. Components can\n optionally be removed (for artifact repair) prior to signal reconstruction.\n\n .. warning:: ICA is sensitive to low-frequency drifts and therefore\n requires the data to be high-pass filtered prior to fitting.\n Typically, a cutoff frequency of 1 Hz is recommended.\n\n Parameters\n ----------\n n_components : int | float | None\n Number of principal components (from the pre-whitening PCA step) that\n are passed to the ICA algorithm during fitting. If :class:`int`, must\n not be larger than ``max_pca_components``. If :class:`float` between 0\n and 1, the number of components with cumulative explained variance less\n than ``n_components`` will be used. If ``None``, ``max_pca_components``\n will be used. Defaults to ``None``; the actual number used when\n executing the :meth:`ICA.fit` method will be stored in the attribute\n ``n_components_`` (note the trailing underscore).\n max_pca_components : int | None\n Number of principal components (from the pre-whitening PCA step) that\n are retained for later use (i.e., for signal reconstruction in\n :meth:`ICA.apply`; see the ``n_pca_components`` parameter). If\n ``None``, no dimensionality reduction occurs and ``max_pca_components``\n will equal the number of channels in the :class:`mne.io.Raw`,\n :class:`mne.Epochs`, or :class:`mne.Evoked` object passed to\n :meth:`ICA.fit`.\n n_pca_components : int | float | None\n Total number of components (ICA + PCA) used for signal reconstruction\n in :meth:`ICA.apply`. At minimum, at least ``n_components`` will be\n used (unless modified by ``ICA.include`` or ``ICA.exclude``). If\n ``n_pca_components > n_components``, additional PCA components will be\n incorporated. If :class:`float` between 0 and 1, the number is chosen\n as the number of *PCA* components with cumulative explained variance\n less than ``n_components`` (without accounting for ``ICA.include`` or\n ``ICA.exclude``). If :class:`int` or :class:`float`, ``n_components_ ≤\n n_pca_components ≤ max_pca_components`` must hold. If ``None``,\n ``max_pca_components`` will be used. Defaults to ``None``.\n noise_cov : None | instance of Covariance\n Noise covariance used for pre-whitening. If None (default), channels\n are scaled to unit variance (\"z-standardized\") prior to the whitening\n by PCA.\n %(random_state)s\n As estimation can be non-deterministic it can be useful to fix the\n random state to have reproducible results.\n method : {'fastica', 'infomax', 'picard'}\n The ICA method to use in the fit method. Use the fit_params argument to\n set additional parameters. Specifically, if you want Extended Infomax,\n set method='infomax' and fit_params=dict(extended=True) (this also\n works for method='picard'). Defaults to 'fastica'. 
For reference, see\n [1]_, [2]_, [3]_ and [4]_.\n fit_params : dict | None\n Additional parameters passed to the ICA estimator as specified by\n `method`.\n max_iter : int\n Maximum number of iterations during fit. Defaults to 200.\n allow_ref_meg : bool\n Allow ICA on MEG reference channels. Defaults to False.\n\n .. versionadded:: 0.18\n %(verbose)s\n\n Attributes\n ----------\n current_fit : str\n Flag informing about which data type (raw or epochs) was used for the\n fit.\n ch_names : list-like\n Channel names resulting from initial picking.\n n_components_ : int\n If fit, the actual number of PCA components used for ICA decomposition.\n pre_whitener_ : ndarray, shape (n_channels, 1)\n If fit, array used to pre-whiten the data prior to PCA.\n pca_components_ : ndarray, shape (`max_pca_components`, n_channels)\n If fit, the PCA components.\n pca_mean_ : ndarray, shape (n_channels,)\n If fit, the mean vector used to center the data before doing the PCA.\n pca_explained_variance_ : ndarray, shape (`max_pca_components`,)\n If fit, the variance explained by each PCA component.\n mixing_matrix_ : ndarray, shape (`n_components_`, `n_components_`)\n If fit, the whitened mixing matrix to go back from ICA space to PCA\n space.\n It is, in combination with the `pca_components_`, used by\n :meth:`ICA.apply` and :meth:`ICA.get_components` to re-mix/project\n a subset of the ICA components into the observed channel space.\n The former method also removes the pre-whitening (z-scaling) and the\n de-meaning.\n unmixing_matrix_ : ndarray, shape (`n_components_`, `n_components_`)\n If fit, the whitened matrix to go from PCA space to ICA space.\n Used, in combination with the `pca_components_`, by the methods\n :meth:`ICA.get_sources` and :meth:`ICA.apply` to unmix the observed data.\n exclude : array-like of int\n List or np.array of sources indices to exclude when re-mixing the data\n in the :meth:`ICA.apply` method, i.e. artifactual ICA components.\n The components identified manually and by the various automatic\n artifact detection methods should be (manually) appended\n (e.g. ``ica.exclude.extend(eog_inds)``).\n (There is also an `exclude` parameter in the :meth:`ICA.apply` method.)\n To scrap all marked components, set this attribute to an empty list.\n info : None | instance of Info\n The measurement info copied from the object fitted.\n n_samples_ : int\n The number of samples used on fit.\n labels_ : dict\n A dictionary of independent component indices, grouped by types of\n independent components. This attribute is set by some of the artifact\n detection functions.\n\n Notes\n -----\n A trailing ``_`` in an attribute name signifies that the attribute was\n added to the object during fitting, consistent with standard scikit-learn\n practice.\n\n Prior to fitting and applying the ICA, data is whitened (de-correlated and\n scaled to unit variance, also called sphering transformation) by means of\n a Principle Component Analysis (PCA). In addition to the whitening, this\n step introduces the option to reduce the dimensionality of the data, both\n prior to fitting the ICA (with the ``max_pca_components`` parameter) and\n prior to reconstructing the sensor signals (with the ``n_pca_components``\n parameter). In this way, we separate the question of how many ICA\n components to estimate from the question of how much to reduce the\n dimensionality of the signal. 
For example: by setting high values for\n ``max_pca_components`` and ``n_pca_components``, relatively little\n dimensionality reduction will occur when the signal is reconstructed,\n regardless of the value of ``n_components`` (the number of ICA components\n estimated).\n\n .. note:: Commonly used for reasons of i) computational efficiency and\n ii) additional noise reduction, it is a matter of current debate\n whether pre-ICA dimensionality reduction could decrease the\n reliability and stability of the ICA, at least for EEG data and\n especially during preprocessing [5]_. (But see also [6]_ for a\n possibly confounding effect of the different whitening/sphering\n methods used in this paper (ZCA vs. PCA).)\n On the other hand, for rank-deficient data such as EEG data after\n average reference or interpolation, it is recommended to reduce\n the dimensionality (by 1 for average reference and 1 for each\n interpolated channel) for optimal ICA performance (see the\n `EEGLAB wiki <eeglab_wiki_>`_).\n\n Caveat! If supplying a noise covariance, keep track of the projections\n available in the cov or in the raw object. For example, if you are\n interested in EOG or ECG artifacts, EOG and ECG projections should be\n temporally removed before fitting ICA, for example::\n\n >> projs, raw.info['projs'] = raw.info['projs'], []\n >> ica.fit(raw)\n >> raw.info['projs'] = projs\n\n Methods currently implemented are FastICA (default), Infomax, and Picard.\n Standard Infomax can be quite sensitive to differences in floating point\n arithmetic. Extended Infomax seems to be more stable in this respect,\n enhancing reproducibility and stability of results; use Extended Infomax\n via ``method='infomax', fit_params=dict(extended=True)``. Allowed entries\n in ``fit_params`` are determined by the various algorithm implementations:\n see :class:`~sklearn.decomposition.FastICA`, :func:`~picard.picard`,\n :func:`~mne.preprocessing.infomax`.\n\n Reducing the tolerance (set in `fit_params`) speeds up estimation at the\n cost of consistency of the obtained results. It is difficult to directly\n compare tolerance levels between Infomax and Picard, but for Picard and\n FastICA a good rule of thumb is ``tol_fastica == tol_picard ** 2``.\n\n .. _eeglab_wiki: https://sccn.ucsd.edu/wiki/Chapter_09:_Decomposing_Data_Using_ICA#Issue:_ICA_returns_near-identical_components_with_opposite_polarities\n\n References\n ----------\n .. [1] Hyvärinen, A., 1999. Fast and robust fixed-point algorithms for\n independent component analysis. IEEE transactions on Neural\n Networks, 10(3), pp.626-634.\n\n .. [2] Bell, A.J., Sejnowski, T.J., 1995. An information-maximization\n approach to blind separation and blind deconvolution. Neural\n computation, 7(6), pp.1129-1159.\n\n .. [3] Lee, T.W., Girolami, M., Sejnowski, T.J., 1999. Independent\n component analysis using an extended infomax algorithm for mixed\n subgaussian and supergaussian sources. Neural computation, 11(2),\n pp.417-441.\n\n .. [4] Ablin P, Cardoso J, Gramfort A, 2018. Faster Independent Component\n Analysis by Preconditioning With Hessian Approximations.\n IEEE Transactions on Signal Processing 66:4040–4049\n\n .. [5] Artoni, F., Delorme, A., und Makeig, S, 2018. Applying Dimension\n Reduction to EEG Data by Principal Component Analysis Reduces the\n Quality of Its Subsequent Independent Component Decomposition.\n NeuroImage 175, pp.176–187.\n\n .. [6] Montoya-Martínez, J., Cardoso, J.-F., Gramfort, A, 2017. 
Caveats\n with stochastic gradient and maximum likelihood based ICA for EEG.\n LVA-ICA International Conference, Feb 2017, Grenoble, France.\n `〈hal-01451432〉 <hal-01451432_>`_\n\n .. _hal-01451432: https://hal.archives-ouvertes.fr/hal-01451432/document\n \"\"\" # noqa: E501\n\n @verbose\n def __init__(self, n_components=None, max_pca_components=None,\n n_pca_components=None, noise_cov=None, random_state=None,\n method='fastica', fit_params=None, max_iter=200,\n allow_ref_meg=False, verbose=None): # noqa: D102\n _check_option('method', method, ['fastica', 'infomax', 'picard'])\n if method == 'fastica' and not check_version('sklearn', '0.15'):\n raise RuntimeError('The scikit-learn package (version >= 0.15) '\n 'is required for FastICA.')\n\n self.noise_cov = noise_cov\n\n if (n_components is not None and\n max_pca_components is not None and\n n_components > max_pca_components):\n raise ValueError('n_components must be smaller than '\n 'max_pca_components')\n\n if isinstance(n_components, float) \\\n and not 0 < n_components <= 1:\n raise ValueError('Selecting ICA components by explained variance '\n 'needs values between 0.0 and 1.0 ')\n\n self.current_fit = 'unfitted'\n self.verbose = verbose\n self.n_components = n_components\n self.max_pca_components = max_pca_components\n self.n_pca_components = n_pca_components\n self.ch_names = None\n self.random_state = random_state\n\n if fit_params is None:\n fit_params = {}\n fit_params = deepcopy(fit_params) # avoid side effects\n\n if method == 'fastica':\n update = {'algorithm': 'parallel', 'fun': 'logcosh',\n 'fun_args': None}\n fit_params.update({k: v for k, v in update.items() if k\n not in fit_params})\n elif method == 'infomax':\n # extended=True is default in underlying function, but we want\n # default False here unless user specified True:\n fit_params.setdefault('extended', False)\n fit_params.setdefault('max_iter', max_iter)\n self.max_iter = max_iter\n self.fit_params = fit_params\n\n self.exclude = []\n self.info = None\n self.method = method\n self.labels_ = dict()\n self.allow_ref_meg = allow_ref_meg\n\n def __repr__(self):\n \"\"\"ICA fit information.\"\"\"\n if self.current_fit == 'unfitted':\n s = 'no'\n elif self.current_fit == 'raw':\n s = 'raw data'\n else:\n s = 'epochs'\n s += ' decomposition, '\n s += 'fit (%s): %s samples, ' % (self.method,\n str(getattr(self, 'n_samples_', '')))\n s += ('%s components' % str(self.n_components_) if\n hasattr(self, 'n_components_') else\n 'no dimension reduction')\n if self.info is not None:\n ch_fit = ['\"%s\"' % c for c in _DATA_CH_TYPES_SPLIT if c in self]\n s += ', channels used: {}'.format('; '.join(ch_fit))\n if self.exclude:\n s += ', %i sources marked for exclusion' % len(self.exclude)\n\n return '<ICA | %s>' % s\n\n @verbose\n def fit(self, inst, picks=None, start=None, stop=None, decim=None,\n reject=None, flat=None, tstep=2.0, reject_by_annotation=True,\n verbose=None):\n \"\"\"Run the ICA decomposition on raw data.\n\n Caveat! If supplying a noise covariance keep track of the projections\n available in the cov, the raw or the epochs object. For example,\n if you are interested in EOG or ECG artifacts, EOG and ECG projections\n should be temporally removed before fitting the ICA.\n\n Parameters\n ----------\n inst : instance of Raw, Epochs or Evoked\n Raw measurements to be decomposed.\n %(picks_good_data_noref)s\n This selection remains throughout the initialized ICA solution.\n start : int | float | None\n First sample to include. 
If float, data will be interpreted as\n time in seconds. If None, data will be used from the first sample.\n stop : int | float | None\n Last sample to not include. If float, data will be interpreted as\n time in seconds. If None, data will be used to the last sample.\n decim : int | None\n Increment for selecting each nth time slice. If None, all samples\n within ``start`` and ``stop`` are used.\n reject : dict | None\n Rejection parameters based on peak-to-peak amplitude.\n Valid keys are 'grad', 'mag', 'eeg', 'seeg', 'ecog', 'eog', 'ecg',\n 'hbo', 'hbr'.\n If reject is None then no rejection is done. Example::\n\n reject = dict(grad=4000e-13, # T / m (gradiometers)\n mag=4e-12, # T (magnetometers)\n eeg=40e-6, # V (EEG channels)\n eog=250e-6 # V (EOG channels)\n )\n\n It only applies if `inst` is of type Raw.\n flat : dict | None\n Rejection parameters based on flatness of signal.\n Valid keys are 'grad', 'mag', 'eeg', 'seeg', 'ecog', 'eog', 'ecg',\n 'hbo', 'hbr'.\n Values are floats that set the minimum acceptable peak-to-peak\n amplitude. If flat is None then no rejection is done.\n It only applies if `inst` is of type Raw.\n tstep : float\n Length of data chunks for artifact rejection in seconds.\n It only applies if `inst` is of type Raw.\n reject_by_annotation : bool\n Whether to omit bad segments from the data before fitting. If True,\n annotated segments with a description that starts with 'bad' are\n omitted. Has no effect if ``inst`` is an Epochs or Evoked object.\n Defaults to True.\n\n .. versionadded:: 0.14.0\n\n %(verbose_meth)s\n\n Returns\n -------\n self : instance of ICA\n Returns the modified instance.\n \"\"\"\n _validate_type(inst, (BaseRaw, BaseEpochs), 'inst', 'Raw or Epochs')\n picks = _picks_to_idx(inst.info, picks, allow_empty=False,\n with_ref_meg=self.allow_ref_meg)\n _check_for_unsupported_ica_channels(\n picks, inst.info, allow_ref_meg=self.allow_ref_meg)\n\n t_start = time()\n if isinstance(inst, BaseRaw):\n self._fit_raw(inst, picks, start, stop, decim, reject, flat,\n tstep, reject_by_annotation, verbose)\n elif isinstance(inst, BaseEpochs):\n self._fit_epochs(inst, picks, decim, verbose)\n\n # sort ICA components by explained variance\n var = _ica_explained_variance(self, inst)\n var_ord = var.argsort()[::-1]\n _sort_components(self, var_ord, copy=False)\n t_stop = time()\n logger.info(\"Fitting ICA took {:.1f}s.\".format(t_stop - t_start))\n return self\n\n def _reset(self):\n \"\"\"Aux method.\"\"\"\n del self.pre_whitener_\n del self.unmixing_matrix_\n del self.mixing_matrix_\n del self.n_components_\n del self.n_samples_\n del self.pca_components_\n del self.pca_explained_variance_\n del self.pca_mean_\n if hasattr(self, 'drop_inds_'):\n del self.drop_inds_\n if hasattr(self, 'reject_'):\n del self.reject_\n\n def _fit_raw(self, raw, picks, start, stop, decim, reject, flat, tstep,\n reject_by_annotation, verbose):\n \"\"\"Aux method.\"\"\"\n if self.current_fit != 'unfitted':\n self._reset()\n\n logger.info('Fitting ICA to data using %i channels '\n '(please be patient, this may take a while)' % len(picks))\n\n if self.max_pca_components is None:\n self.max_pca_components = len(picks)\n logger.info('Inferring max_pca_components from picks')\n\n self.info = pick_info(raw.info, picks)\n if self.info['comps']:\n self.info['comps'] = []\n self.ch_names = self.info['ch_names']\n start, stop = _check_start_stop(raw, start, stop)\n\n reject_by_annotation = 'omit' if reject_by_annotation else None\n # this will be a copy\n data = raw.get_data(picks, start, 
stop, reject_by_annotation)\n\n # this will be a view\n if decim is not None:\n data = data[:, ::decim]\n\n # this will make a copy\n if (reject is not None) or (flat is not None):\n self.reject_ = reject\n data, self.drop_inds_ = _reject_data_segments(data, reject, flat,\n decim, self.info,\n tstep)\n\n self.n_samples_ = data.shape[1]\n # this may operate inplace or make a copy\n data, self.pre_whitener_ = self._pre_whiten(data, raw.info, picks)\n\n self._fit(data, self.max_pca_components, 'raw')\n\n return self\n\n def _fit_epochs(self, epochs, picks, decim, verbose):\n \"\"\"Aux method.\"\"\"\n if self.current_fit != 'unfitted':\n self._reset()\n\n if epochs.events.size == 0:\n raise RuntimeError('Tried to fit ICA with epochs, but none were '\n 'found: epochs.events is \"{}\".'\n .format(epochs.events))\n\n logger.info('Fitting ICA to data using %i channels '\n '(please be patient, this may take a while)' % len(picks))\n\n # filter out all the channels the raw wouldn't have initialized\n self.info = pick_info(epochs.info, picks)\n\n if self.info['comps']:\n self.info['comps'] = []\n self.ch_names = self.info['ch_names']\n\n if self.max_pca_components is None:\n self.max_pca_components = len(picks)\n logger.info('Inferring max_pca_components from picks')\n\n # this should be a copy (picks a list of int)\n data = epochs.get_data()[:, picks]\n # this will be a view\n if decim is not None:\n data = data[:, :, ::decim]\n\n self.n_samples_ = np.prod(data[:, 0, :].shape)\n\n # This will make at least one copy (one from hstack, maybe one\n # more from _pre_whiten)\n data, self.pre_whitener_ = \\\n self._pre_whiten(np.hstack(data), epochs.info, picks)\n\n self._fit(data, self.max_pca_components, 'epochs')\n\n return self\n\n def _pre_whiten(self, data, info, picks):\n \"\"\"Aux function.\"\"\"\n has_pre_whitener = hasattr(self, 'pre_whitener_')\n if not has_pre_whitener and self.noise_cov is None:\n # use standardization as whitener\n # Scale (z-score) the data by channel type\n info = pick_info(info, picks)\n pre_whitener = np.empty([len(data), 1])\n for ch_type in _DATA_CH_TYPES_SPLIT + ('eog', \"ref_meg\"):\n if _contains_ch_type(info, ch_type):\n if ch_type == 'seeg':\n this_picks = pick_types(info, meg=False, seeg=True)\n elif ch_type == 'ecog':\n this_picks = pick_types(info, meg=False, ecog=True)\n elif ch_type == 'eeg':\n this_picks = pick_types(info, meg=False, eeg=True)\n elif ch_type in ('mag', 'grad'):\n this_picks = pick_types(info, meg=ch_type)\n elif ch_type == 'eog':\n this_picks = pick_types(info, meg=False, eog=True)\n elif ch_type in ('hbo', 'hbr'):\n this_picks = pick_types(info, meg=False, fnirs=ch_type)\n elif ch_type == 'ref_meg':\n this_picks = pick_types(info, meg=False, ref_meg=True)\n else:\n raise RuntimeError('Should not be reached.'\n 'Unsupported channel {}'\n .format(ch_type))\n pre_whitener[this_picks] = np.std(data[this_picks])\n data /= pre_whitener\n elif not has_pre_whitener and self.noise_cov is not None:\n pre_whitener, _ = compute_whitener(self.noise_cov, info, picks)\n assert data.shape[0] == pre_whitener.shape[1]\n data = np.dot(pre_whitener, data)\n elif has_pre_whitener and self.noise_cov is None:\n data /= self.pre_whitener_\n pre_whitener = self.pre_whitener_\n else:\n data = np.dot(self.pre_whitener_, data)\n pre_whitener = self.pre_whitener_\n\n return data, pre_whitener\n\n def _fit(self, data, max_pca_components, fit_type):\n \"\"\"Aux function.\"\"\"\n random_state = check_random_state(self.random_state)\n pca = 
_PCA(n_components=max_pca_components, whiten=True)\n data = pca.fit_transform(data.T)\n\n if isinstance(self.n_components, float):\n n_components_ = np.sum(pca.explained_variance_ratio_.cumsum() <=\n self.n_components)\n if n_components_ < 1:\n raise RuntimeError('One PCA component captures most of the '\n 'explained variance, your threshold resu'\n 'lts in 0 components. You should select '\n 'a higher value.')\n logger.info('Selection by explained variance: %i components' %\n n_components_)\n sel = slice(n_components_)\n else:\n if self.n_components is not None: # normal n case\n sel = slice(self.n_components)\n logger.info('Selection by number: %i components' %\n self.n_components)\n else: # None case\n logger.info('Using all PCA components: %i'\n % len(pca.components_))\n sel = slice(len(pca.components_))\n\n # the things to store for PCA\n self.pca_mean_ = pca.mean_\n self.pca_components_ = pca.components_\n self.pca_explained_variance_ = exp_var = pca.explained_variance_\n del pca\n # update number of components\n self.n_components_ = sel.stop\n self._update_ica_names()\n if self.n_pca_components is not None:\n if self.n_pca_components > len(self.pca_components_):\n self.n_pca_components = len(self.pca_components_)\n\n # take care of ICA\n if self.method == 'fastica':\n from sklearn.decomposition import FastICA\n ica = FastICA(whiten=False, random_state=random_state,\n **self.fit_params)\n ica.fit(data[:, sel])\n self.unmixing_matrix_ = ica.components_\n elif self.method in ('infomax', 'extended-infomax'):\n self.unmixing_matrix_ = infomax(data[:, sel],\n random_state=random_state,\n **self.fit_params)\n elif self.method == 'picard':\n from picard import picard\n _, W, _ = picard(data[:, sel].T, whiten=False,\n random_state=random_state, **self.fit_params)\n del _\n self.unmixing_matrix_ = W\n self.unmixing_matrix_ /= np.sqrt(exp_var[sel])[None, :] # whitening\n self.mixing_matrix_ = linalg.pinv(self.unmixing_matrix_)\n self.current_fit = fit_type\n\n def _update_ica_names(self):\n \"\"\"Update ICA names when n_components_ is set.\"\"\"\n self._ica_names = ['ICA%03d' % ii for ii in range(self.n_components_)]\n\n def _transform(self, data):\n \"\"\"Compute sources from data (operates inplace).\"\"\"\n if self.pca_mean_ is not None:\n data -= self.pca_mean_[:, None]\n\n # Apply first PCA\n pca_data = np.dot(self.pca_components_[:self.n_components_], data)\n # Apply unmixing to low dimension PCA\n sources = np.dot(self.unmixing_matrix_, pca_data)\n return sources\n\n def _transform_raw(self, raw, start, stop, reject_by_annotation=False):\n \"\"\"Transform raw data.\"\"\"\n if not hasattr(self, 'mixing_matrix_'):\n raise RuntimeError('No fit available. Please fit ICA.')\n start, stop = _check_start_stop(raw, start, stop)\n\n picks = pick_types(raw.info, include=self.ch_names, exclude='bads',\n meg=False, ref_meg=False)\n if len(picks) != len(self.ch_names):\n raise RuntimeError('Raw doesn\\'t match fitted data: %i channels '\n 'fitted but %i channels supplied. \\nPlease '\n 'provide Raw compatible with '\n 'ica.ch_names' % (len(self.ch_names),\n len(picks)))\n\n if reject_by_annotation:\n data = raw.get_data(picks, start, stop, 'omit')\n else:\n data = raw[picks, start:stop][0]\n data, _ = self._pre_whiten(data, raw.info, picks)\n return self._transform(data)\n\n def _transform_epochs(self, epochs, concatenate):\n \"\"\"Aux method.\"\"\"\n if not hasattr(self, 'mixing_matrix_'):\n raise RuntimeError('No fit available. 
Please fit ICA.')\n\n picks = pick_types(epochs.info, include=self.ch_names, exclude='bads',\n meg=False, ref_meg=False)\n # special case where epochs come picked but fit was 'unpicked'.\n if len(picks) != len(self.ch_names):\n raise RuntimeError('Epochs don\\'t match fitted data: %i channels '\n 'fitted but %i channels supplied. \\nPlease '\n 'provide Epochs compatible with '\n 'ica.ch_names' % (len(self.ch_names),\n len(picks)))\n\n data = np.hstack(epochs.get_data()[:, picks])\n data, _ = self._pre_whiten(data, epochs.info, picks)\n sources = self._transform(data)\n\n if not concatenate:\n # Put the data back in 3D\n sources = np.array(np.split(sources, len(epochs.events), 1))\n\n return sources\n\n def _transform_evoked(self, evoked):\n \"\"\"Aux method.\"\"\"\n if not hasattr(self, 'mixing_matrix_'):\n raise RuntimeError('No fit available. Please fit ICA.')\n\n picks = pick_types(evoked.info, include=self.ch_names, exclude='bads',\n meg=False, ref_meg=False)\n\n if len(picks) != len(self.ch_names):\n raise RuntimeError('Evoked doesn\\'t match fitted data: %i channels'\n ' fitted but %i channels supplied. \\nPlease '\n 'provide Evoked compatible with '\n 'ica.ch_names' % (len(self.ch_names),\n len(picks)))\n\n data, _ = self._pre_whiten(evoked.data[picks], evoked.info, picks)\n sources = self._transform(data)\n\n return sources\n\n def get_components(self):\n \"\"\"Get ICA topomap for components as numpy arrays.\n\n Returns\n -------\n components : array, shape (n_channels, n_components)\n The ICA components (maps).\n \"\"\"\n return np.dot(self.mixing_matrix_[:, :self.n_components_].T,\n self.pca_components_[:self.n_components_]).T\n\n def get_sources(self, inst, add_channels=None, start=None, stop=None):\n \"\"\"Estimate sources given the unmixing matrix.\n\n This method will return the sources in the container format passed.\n Typical usecases:\n\n 1. pass Raw object to use `raw.plot` for ICA sources\n 2. pass Epochs object to compute trial-based statistics in ICA space\n 3. pass Evoked object to investigate time-locking in ICA space\n\n Parameters\n ----------\n inst : instance of Raw, Epochs or Evoked\n Object to compute sources from and to represent sources in.\n add_channels : None | list of str\n Additional channels to be added. Useful to e.g. compare sources\n with some reference. Defaults to None\n start : int | float | None\n First sample to include. If float, data will be interpreted as\n time in seconds. If None, the entire data will be used.\n stop : int | float | None\n Last sample to not include. If float, data will be interpreted as\n time in seconds. 
If None, the entire data will be used.\n\n Returns\n -------\n sources : instance of Raw, Epochs or Evoked\n The ICA sources time series.\n \"\"\"\n if isinstance(inst, BaseRaw):\n _check_compensation_grade(self.info, inst.info, 'ICA', 'Raw',\n ch_names=self.ch_names)\n sources = self._sources_as_raw(inst, add_channels, start, stop)\n elif isinstance(inst, BaseEpochs):\n _check_compensation_grade(self.info, inst.info, 'ICA', 'Epochs',\n ch_names=self.ch_names)\n sources = self._sources_as_epochs(inst, add_channels, False)\n elif isinstance(inst, Evoked):\n _check_compensation_grade(self.info, inst.info, 'ICA', 'Evoked',\n ch_names=self.ch_names)\n sources = self._sources_as_evoked(inst, add_channels)\n else:\n raise ValueError('Data input must be of Raw, Epochs or Evoked '\n 'type')\n return sources\n\n def _sources_as_raw(self, raw, add_channels, start, stop):\n \"\"\"Aux method.\"\"\"\n # merge copied instance and picked data with sources\n sources = self._transform_raw(raw, start=start, stop=stop)\n if raw.preload: # get data and temporarily delete\n data = raw._data\n del raw._data\n\n out = raw.copy() # copy and reappend\n if raw.preload:\n raw._data = data\n\n # populate copied raw.\n start, stop = _check_start_stop(raw, start, stop)\n if add_channels is not None:\n raw_picked = raw.copy().pick_channels(add_channels)\n data_, times_ = raw_picked[:, start:stop]\n data_ = np.r_[sources, data_]\n else:\n data_ = sources\n _, times_ = raw[0, start:stop]\n out._data = data_\n out._times = times_\n out._filenames = [None]\n out.preload = True\n\n # update first and last samples\n out._first_samps = np.array([raw.first_samp +\n (start if start else 0)])\n out._last_samps = np.array([out.first_samp + stop\n if stop else raw.last_samp])\n\n out._projector = None\n self._export_info(out.info, raw, add_channels)\n out._update_times()\n\n return out\n\n def _sources_as_epochs(self, epochs, add_channels, concatenate):\n \"\"\"Aux method.\"\"\"\n out = epochs.copy()\n sources = self._transform_epochs(epochs, concatenate)\n if add_channels is not None:\n picks = [epochs.ch_names.index(k) for k in add_channels]\n else:\n picks = []\n out._data = np.concatenate([sources, epochs.get_data()[:, picks]],\n axis=1) if len(picks) > 0 else sources\n\n self._export_info(out.info, epochs, add_channels)\n out.preload = True\n out._raw = None\n out._projector = None\n\n return out\n\n def _sources_as_evoked(self, evoked, add_channels):\n \"\"\"Aux method.\"\"\"\n if add_channels is not None:\n picks = [evoked.ch_names.index(k) for k in add_channels]\n else:\n picks = []\n\n sources = self._transform_evoked(evoked)\n if len(picks) > 1:\n data = np.r_[sources, evoked.data[picks]]\n else:\n data = sources\n out = evoked.copy()\n out.data = data\n self._export_info(out.info, evoked, add_channels)\n\n return out\n\n def _export_info(self, info, container, add_channels):\n \"\"\"Aux method.\"\"\"\n # set channel names and info\n ch_names = []\n ch_info = info['chs'] = []\n for ii, name in enumerate(self._ica_names):\n ch_names.append(name)\n ch_info.append(dict(\n ch_name=name, cal=1, logno=ii + 1,\n coil_type=FIFF.FIFFV_COIL_NONE, kind=FIFF.FIFFV_MISC_CH,\n coord_Frame=FIFF.FIFFV_COORD_UNKNOWN, unit=FIFF.FIFF_UNIT_NONE,\n loc=np.array([0., 0., 0., 1.] 
* 3, dtype='f4'),\n range=1.0, scanno=ii + 1, unit_mul=0))\n\n if add_channels is not None:\n # re-append additionally picked ch_names\n ch_names += add_channels\n # re-append additionally picked ch_info\n ch_info += [k for k in container.info['chs'] if k['ch_name'] in\n add_channels]\n info['bads'] = [ch_names[k] for k in self.exclude]\n info['projs'] = [] # make sure projections are removed.\n info._update_redundant()\n info._check_consistency()\n\n @verbose\n def score_sources(self, inst, target=None, score_func='pearsonr',\n start=None, stop=None, l_freq=None, h_freq=None,\n reject_by_annotation=True, verbose=None):\n \"\"\"Assign score to components based on statistic or metric.\n\n Parameters\n ----------\n inst : instance of Raw, Epochs or Evoked\n The object to reconstruct the sources from.\n target : array-like | str | None\n Signal to which the sources shall be compared. It has to be of\n the same shape as the sources. If str, a routine will try to find\n a matching channel name. If None, a score\n function expecting only one input-array argument must be used,\n for instance, scipy.stats.skew (default).\n score_func : callable | str\n Callable taking as arguments either two input arrays\n (e.g. Pearson correlation) or one input\n array (e. g. skewness) and returns a float. For convenience the\n most common score_funcs are available via string labels:\n Currently, all distance metrics from scipy.spatial and All\n functions from scipy.stats taking compatible input arguments are\n supported. These function have been modified to support iteration\n over the rows of a 2D array.\n start : int | float | None\n First sample to include. If float, data will be interpreted as\n time in seconds. If None, data will be used from the first sample.\n stop : int | float | None\n Last sample to not include. If float, data will be interpreted as\n time in seconds. If None, data will be used to the last sample.\n l_freq : float\n Low pass frequency.\n h_freq : float\n High pass frequency.\n reject_by_annotation : bool\n If True, data annotated as bad will be omitted. Defaults to True.\n\n .. 
versionadded:: 0.14.0\n\n %(verbose_meth)s\n\n Returns\n -------\n scores : ndarray\n scores for each source as returned from score_func\n \"\"\"\n if isinstance(inst, BaseRaw):\n _check_compensation_grade(self.info, inst.info, 'ICA', 'Raw',\n ch_names=self.ch_names)\n sources = self._transform_raw(inst, start, stop,\n reject_by_annotation)\n elif isinstance(inst, BaseEpochs):\n _check_compensation_grade(self.info, inst.info, 'ICA', 'Epochs',\n ch_names=self.ch_names)\n sources = self._transform_epochs(inst, concatenate=True)\n elif isinstance(inst, Evoked):\n _check_compensation_grade(self.info, inst.info, 'ICA', 'Evoked',\n ch_names=self.ch_names)\n sources = self._transform_evoked(inst)\n else:\n raise ValueError('Data input must be of Raw, Epochs or Evoked '\n 'type')\n\n if target is not None: # we can have univariate metrics without target\n target = self._check_target(target, inst, start, stop,\n reject_by_annotation)\n\n if sources.shape[-1] != target.shape[-1]:\n raise ValueError('Sources and target do not have the same'\n 'number of time slices.')\n # auto target selection\n if isinstance(inst, BaseRaw):\n # We pass inst, not self, because the sfreq of the data we\n # use for scoring components can be different:\n sources, target = _band_pass_filter(inst, sources, target,\n l_freq, h_freq)\n\n scores = _find_sources(sources, target, score_func)\n\n return scores\n\n def _check_target(self, target, inst, start, stop,\n reject_by_annotation=False):\n \"\"\"Aux Method.\"\"\"\n if isinstance(inst, BaseRaw):\n reject_by_annotation = 'omit' if reject_by_annotation else None\n start, stop = _check_start_stop(inst, start, stop)\n if hasattr(target, 'ndim'):\n if target.ndim < 2:\n target = target.reshape(1, target.shape[-1])\n if isinstance(target, str):\n pick = _get_target_ch(inst, target)\n target = inst.get_data(pick, start, stop, reject_by_annotation)\n\n elif isinstance(inst, BaseEpochs):\n if isinstance(target, str):\n pick = _get_target_ch(inst, target)\n target = inst.get_data()[:, pick]\n\n if hasattr(target, 'ndim'):\n if target.ndim == 3 and min(target.shape) == 1:\n target = target.ravel()\n\n elif isinstance(inst, Evoked):\n if isinstance(target, str):\n pick = _get_target_ch(inst, target)\n target = inst.data[pick]\n\n return target\n\n def _find_bads_ch(self, inst, chs, threshold=3.0, start=None,\n stop=None, l_freq=None, h_freq=None,\n reject_by_annotation=True, prefix=\"chs\"):\n \"\"\"Compute ExG/ref components.\n\n See find_bads_ecg, find_bads, eog, and find_bads_ref for details.\n \"\"\"\n scores, idx = [], []\n # some magic we need inevitably ...\n # get targets before equalizing\n targets = [self._check_target(\n ch, inst, start, stop, reject_by_annotation) for ch in chs]\n # assign names, if targets are arrays instead of strings\n target_names = []\n for ch in chs:\n if not isinstance(ch, str):\n if prefix == \"ecg\":\n target_names.append('ECG-MAG')\n else:\n target_names.append(prefix)\n else:\n target_names.append(ch)\n\n for ii, (ch, target) in enumerate(zip(target_names, targets)):\n scores += [self.score_sources(\n inst, target=target, score_func='pearsonr', start=start,\n stop=stop, l_freq=l_freq, h_freq=h_freq,\n reject_by_annotation=reject_by_annotation)]\n # pick last scores\n this_idx = find_outliers(scores[-1], threshold=threshold)\n idx += [this_idx]\n self.labels_['%s/%i/' % (prefix, ii) + ch] = list(this_idx)\n\n # remove duplicates but keep order by score, even across multiple\n # ref channels\n scores_ = np.concatenate([scores[ii][inds]\n for ii, 
inds in enumerate(idx)])\n idx_ = np.concatenate(idx)[np.abs(scores_).argsort()[::-1]]\n\n idx_unique = list(np.unique(idx_))\n idx = []\n for i in idx_:\n if i in idx_unique:\n idx.append(i)\n idx_unique.remove(i)\n if len(scores) == 1:\n scores = scores[0]\n labels = list(idx)\n\n return labels, scores\n\n @verbose\n def find_bads_ecg(self, inst, ch_name=None, threshold=None, start=None,\n stop=None, l_freq=8, h_freq=16, method='ctps',\n reject_by_annotation=True, verbose=None):\n \"\"\"Detect ECG related components using correlation.\n\n .. note:: If no ECG channel is available, routine attempts to create\n an artificial ECG based on cross-channel averaging.\n\n Parameters\n ----------\n inst : instance of Raw, Epochs or Evoked\n Object to compute sources from.\n ch_name : str\n The name of the channel to use for ECG peak detection.\n The argument is mandatory if the dataset contains no ECG\n channels.\n threshold : float\n The value above which a feature is classified as outlier. If\n method is 'ctps', defaults to 0.25, else defaults to 3.0.\n start : int | float | None\n First sample to include. If float, data will be interpreted as\n time in seconds. If None, data will be used from the first sample.\n stop : int | float | None\n Last sample to not include. If float, data will be interpreted as\n time in seconds. If None, data will be used to the last sample.\n l_freq : float\n Low pass frequency.\n h_freq : float\n High pass frequency.\n method : {'ctps', 'correlation'}\n The method used for detection. If 'ctps', cross-trial phase\n statistics [1] are used to detect ECG related components.\n Thresholding is then based on the significance value of a Kuiper\n statistic.\n If 'correlation', detection is based on Pearson correlation\n between the filtered data and the filtered ECG channel.\n Thresholding is based on iterative z-scoring. The above\n threshold components will be masked and the z-score will\n be recomputed until no supra-threshold component remains.\n Defaults to 'ctps'.\n reject_by_annotation : bool\n If True, data annotated as bad will be omitted. Defaults to True.\n\n .. versionadded:: 0.14.0\n\n %(verbose_meth)s\n\n Returns\n -------\n ecg_idx : list of int\n The indices of ECG related components.\n scores : np.ndarray of float, shape (``n_components_``)\n The correlation scores.\n\n See Also\n --------\n find_bads_eog, find_bads_ref\n\n References\n ----------\n [1] Dammers, J., Schiek, M., Boers, F., Silex, C., Zvyagintsev,\n M., Pietrzyk, U., Mathiak, K., 2008. Integration of amplitude\n and phase statistics for complete artifact removal in independent\n components of neuromagnetic recordings. Biomedical\n Engineering, IEEE Transactions on 55 (10), 2353-2362.\n \"\"\"\n idx_ecg = _get_ecg_channel_index(ch_name, inst)\n\n if idx_ecg is None:\n ecg, times = _make_ecg(inst, start, stop,\n reject_by_annotation=reject_by_annotation)\n else:\n ecg = inst.ch_names[idx_ecg]\n\n if method == 'ctps':\n if threshold is None:\n threshold = 0.25\n if isinstance(inst, BaseRaw):\n sources = self.get_sources(create_ecg_epochs(\n inst, ch_name, l_freq=l_freq, h_freq=h_freq,\n keep_ecg=False,\n reject_by_annotation=reject_by_annotation)).get_data()\n\n if sources.shape[0] == 0:\n warn('No ECG activity detected. 
Consider changing '\n 'the input parameters.')\n elif isinstance(inst, BaseEpochs):\n sources = self.get_sources(inst).get_data()\n else:\n raise ValueError('With `ctps` only Raw and Epochs input is '\n 'supported')\n _, p_vals, _ = ctps(sources)\n scores = p_vals.max(-1)\n ecg_idx = np.where(scores >= threshold)[0]\n # sort indices by scores\n ecg_idx = ecg_idx[np.abs(scores[ecg_idx]).argsort()[::-1]]\n\n self.labels_['ecg'] = list(ecg_idx)\n if ch_name is None:\n ch_name = 'ECG-MAG'\n self.labels_['ecg/%s' % ch_name] = list(ecg_idx)\n elif method == 'correlation':\n if threshold is None:\n threshold = 3.0\n self.labels_['ecg'], scores = self._find_bads_ch(\n inst, [ecg], threshold=threshold, start=start, stop=stop,\n l_freq=l_freq, h_freq=h_freq, prefix=\"ecg\",\n reject_by_annotation=reject_by_annotation)\n else:\n raise ValueError('Method \"%s\" not supported.' % method)\n return self.labels_['ecg'], scores\n\n @verbose\n def find_bads_ref(self, inst, ch_name=None, threshold=3.0, start=None,\n stop=None, l_freq=None, h_freq=None,\n reject_by_annotation=True, verbose=None):\n \"\"\"Detect MEG reference related components using correlation.\n\n Parameters\n ----------\n inst : instance of Raw, Epochs or Evoked\n Object to compute sources from. Should contain at least one channel\n i.e. component derived from MEG reference channels.\n ch_name: list of int\n Which MEG reference components to use. If None, then all channels\n that begin with REF_ICA\n threshold : int | float\n The value above which a feature is classified as outlier.\n start : int | float | None\n First sample to include. If float, data will be interpreted as\n time in seconds. If None, data will be used from the first sample.\n stop : int | float | None\n Last sample to not include. If float, data will be interpreted as\n time in seconds. If None, data will be used to the last sample.\n l_freq : float\n Low pass frequency.\n h_freq : float\n High pass frequency.\n reject_by_annotation : bool\n If True, data annotated as bad will be omitted. Defaults to True.\n %(verbose_meth)s\n\n Returns\n -------\n ref_idx : list of int\n The indices of MEG reference related components, sorted by score.\n scores : np.ndarray of float, shape (``n_components_``) | list of array\n The correlation scores.\n\n Notes\n -----\n Detection is based on Pearson correlation between the MEG data\n components and MEG reference components.\n Thresholding is based on adaptive z-scoring. The above threshold\n components will be masked and the z-score will be recomputed\n until no supra-threshold component remains.\n\n Recommended procedure is to perform ICA separately on reference\n channels, extract them using .get_sources(), and then append them to\n the inst using .add_channels(), preferably with the prefix REF_ICA so\n that they can be automatically detected.\n\n .. 
versionadded:: 0.18\n\n See Also\n --------\n find_bads_ecg, find_bads_eog\n \"\"\"\n inds = []\n if not ch_name:\n inds = pick_channels_regexp(inst.ch_names, \"REF_ICA*\")\n else:\n inds = pick_channels(inst.ch_names, ch_name)\n if not inds:\n raise ValueError('No reference components found or selected.')\n ref_chs = [inst.ch_names[k] for k in inds]\n\n self.labels_['ref_meg'], scores = self._find_bads_ch(\n inst, ref_chs, threshold=threshold, start=start, stop=stop,\n l_freq=l_freq, h_freq=h_freq, prefix=\"ref_meg\",\n reject_by_annotation=reject_by_annotation)\n return self.labels_['ref_meg'], scores\n\n @verbose\n def find_bads_eog(self, inst, ch_name=None, threshold=3.0, start=None,\n stop=None, l_freq=1, h_freq=10,\n reject_by_annotation=True, verbose=None):\n \"\"\"Detect EOG related components using correlation.\n\n Detection is based on Pearson correlation between the\n filtered data and the filtered EOG channel.\n Thresholding is based on adaptive z-scoring. The above threshold\n components will be masked and the z-score will be recomputed\n until no supra-threshold component remains.\n\n Parameters\n ----------\n inst : instance of Raw, Epochs or Evoked\n Object to compute sources from.\n ch_name : str\n The name of the channel to use for EOG peak detection.\n The argument is mandatory if the dataset contains no EOG\n channels.\n threshold : int | float\n The value above which a feature is classified as outlier.\n start : int | float | None\n First sample to include. If float, data will be interpreted as\n time in seconds. If None, data will be used from the first sample.\n stop : int | float | None\n Last sample to not include. If float, data will be interpreted as\n time in seconds. If None, data will be used to the last sample.\n l_freq : float\n Low pass frequency.\n h_freq : float\n High pass frequency.\n reject_by_annotation : bool\n If True, data annotated as bad will be omitted. Defaults to True.\n\n .. versionadded:: 0.14.0\n\n %(verbose_meth)s\n\n Returns\n -------\n eog_idx : list of int\n The indices of EOG related components, sorted by score.\n scores : np.ndarray of float, shape (``n_components_``) | list of array\n The correlation scores.\n\n See Also\n --------\n find_bads_ecg, find_bads_ref\n \"\"\"\n eog_inds = _get_eog_channel_index(ch_name, inst)\n if len(eog_inds) > 2:\n eog_inds = eog_inds[:1]\n logger.info('Using EOG channel %s' % inst.ch_names[eog_inds[0]])\n eog_chs = [inst.ch_names[k] for k in eog_inds]\n\n self.labels_['eog'], scores = self._find_bads_ch(\n inst, eog_chs, threshold=threshold, start=start, stop=stop,\n l_freq=l_freq, h_freq=h_freq, prefix=\"eog\",\n reject_by_annotation=reject_by_annotation)\n return self.labels_['eog'], scores\n\n def apply(self, inst, include=None, exclude=None, n_pca_components=None,\n start=None, stop=None):\n \"\"\"Remove selected components from the signal.\n\n Given the unmixing matrix, transform data,\n zero out components, and inverse transform the data.\n This procedure will reconstruct M/EEG signals from which\n the dynamics described by the excluded components is subtracted.\n The data is processed in place.\n\n Parameters\n ----------\n inst : instance of Raw, Epochs or Evoked\n The data to be processed. The instance is modified inplace.\n include : array_like of int.\n The indices referring to columns in the ummixing matrix. The\n components to be kept.\n exclude : array_like of int.\n The indices referring to columns in the ummixing matrix. 
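For instance, removing two previously identified components could look like this (the component indices are purely illustrative)::\n\n >> ica.apply(raw, exclude=[0, 3])\n\n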
The\n components to be zeroed out.\n n_pca_components : int | float | None\n The number of PCA components to be kept, either absolute (int)\n or percentage of the explained variance (float). If None (default),\n all PCA components will be used.\n start : int | float | None\n First sample to include. If float, data will be interpreted as\n time in seconds. If None, data will be used from the first sample.\n stop : int | float | None\n Last sample to not include. If float, data will be interpreted as\n time in seconds. If None, data will be used to the last sample.\n\n Returns\n -------\n out : instance of Raw, Epochs or Evoked\n The processed data.\n \"\"\"\n _validate_type(inst, (BaseRaw, BaseEpochs, Evoked), 'inst',\n 'Raw, Epochs, or Evoked')\n kwargs = dict(include=include, exclude=exclude,\n n_pca_components=n_pca_components)\n if isinstance(inst, BaseRaw):\n kind, meth = 'Raw', self._apply_raw\n kwargs.update(raw=inst, start=start, stop=stop)\n elif isinstance(inst, BaseEpochs):\n kind, meth = 'Epochs', self._apply_epochs\n kwargs.update(epochs=inst)\n else: # isinstance(inst, Evoked):\n kind, meth = 'Evoked', self._apply_evoked\n kwargs.update(evoked=inst)\n _check_compensation_grade(self.info, inst.info, 'ICA', kind,\n ch_names=self.ch_names)\n return meth(**kwargs)\n\n def _check_exclude(self, exclude):\n if exclude is None:\n return list(set(self.exclude))\n else:\n # Allow both self.exclude and exclude to be array-like:\n return list(set(self.exclude).union(set(exclude)))\n\n def _apply_raw(self, raw, include, exclude, n_pca_components, start, stop):\n \"\"\"Aux method.\"\"\"\n _check_preload(raw, \"ica.apply\")\n\n if n_pca_components is not None:\n self.n_pca_components = n_pca_components\n\n start, stop = _check_start_stop(raw, start, stop)\n\n picks = pick_types(raw.info, meg=False, include=self.ch_names,\n exclude='bads', ref_meg=False)\n\n data = raw[picks, start:stop][0]\n data, _ = self._pre_whiten(data, raw.info, picks)\n\n data = self._pick_sources(data, include, exclude)\n\n raw[picks, start:stop] = data\n return raw\n\n def _apply_epochs(self, epochs, include, exclude, n_pca_components):\n \"\"\"Aux method.\"\"\"\n _check_preload(epochs, \"ica.apply\")\n\n picks = pick_types(epochs.info, meg=False, ref_meg=False,\n include=self.ch_names,\n exclude='bads')\n\n # special case where epochs come picked but fit was 'unpicked'.\n if len(picks) != len(self.ch_names):\n raise RuntimeError('Epochs don\\'t match fitted data: %i channels '\n 'fitted but %i channels supplied. \\nPlease '\n 'provide Epochs compatible with '\n 'ica.ch_names' % (len(self.ch_names),\n len(picks)))\n\n if n_pca_components is not None:\n self.n_pca_components = n_pca_components\n\n data = np.hstack(epochs.get_data()[:, picks])\n data, _ = self._pre_whiten(data, epochs.info, picks)\n data = self._pick_sources(data, include=include, exclude=exclude)\n\n # restore epochs, channels, tsl order\n epochs._data[:, picks] = np.array(\n np.split(data, len(epochs.events), 1))\n epochs.preload = True\n\n return epochs\n\n def _apply_evoked(self, evoked, include, exclude, n_pca_components):\n \"\"\"Aux method.\"\"\"\n picks = pick_types(evoked.info, meg=False, ref_meg=False,\n include=self.ch_names,\n exclude='bads')\n\n # special case where evoked come picked but fit was 'unpicked'.\n if len(picks) != len(self.ch_names):\n raise RuntimeError('Evoked does not match fitted data: %i channels'\n ' fitted but %i channels supplied. 
\\nPlease '\n 'provide an Evoked object that\\'s compatible '\n 'with ica.ch_names' % (len(self.ch_names),\n len(picks)))\n\n if n_pca_components is not None:\n self.n_pca_components = n_pca_components\n\n data = evoked.data[picks]\n data, _ = self._pre_whiten(data, evoked.info, picks)\n data = self._pick_sources(data, include=include,\n exclude=exclude)\n\n # restore evoked\n evoked.data[picks] = data\n\n return evoked\n\n def _pick_sources(self, data, include, exclude):\n \"\"\"Aux function.\"\"\"\n exclude = self._check_exclude(exclude)\n _n_pca_comp = self._check_n_pca_components(self.n_pca_components)\n\n if not(self.n_components_ <= _n_pca_comp <= self.max_pca_components):\n raise ValueError('n_pca_components must be >= '\n 'n_components and <= max_pca_components.')\n\n n_components = self.n_components_\n logger.info('Transforming to ICA space (%i components)' % n_components)\n\n # Apply first PCA\n if self.pca_mean_ is not None:\n data -= self.pca_mean_[:, None]\n\n sel_keep = np.arange(n_components)\n if include not in (None, []):\n sel_keep = np.unique(include)\n elif exclude not in (None, []):\n sel_keep = np.setdiff1d(np.arange(n_components), exclude)\n\n logger.info('Zeroing out %i ICA components'\n % (n_components - len(sel_keep)))\n\n unmixing = np.eye(_n_pca_comp)\n unmixing[:n_components, :n_components] = self.unmixing_matrix_\n unmixing = np.dot(unmixing, self.pca_components_[:_n_pca_comp])\n\n mixing = np.eye(_n_pca_comp)\n mixing[:n_components, :n_components] = self.mixing_matrix_\n mixing = np.dot(self.pca_components_[:_n_pca_comp].T, mixing)\n\n if _n_pca_comp > n_components:\n sel_keep = np.concatenate(\n (sel_keep, range(n_components, _n_pca_comp)))\n\n proj_mat = np.dot(mixing[:, sel_keep], unmixing[sel_keep, :])\n\n data = np.dot(proj_mat, data)\n\n if self.pca_mean_ is not None:\n data += self.pca_mean_[:, None]\n\n # restore scaling\n if self.noise_cov is None: # revert standardization\n data *= self.pre_whitener_\n else:\n data = np.dot(linalg.pinv(self.pre_whitener_, cond=1e-14), data)\n\n return data\n\n @verbose\n def save(self, fname):\n \"\"\"Store ICA solution into a fiff file.\n\n Parameters\n ----------\n fname : str\n The absolute path of the file name to save the ICA solution into.\n The file name should end with -ica.fif or -ica.fif.gz.\n \"\"\"\n if self.current_fit == 'unfitted':\n raise RuntimeError('No fit available. Please first fit ICA')\n\n check_fname(fname, 'ICA', ('-ica.fif', '-ica.fif.gz',\n '_ica.fif', '_ica.fif.gz'))\n\n logger.info('Writing ICA solution to %s...' 
% fname)\n fid = start_file(fname)\n\n try:\n _write_ica(fid, self)\n end_file(fid)\n except Exception:\n end_file(fid)\n os.remove(fname)\n raise\n\n return self\n\n def copy(self):\n \"\"\"Copy the ICA object.\n\n Returns\n -------\n ica : instance of ICA\n The copied object.\n \"\"\"\n return deepcopy(self)\n\n @copy_function_doc_to_method_doc(plot_ica_components)\n def plot_components(self, picks=None, ch_type=None, res=64, layout=None,\n vmin=None, vmax=None, cmap='RdBu_r', sensors=True,\n colorbar=False, title=None, show=True, outlines='head',\n contours=6, image_interp='bilinear', head_pos=None,\n inst=None, plot_std=True, topomap_args=None,\n image_args=None, psd_args=None, reject='auto'):\n return plot_ica_components(self, picks=picks, ch_type=ch_type,\n res=res, layout=layout, vmin=vmin,\n vmax=vmax, cmap=cmap, sensors=sensors,\n colorbar=colorbar, title=title, show=show,\n outlines=outlines, contours=contours,\n image_interp=image_interp,\n head_pos=head_pos, inst=inst,\n plot_std=plot_std,\n topomap_args=topomap_args,\n image_args=image_args, psd_args=psd_args,\n reject=reject)\n\n @copy_function_doc_to_method_doc(plot_ica_properties)\n def plot_properties(self, inst, picks=None, axes=None, dB=True,\n plot_std=True, topomap_args=None, image_args=None,\n psd_args=None, figsize=None, show=True, reject='auto'):\n return plot_ica_properties(self, inst, picks=picks, axes=axes,\n dB=dB, plot_std=plot_std,\n topomap_args=topomap_args,\n image_args=image_args, psd_args=psd_args,\n figsize=figsize, show=show, reject=reject)\n\n @copy_function_doc_to_method_doc(plot_ica_sources)\n def plot_sources(self, inst, picks=None, start=None,\n stop=None, title=None, show=True, block=False,\n show_first_samp=False, show_scrollbars=True):\n return plot_ica_sources(self, inst=inst, picks=picks,\n start=start, stop=stop, title=title, show=show,\n block=block, show_first_samp=show_first_samp,\n show_scrollbars=show_scrollbars)\n\n @copy_function_doc_to_method_doc(plot_ica_scores)\n def plot_scores(self, scores, exclude=None, labels=None, axhline=None,\n title='ICA component scores', figsize=None,\n show=True):\n return plot_ica_scores(\n ica=self, scores=scores, exclude=exclude, labels=labels,\n axhline=axhline, title=title, figsize=figsize, show=show)\n\n @copy_function_doc_to_method_doc(plot_ica_overlay)\n def plot_overlay(self, inst, exclude=None, picks=None, start=None,\n stop=None, title=None, show=True):\n return plot_ica_overlay(self, inst=inst, exclude=exclude, picks=picks,\n start=start, stop=stop, title=title, show=show)\n\n def detect_artifacts(self, raw, start_find=None, stop_find=None,\n ecg_ch=None, ecg_score_func='pearsonr',\n ecg_criterion=0.1, eog_ch=None,\n eog_score_func='pearsonr',\n eog_criterion=0.1, skew_criterion=0,\n kurt_criterion=0, var_criterion=-1,\n add_nodes=None):\n \"\"\"Run ICA artifacts detection workflow.\n\n Note. This is still experimental and will most likely change over\n the next releases. For maximum control use the workflow exposed in\n the examples.\n\n Hints and caveats:\n - It is highly recommended to bandpass filter ECG and EOG\n data and pass them instead of the channel names as ecg_ch and eog_ch\n arguments.\n - please check your results. Detection by kurtosis and variance\n may be powerful but misclassification of brain signals as\n noise cannot be precluded.\n - Consider using shorter times for start_find and stop_find than\n for start and stop. 
It can save you much time.\n\n Example invocation (taking advantage of the defaults)::\n\n ica.detect_artifacts(ecg_channel='MEG 1531', eog_channel='EOG 061')\n\n Parameters\n ----------\n raw : instance of Raw\n Raw object to draw sources from. No components are actually removed\n here, i.e. ica is not applied to raw in this function. Use\n `ica.apply()` for this after inspection of the identified\n components.\n start_find : int | float | None\n First sample to include for artifact search. If float, data will be\n interpreted as time in seconds. If None, data will be used from the\n first sample.\n stop_find : int | float | None\n Last sample to not include for artifact search. If float, data will\n be interpreted as time in seconds. If None, data will be used to\n the last sample.\n ecg_ch : str | ndarray | None\n The `target` argument passed to ica.find_sources_raw. Either the\n name of the ECG channel or the ECG time series. If None, this step\n will be skipped.\n ecg_score_func : str | callable\n The `score_func` argument passed to ica.find_sources_raw. Either\n the name of function supported by ICA or a custom function.\n ecg_criterion : float | int | list-like | slice\n The indices of the sorted ecg scores. If float, sources with\n absolute scores greater than the criterion will be dropped. Else,\n the absolute scores sorted in descending order will be indexed\n accordingly. E.g. range(2) would return the two sources with the\n highest absolute score. If None, this step will be skipped.\n eog_ch : list | str | ndarray | None\n The `target` argument or the list of target arguments subsequently\n passed to ica.find_sources_raw. Either the name of the vertical EOG\n channel or the corresponding EOG time series. If None, this step\n will be skipped.\n eog_score_func : str | callable\n The `score_func` argument passed to ica.find_sources_raw. Either\n the name of function supported by ICA or a custom function.\n eog_criterion : float | int | list-like | slice\n The indices of the sorted eog scores. If float, sources with\n absolute scores greater than the criterion will be dropped. Else,\n the absolute scores sorted in descending order will be indexed\n accordingly. E.g. range(2) would return the two sources with the\n highest absolute score. If None, this step will be skipped.\n skew_criterion : float | int | list-like | slice\n The indices of the sorted skewness scores. If float, sources with\n absolute scores greater than the criterion will be dropped. Else,\n the absolute scores sorted in descending order will be indexed\n accordingly. E.g. range(2) would return the two sources with the\n highest absolute score. If None, this step will be skipped.\n kurt_criterion : float | int | list-like | slice\n The indices of the sorted kurtosis scores. If float, sources with\n absolute scores greater than the criterion will be dropped. Else,\n the absolute scores sorted in descending order will be indexed\n accordingly. E.g. range(2) would return the two sources with the\n highest absolute score. If None, this step will be skipped.\n var_criterion : float | int | list-like | slice\n The indices of the sorted variance scores. If float, sources with\n absolute scores greater than the criterion will be dropped. Else,\n the absolute scores sorted in descending order will be indexed\n accordingly. E.g. range(2) would return the two sources with the\n highest absolute score. 
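The float versus index behaviour of the criteria above corresponds roughly to the following selection logic (a simplified sketch mirroring the internal ``_detect_artifacts`` helper; ``scores`` and ``criterion`` stand for one node's score array and criterion)::\n\n >> # float criterion: keep sources whose absolute score exceeds it\n >> found = np.where(np.abs(scores) > criterion)[0]\n >> # otherwise: index the absolute scores sorted in descending order\n >> found = (-np.abs(scores)).argsort()[criterion]\n\n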
If None, this step will be skipped.\n add_nodes : list of tuple\n Additional list if tuples carrying the following parameters\n of ica nodes:\n (name : str, target : str | array, score_func : callable,\n criterion : float | int | list-like | slice). This parameter is a\n generalization of the artifact specific parameters above and has\n the same structure. Example:\n add_nodes=('ECG phase lock', ECG 01', my_phase_lock_function, 0.5)\n\n Returns\n -------\n self : instance of ICA\n The ICA object with the detected artifact indices marked for\n exclusion\n \"\"\"\n logger.info(' Searching for artifacts...')\n _detect_artifacts(self, raw=raw, start_find=start_find,\n stop_find=stop_find, ecg_ch=ecg_ch,\n ecg_score_func=ecg_score_func,\n ecg_criterion=ecg_criterion,\n eog_ch=eog_ch, eog_score_func=eog_score_func,\n eog_criterion=eog_criterion,\n skew_criterion=skew_criterion,\n kurt_criterion=kurt_criterion,\n var_criterion=var_criterion,\n add_nodes=add_nodes)\n\n return self\n\n @verbose\n def _check_n_pca_components(self, _n_pca_comp, verbose=None):\n \"\"\"Aux function.\"\"\"\n if isinstance(_n_pca_comp, float):\n _n_pca_comp = ((self.pca_explained_variance_ /\n self.pca_explained_variance_.sum()).cumsum() <=\n _n_pca_comp).sum()\n logger.info('Selected %i PCA components by explained '\n 'variance' % _n_pca_comp)\n elif _n_pca_comp is None:\n _n_pca_comp = self.max_pca_components\n elif _n_pca_comp < self.n_components_:\n _n_pca_comp = self.n_components_\n\n return _n_pca_comp\n\n\ndef _check_start_stop(raw, start, stop):\n \"\"\"Aux function.\"\"\"\n out = list()\n for st in (start, stop):\n if st is None:\n out.append(st)\n else:\n try:\n out.append(_ensure_int(st))\n except TypeError: # not int-like\n out.append(raw.time_as_index(st)[0])\n return out\n\n\n@verbose\ndef ica_find_ecg_events(raw, ecg_source, event_id=999,\n tstart=0.0, l_freq=5, h_freq=35, qrs_threshold='auto',\n verbose=None):\n \"\"\"Find ECG peaks from one selected ICA source.\n\n Parameters\n ----------\n raw : instance of Raw\n Raw object to draw sources from.\n ecg_source : ndarray\n ICA source resembling ECG to find peaks from.\n event_id : int\n The index to assign to found events.\n tstart : float\n Start detection after tstart seconds. Useful when beginning\n of run is noisy.\n l_freq : float\n Low pass frequency.\n h_freq : float\n High pass frequency.\n qrs_threshold : float | str\n Between 0 and 1. qrs detection threshold. 
Can also be \"auto\" to\n automatically choose the threshold that generates a reasonable\n number of heartbeats (40-160 beats / min).\n %(verbose)s\n\n Returns\n -------\n ecg_events : array\n Events.\n ch_ECG : string\n Name of channel used.\n average_pulse : float.\n Estimated average pulse.\n \"\"\"\n logger.info('Using ICA source to identify heart beats')\n\n # detecting QRS and generating event file\n ecg_events = qrs_detector(raw.info['sfreq'], ecg_source.ravel(),\n tstart=tstart, thresh_value=qrs_threshold,\n l_freq=l_freq, h_freq=h_freq)\n\n n_events = len(ecg_events)\n\n ecg_events = np.c_[ecg_events + raw.first_samp, np.zeros(n_events),\n event_id * np.ones(n_events)]\n\n return ecg_events\n\n\n@verbose\ndef ica_find_eog_events(raw, eog_source=None, event_id=998, l_freq=1,\n h_freq=10, verbose=None):\n \"\"\"Locate EOG artifacts from one selected ICA source.\n\n Parameters\n ----------\n raw : instance of Raw\n The raw data.\n eog_source : ndarray\n ICA source resembling EOG to find peaks from.\n event_id : int\n The index to assign to found events.\n l_freq : float\n Low cut-off frequency in Hz.\n h_freq : float\n High cut-off frequency in Hz.\n %(verbose)s\n\n Returns\n -------\n eog_events : array\n Events.\n \"\"\"\n eog_events = _find_eog_events(eog_source[np.newaxis], event_id=event_id,\n l_freq=l_freq, h_freq=h_freq,\n sampling_rate=raw.info['sfreq'],\n first_samp=raw.first_samp)\n return eog_events\n\n\ndef _get_target_ch(container, target):\n \"\"\"Aux function.\"\"\"\n # auto target selection\n picks = pick_channels(container.ch_names, include=[target])\n ref_picks = pick_types(container.info, meg=False, eeg=False, ref_meg=True)\n if len(ref_picks) > 0:\n picks = list(set(picks) - set(ref_picks))\n\n if len(picks) == 0:\n raise ValueError('%s not in channel list (%s)' %\n (target, container.ch_names))\n return picks\n\n\ndef _find_sources(sources, target, score_func):\n \"\"\"Aux function.\"\"\"\n if isinstance(score_func, str):\n score_func = get_score_funcs().get(score_func, score_func)\n\n if not callable(score_func):\n raise ValueError('%s is not a valid score_func.' % score_func)\n\n scores = (score_func(sources, target) if target is not None\n else score_func(sources, 1))\n\n return scores\n\n\ndef _ica_explained_variance(ica, inst, normalize=False):\n \"\"\"Check variance accounted for by each component in supplied data.\n\n Parameters\n ----------\n ica : ICA\n Instance of `mne.preprocessing.ICA`.\n inst : Raw | Epochs | Evoked\n Data to explain with ICA. 
Instance of Raw, Epochs or Evoked.\n normalize : bool\n Whether to normalize the variance.\n\n Returns\n -------\n var : array\n Variance explained by each component.\n \"\"\"\n # check if ica is ICA and whether inst is Raw or Epochs\n if not isinstance(ica, ICA):\n raise TypeError('first argument must be an instance of ICA.')\n if not isinstance(inst, (BaseRaw, BaseEpochs, Evoked)):\n raise TypeError('second argument must an instance of either Raw, '\n 'Epochs or Evoked.')\n\n source_data = _get_inst_data(ica.get_sources(inst))\n\n # if epochs - reshape to channels x timesamples\n if isinstance(inst, BaseEpochs):\n n_epochs, n_chan, n_samp = source_data.shape\n source_data = source_data.transpose(1, 0, 2).reshape(\n (n_chan, n_epochs * n_samp))\n\n n_chan, n_samp = source_data.shape\n var = np.sum(ica.mixing_matrix_ ** 2, axis=0) * np.sum(\n source_data ** 2, axis=1) / (n_chan * n_samp - 1)\n if normalize:\n var /= var.sum()\n return var\n\n\ndef _sort_components(ica, order, copy=True):\n \"\"\"Change the order of components in ica solution.\"\"\"\n assert ica.n_components_ == len(order)\n if copy:\n ica = ica.copy()\n\n # reorder components\n ica.mixing_matrix_ = ica.mixing_matrix_[:, order]\n ica.unmixing_matrix_ = ica.unmixing_matrix_[order, :]\n\n # reorder labels, excludes etc.\n if isinstance(order, np.ndarray):\n order = list(order)\n if ica.exclude:\n ica.exclude = [order.index(ic) for ic in ica.exclude]\n for k in ica.labels_.keys():\n ica.labels_[k] = [order.index(ic) for ic in ica.labels_[k]]\n\n return ica\n\n\ndef _serialize(dict_, outer_sep=';', inner_sep=':'):\n \"\"\"Aux function.\"\"\"\n s = []\n for key, value in dict_.items():\n if callable(value):\n value = value.__name__\n elif isinstance(value, Integral):\n value = int(value)\n elif isinstance(value, dict):\n # py35 json does not support numpy int64\n for subkey, subvalue in value.items():\n if isinstance(subvalue, list):\n if len(subvalue) > 0:\n if isinstance(subvalue[0], (int, np.integer)):\n value[subkey] = [int(i) for i in subvalue]\n\n for cls in (np.random.RandomState, Covariance):\n if isinstance(value, cls):\n value = cls.__name__\n\n s.append(key + inner_sep + json.dumps(value))\n\n return outer_sep.join(s)\n\n\ndef _deserialize(str_, outer_sep=';', inner_sep=':'):\n \"\"\"Aux Function.\"\"\"\n out = {}\n for mapping in str_.split(outer_sep):\n k, v = mapping.split(inner_sep, 1)\n out[k] = json.loads(v)\n return out\n\n\ndef _write_ica(fid, ica):\n \"\"\"Write an ICA object.\n\n Parameters\n ----------\n fid: file\n The file descriptor\n ica:\n The instance of ICA to write\n \"\"\"\n ica_init = dict(noise_cov=ica.noise_cov,\n n_components=ica.n_components,\n n_pca_components=ica.n_pca_components,\n max_pca_components=ica.max_pca_components,\n current_fit=ica.current_fit,\n allow_ref_meg=ica.allow_ref_meg)\n\n if ica.info is not None:\n start_block(fid, FIFF.FIFFB_MEAS)\n write_id(fid, FIFF.FIFF_BLOCK_ID)\n if ica.info['meas_id'] is not None:\n write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, ica.info['meas_id'])\n\n # Write measurement info\n write_meas_info(fid, ica.info)\n end_block(fid, FIFF.FIFFB_MEAS)\n\n start_block(fid, FIFF.FIFFB_MNE_ICA)\n\n # ICA interface params\n write_string(fid, FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS,\n _serialize(ica_init))\n\n # Channel names\n if ica.ch_names is not None:\n write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, ica.ch_names)\n\n # samples on fit\n n_samples = getattr(ica, 'n_samples_', None)\n ica_misc = {'n_samples_': (None if n_samples is None else int(n_samples)),\n 
'labels_': getattr(ica, 'labels_', None),\n 'method': getattr(ica, 'method', None)}\n\n write_string(fid, FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS,\n _serialize(ica_init))\n\n # ICA misct params\n write_string(fid, FIFF.FIFF_MNE_ICA_MISC_PARAMS,\n _serialize(ica_misc))\n\n # Whitener\n write_double_matrix(fid, FIFF.FIFF_MNE_ICA_WHITENER, ica.pre_whitener_)\n\n # PCA components_\n write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_COMPONENTS,\n ica.pca_components_)\n\n # PCA mean_\n write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_MEAN, ica.pca_mean_)\n\n # PCA explained_variance_\n write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR,\n ica.pca_explained_variance_)\n\n # ICA unmixing\n write_double_matrix(fid, FIFF.FIFF_MNE_ICA_MATRIX, ica.unmixing_matrix_)\n\n # Write bad components\n\n write_int(fid, FIFF.FIFF_MNE_ICA_BADS, list(ica.exclude))\n\n # Done!\n end_block(fid, FIFF.FIFFB_MNE_ICA)\n\n\n@verbose\ndef read_ica(fname, verbose=None):\n \"\"\"Restore ICA solution from fif file.\n\n Parameters\n ----------\n fname : str\n Absolute path to fif file containing ICA matrices.\n The file name should end with -ica.fif or -ica.fif.gz.\n %(verbose)s\n\n Returns\n -------\n ica : instance of ICA\n The ICA estimator.\n \"\"\"\n check_fname(fname, 'ICA', ('-ica.fif', '-ica.fif.gz',\n '_ica.fif', '_ica.fif.gz'))\n\n logger.info('Reading %s ...' % fname)\n fid, tree, _ = fiff_open(fname)\n\n try:\n # we used to store bads that weren't part of the info...\n info, _ = read_meas_info(fid, tree, clean_bads=True)\n except ValueError:\n logger.info('Could not find the measurement info. \\n'\n 'Functionality requiring the info won\\'t be'\n ' available.')\n info = None\n\n ica_data = dir_tree_find(tree, FIFF.FIFFB_MNE_ICA)\n if len(ica_data) == 0:\n ica_data = dir_tree_find(tree, 123) # Constant 123 Used before v 0.11\n if len(ica_data) == 0:\n fid.close()\n raise ValueError('Could not find ICA data')\n\n my_ica_data = ica_data[0]\n for d in my_ica_data['directory']:\n kind = d.kind\n pos = d.pos\n if kind == FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS:\n tag = read_tag(fid, pos)\n ica_init = tag.data\n elif kind == FIFF.FIFF_MNE_ROW_NAMES:\n tag = read_tag(fid, pos)\n ch_names = tag.data\n elif kind == FIFF.FIFF_MNE_ICA_WHITENER:\n tag = read_tag(fid, pos)\n pre_whitener = tag.data\n elif kind == FIFF.FIFF_MNE_ICA_PCA_COMPONENTS:\n tag = read_tag(fid, pos)\n pca_components = tag.data\n elif kind == FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR:\n tag = read_tag(fid, pos)\n pca_explained_variance = tag.data\n elif kind == FIFF.FIFF_MNE_ICA_PCA_MEAN:\n tag = read_tag(fid, pos)\n pca_mean = tag.data\n elif kind == FIFF.FIFF_MNE_ICA_MATRIX:\n tag = read_tag(fid, pos)\n unmixing_matrix = tag.data\n elif kind == FIFF.FIFF_MNE_ICA_BADS:\n tag = read_tag(fid, pos)\n exclude = tag.data\n elif kind == FIFF.FIFF_MNE_ICA_MISC_PARAMS:\n tag = read_tag(fid, pos)\n ica_misc = tag.data\n\n fid.close()\n\n ica_init, ica_misc = [_deserialize(k) for k in (ica_init, ica_misc)]\n current_fit = ica_init.pop('current_fit')\n if ica_init['noise_cov'] == Covariance.__name__:\n logger.info('Reading whitener drawn from noise covariance ...')\n\n logger.info('Now restoring ICA solution ...')\n\n # make sure dtypes are np.float64 to satisfy fast_dot\n def f(x):\n return x.astype(np.float64)\n\n ica_init = {k: v for k, v in ica_init.items()\n if k in _get_args(ICA.__init__)}\n ica = ICA(**ica_init)\n ica.current_fit = current_fit\n ica.ch_names = ch_names.split(':')\n ica.pre_whitener_ = f(pre_whitener)\n ica.pca_mean_ = f(pca_mean)\n ica.pca_components_ 
= f(pca_components)\n ica.n_components_ = unmixing_matrix.shape[0]\n ica._update_ica_names()\n ica.pca_explained_variance_ = f(pca_explained_variance)\n ica.unmixing_matrix_ = f(unmixing_matrix)\n ica.mixing_matrix_ = linalg.pinv(ica.unmixing_matrix_)\n ica.exclude = [] if exclude is None else list(exclude)\n ica.info = info\n if 'n_samples_' in ica_misc:\n ica.n_samples_ = ica_misc['n_samples_']\n if 'labels_' in ica_misc:\n labels_ = ica_misc['labels_']\n if labels_ is not None:\n ica.labels_ = labels_\n if 'method' in ica_misc:\n ica.method = ica_misc['method']\n\n logger.info('Ready.')\n\n return ica\n\n\n_ica_node = namedtuple('Node', 'name target score_func criterion')\n\n\ndef _detect_artifacts(ica, raw, start_find, stop_find, ecg_ch, ecg_score_func,\n ecg_criterion, eog_ch, eog_score_func, eog_criterion,\n skew_criterion, kurt_criterion, var_criterion,\n add_nodes):\n \"\"\"Aux Function.\"\"\"\n from scipy import stats\n\n nodes = []\n if ecg_ch is not None:\n nodes += [_ica_node('ECG', ecg_ch, ecg_score_func, ecg_criterion)]\n\n if eog_ch not in [None, []]:\n if not isinstance(eog_ch, list):\n eog_ch = [eog_ch]\n for idx, ch in enumerate(eog_ch):\n nodes += [_ica_node('EOG %02d' % idx, ch, eog_score_func,\n eog_criterion)]\n\n if skew_criterion is not None:\n nodes += [_ica_node('skewness', None, stats.skew, skew_criterion)]\n\n if kurt_criterion is not None:\n nodes += [_ica_node('kurtosis', None, stats.kurtosis, kurt_criterion)]\n\n if var_criterion is not None:\n nodes += [_ica_node('variance', None, np.var, var_criterion)]\n\n if add_nodes is not None:\n nodes.extend(add_nodes)\n\n for node in nodes:\n scores = ica.score_sources(raw, start=start_find, stop=stop_find,\n target=node.target,\n score_func=node.score_func)\n if isinstance(node.criterion, float):\n found = list(np.where(np.abs(scores) > node.criterion)[0])\n else:\n # Sort in descending order; use (-abs()), rather than [::-1] to\n # keep any NaN values in the end (and also keep the order of same\n # values):\n found = list(np.atleast_1d((-np.abs(scores)).argsort()\n [node.criterion]))\n\n case = (len(found), _pl(found), node.name)\n logger.info(' found %s artifact%s by %s' % case)\n ica.exclude = list(ica.exclude) + found\n\n logger.info('Artifact indices found:\\n ' + str(ica.exclude).strip('[]'))\n if len(set(ica.exclude)) != len(ica.exclude):\n logger.info(' Removing duplicate indices...')\n ica.exclude = list(set(ica.exclude))\n\n logger.info('Ready.')\n\n\n@verbose\ndef run_ica(raw, n_components, max_pca_components=100,\n n_pca_components=64, noise_cov=None,\n random_state=None, picks=None, start=None, stop=None,\n start_find=None, stop_find=None, ecg_ch=None,\n ecg_score_func='pearsonr', ecg_criterion=0.1, eog_ch=None,\n eog_score_func='pearsonr', eog_criterion=0.1, skew_criterion=0,\n kurt_criterion=0, var_criterion=-1, add_nodes=None,\n method='fastica', allow_ref_meg=False, verbose=None):\n \"\"\"Run ICA decomposition on raw data and identify artifact sources.\n\n This function implements an automated artifact removal work flow.\n\n Hints and caveats:\n\n - It is highly recommended to bandpass filter ECG and EOG\n data and pass them instead of the channel names as ecg_ch and eog_ch\n arguments.\n - Please check your results. Detection by kurtosis and variance\n can be powerful but misclassification of brain signals as\n noise cannot be precluded. If you are not sure set those to None.\n - Consider using shorter times for start_find and stop_find than\n for start and stop. 
It can save you much time.\n\n Example invocation (taking advantage of defaults)::\n\n ica = run_ica(raw, n_components=.9, start_find=10000, stop_find=12000,\n ecg_ch='MEG 1531', eog_ch='EOG 061')\n\n Parameters\n ----------\n raw : instance of Raw\n The raw data to decompose.\n n_components : int | float | None\n The number of components used for ICA decomposition. If int, it must be\n smaller then max_pca_components. If None, all PCA components will be\n used. If float between 0 and 1 components can will be selected by the\n cumulative percentage of explained variance.\n max_pca_components : int | None\n The number of components used for PCA decomposition. If None, no\n dimension reduction will be applied and max_pca_components will equal\n the number of channels supplied on decomposing data.\n n_pca_components\n The number of PCA components used after ICA recomposition. The ensuing\n attribute allows to balance noise reduction against potential loss of\n features due to dimensionality reduction. If greater than\n ``self.n_components_``, the next ``'n_pca_components'`` minus\n ``'n_components_'`` PCA components will be added before restoring the\n sensor space data. The attribute gets updated each time the according\n parameter for in .pick_sources_raw or .pick_sources_epochs is changed.\n noise_cov : None | instance of Covariance\n Noise covariance used for whitening. If None, channels are just\n z-scored.\n %(random_state)s\n Random state to initialize the FastICA estimation. As the estimation is\n non-deterministic it can be useful to fix the random state to have\n reproducible results.\n %(picks_good_data_noref)s\n This selection remains throughout the initialized ICA solution.\n start : int | float | None\n First sample to include for decomposition. If float, data will be\n interpreted as time in seconds. If None, data will be used from the\n first sample.\n stop : int | float | None\n Last sample to not include for decomposition. If float, data will be\n interpreted as time in seconds. If None, data will be used to the\n last sample.\n start_find : int | float | None\n First sample to include for artifact search. If float, data will be\n interpreted as time in seconds. If None, data will be used from the\n first sample.\n stop_find : int | float | None\n Last sample to not include for artifact search. If float, data will be\n interpreted as time in seconds. If None, data will be used to the last\n sample.\n ecg_ch : str | ndarray | None\n The ``target`` argument passed to ica.find_sources_raw. Either the\n name of the ECG channel or the ECG time series. If None, this step\n will be skipped.\n ecg_score_func : str | callable\n The ``score_func`` argument passed to ica.find_sources_raw. Either\n the name of function supported by ICA or a custom function.\n ecg_criterion : float | int | list-like | slice\n The indices of the sorted ecg scores. If float, sources with\n absolute scores greater than the criterion will be dropped. Else, the\n absolute scores sorted in descending order will be indexed accordingly.\n E.g. range(2) would return the two sources with the highest absolute\n score. If None, this step will be skipped.\n eog_ch : list | str | ndarray | None\n The ``target`` argument or the list of target arguments subsequently\n passed to ica.find_sources_raw. Either the name of the vertical EOG\n channel or the corresponding EOG time series. If None, this step\n will be skipped.\n eog_score_func : str | callable\n The ``score_func`` argument passed to ica.find_sources_raw. 
Either\n the name of function supported by ICA or a custom function.\n eog_criterion : float | int | list-like | slice\n The indices of the sorted eog scores. If float, sources with\n absolute scores greater than the criterion will be dropped. Else, the\n absolute scores sorted in descending order will be indexed accordingly.\n E.g. range(2) would return the two sources with the highest absolute\n score. If None, this step will be skipped.\n skew_criterion : float | int | list-like | slice\n The indices of the sorted skewness scores. If float, sources with\n absolute scores greater than the criterion will be dropped. Else, the\n absolute scores sorted in descending order will be indexed accordingly.\n E.g. range(2) would return the two sources with the highest absolute\n score. If None, this step will be skipped.\n kurt_criterion : float | int | list-like | slice\n The indices of the sorted kurtosis scores. If float, sources with\n absolute scores greater than the criterion will be dropped. Else, the\n absolute scores sorted in descending order will be indexed accordingly.\n E.g. range(2) would return the two sources with the highest absolute\n score. If None, this step will be skipped.\n var_criterion : float | int | list-like | slice\n The indices of the sorted variance scores. If float, sources with\n absolute scores greater than the criterion will be dropped. Else, the\n absolute scores sorted in descending order will be indexed accordingly.\n E.g. range(2) would return the two sources with the highest absolute\n score. If None, this step will be skipped.\n add_nodes : list of tuple\n Additional list if tuples carrying the following parameters:\n (name : str, target : str | array, score_func : callable,\n criterion : float | int | list-like | slice). This parameter is a\n generalization of the artifact specific parameters above and has\n the same structure. Example::\n\n add_nodes=('ECG phase lock', ECG 01', my_phase_lock_function, 0.5)\n\n method : {'fastica', 'infomax', 'extended-infomax', 'picard'}\n The ICA method to use in the fit() method. Defaults to 'fastica'.\n allow_ref_meg : bool\n Allow ICA on MEG reference channels. Defaults to False.\n\n .. versionadded:: 0.18\n %(verbose)s\n\n Returns\n -------\n ica : instance of ICA\n The ICA object with detected artifact sources marked for exclusion.\n \"\"\"\n ica = ICA(n_components=n_components, max_pca_components=max_pca_components,\n n_pca_components=n_pca_components, method=method,\n noise_cov=noise_cov, random_state=random_state, verbose=verbose,\n allow_ref_meg=allow_ref_meg)\n\n ica.fit(raw, start=start, stop=stop, picks=picks)\n logger.info('%s' % ica)\n logger.info(' Now searching for artifacts...')\n\n _detect_artifacts(ica=ica, raw=raw, start_find=start_find,\n stop_find=stop_find, ecg_ch=ecg_ch,\n ecg_score_func=ecg_score_func,\n ecg_criterion=ecg_criterion, eog_ch=eog_ch,\n eog_score_func=eog_score_func,\n eog_criterion=eog_criterion,\n skew_criterion=skew_criterion,\n kurt_criterion=kurt_criterion,\n var_criterion=var_criterion,\n add_nodes=add_nodes)\n return ica\n\n\n@verbose\ndef _band_pass_filter(inst, sources, target, l_freq, h_freq, verbose=None):\n \"\"\"Optionally band-pass filter the data.\"\"\"\n if l_freq is not None and h_freq is not None:\n logger.info('... 
filtering ICA sources')\n # use FIR here, steeper is better\n kw = dict(phase='zero-double', filter_length='10s', fir_window='hann',\n l_trans_bandwidth=0.5, h_trans_bandwidth=0.5,\n fir_design='firwin2')\n sources = filter_data(sources, inst.info['sfreq'], l_freq, h_freq,\n **kw)\n logger.info('... filtering target')\n target = filter_data(target, inst.info['sfreq'], l_freq, h_freq, **kw)\n elif l_freq is not None or h_freq is not None:\n raise ValueError('Must specify both pass bands')\n return sources, target\n\n\n# #############################################################################\n# CORRMAP\n\ndef _find_max_corrs(all_maps, target, threshold):\n \"\"\"Compute correlations between template and target components.\"\"\"\n all_corrs = [compute_corr(target, subj.T) for subj in all_maps]\n abs_corrs = [np.abs(a) for a in all_corrs]\n corr_polarities = [np.sign(a) for a in all_corrs]\n\n if threshold <= 1:\n max_corrs = [list(np.nonzero(s_corr > threshold)[0])\n for s_corr in abs_corrs]\n else:\n max_corrs = [list(find_outliers(s_corr, threshold=threshold))\n for s_corr in abs_corrs]\n\n am = [l[i] for l, i_s in zip(abs_corrs, max_corrs)\n for i in i_s]\n median_corr_with_target = np.median(am) if len(am) > 0 else 0\n\n polarities = [l[i] for l, i_s in zip(corr_polarities, max_corrs)\n for i in i_s]\n\n maxmaps = [l[i] for l, i_s in zip(all_maps, max_corrs)\n for i in i_s]\n\n if len(maxmaps) == 0:\n return [], 0, 0, []\n newtarget = np.zeros(maxmaps[0].size)\n std_of_maps = np.std(np.asarray(maxmaps))\n mean_of_maps = np.std(np.asarray(maxmaps))\n for maxmap, polarity in zip(maxmaps, polarities):\n newtarget += (maxmap / std_of_maps - mean_of_maps) * polarity\n\n newtarget /= len(maxmaps)\n newtarget *= std_of_maps\n\n sim_i_o = np.abs(np.corrcoef(target, newtarget)[1, 0])\n\n return newtarget, median_corr_with_target, sim_i_o, max_corrs\n\n\n@verbose\ndef corrmap(icas, template, threshold=\"auto\", label=None, ch_type=\"eeg\",\n plot=True, show=True, verbose=None, outlines='head', layout=None,\n sensors=True, contours=6, cmap=None):\n \"\"\"Find similar Independent Components across subjects by map similarity.\n\n Corrmap (Viola et al. 2009 Clin Neurophysiol) identifies the best group\n match to a supplied template. Typically, feed it a list of fitted ICAs and\n a template IC, for example, the blink for the first subject, to identify\n specific ICs across subjects.\n\n The specific procedure consists of two iterations. In a first step, the\n maps best correlating with the template are identified. In the next step,\n the analysis is repeated with the mean of the maps identified in the first\n stage.\n\n Run with `plot` and `show` set to `True` and `label=False` to find\n good parameters. Then, run with labelling enabled to apply the\n labelling in the IC objects. (Running with both `plot` and `labels`\n off does nothing.)\n\n Outputs a list of fitted ICAs with the indices of the marked ICs in a\n specified field.\n\n The original Corrmap website: www.debener.de/corrmap/corrmapplugin1.html\n\n Parameters\n ----------\n icas : list of mne.preprocessing.ICA\n A list of fitted ICA objects.\n template : tuple | np.ndarray, shape (n_components,)\n Either a tuple with two elements (int, int) representing the list\n indices of the set from which the template should be chosen, and the\n template. 
E.g., if template=(1, 0), the first IC of the 2nd ICA object\n is used.\n Or a numpy array whose size corresponds to each IC map from the\n supplied maps, in which case this map is chosen as the template.\n threshold : \"auto\" | list of float | float\n Correlation threshold for identifying ICs\n If \"auto\", search for the best map by trying all correlations between\n 0.6 and 0.95. In the original proposal, lower values are considered,\n but this is not yet implemented.\n If list of floats, search for the best map in the specified range of\n correlation strengths. As correlation values, must be between 0 and 1\n If float > 0, select ICs correlating better than this.\n If float > 1, use find_outliers to identify ICs within subjects (not in\n original Corrmap)\n Defaults to \"auto\".\n label : None | str\n If not None, categorised ICs are stored in a dictionary ``labels_``\n under the given name. Preexisting entries will be appended to\n (excluding repeats), not overwritten. If None, a dry run is performed\n and the supplied ICs are not changed.\n ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'\n The channel type to plot. Defaults to 'eeg'.\n plot : bool\n Should constructed template and selected maps be plotted? Defaults\n to True.\n show : bool\n Show figures if True.\n %(verbose)s\n outlines : 'head' | dict | None\n The outlines to be drawn. If 'head', a head scheme will be drawn. If\n dict, each key refers to a tuple of x and y positions. The values in\n 'mask_pos' will serve as image mask. If None, nothing will be drawn.\n Defaults to 'head'. If dict, the 'autoshrink' (bool) field will\n trigger automated shrinking of the positions due to points outside the\n outline. Moreover, a matplotlib patch object can be passed for\n advanced masking options, either directly or as a function that returns\n patches (required for multi-axis plots).\n layout : None | Layout | list of Layout\n Layout instance specifying sensor positions (does not need to be\n specified for Neuromag data). Or a list of Layout if projections\n are from different sensor types.\n sensors : bool | str\n Add markers for sensor locations to the plot. Accepts matplotlib plot\n format string (e.g., 'r+' for red plusses). If True, a circle will be\n used (via .add_artist). Defaults to True.\n contours : int | array of float\n The number of contour lines to draw. If 0, no contours will be drawn.\n When an integer, matplotlib ticker locator is used to find suitable\n values for the contour thresholds (may sometimes be inaccurate, use\n array for accuracy). If an array, the values represent the levels for\n the contours. Defaults to 6.\n cmap : None | matplotlib colormap\n Colormap for the plot. If ``None``, defaults to 'Reds_r' for norm data,\n otherwise to 'RdBu_r'.\n\n Returns\n -------\n template_fig : Figure\n Figure showing the template.\n labelled_ics : Figure\n Figure showing the labelled ICs in all ICA decompositions.\n \"\"\"\n if not isinstance(plot, bool):\n raise ValueError(\"`plot` must be of type `bool`\")\n\n same_chans = _check_all_same_channel_names(icas)\n if same_chans is False:\n raise ValueError(\"Not all ICA instances have the same channel names. \"\n \"Corrmap requires all instances to have the same \"\n \"montage. 
Consider interpolating bad channels before \"\n \"running ICA.\")\n\n threshold_extra = ''\n if threshold == 'auto':\n threshold = np.arange(60, 95, dtype=np.float64) / 100.\n threshold_extra = ' (\"auto\")'\n\n all_maps = [ica.get_components().T for ica in icas]\n\n # check if template is an index to one IC in one ICA object, or an array\n if len(template) == 2:\n target = all_maps[template[0]][template[1]]\n is_subject = True\n elif template.ndim == 1 and len(template) == all_maps[0].shape[1]:\n target = template\n is_subject = False\n else:\n raise ValueError(\"`template` must be a length-2 tuple or an array the \"\n \"size of the ICA maps.\")\n\n template_fig, labelled_ics = None, None\n if plot is True:\n if is_subject: # plotting from an ICA object\n ttl = 'Template from subj. {}'.format(str(template[0]))\n template_fig = icas[template[0]].plot_components(\n picks=template[1], ch_type=ch_type, title=ttl,\n outlines=outlines, cmap=cmap, contours=contours, layout=layout,\n show=show)\n else: # plotting an array\n template_fig = _plot_corrmap([template], [0], [0], ch_type,\n icas[0].copy(), \"Template\",\n outlines=outlines, cmap=cmap,\n contours=contours, layout=layout,\n show=show, template=True)\n template_fig.subplots_adjust(top=0.8)\n template_fig.canvas.draw()\n\n # first run: use user-selected map\n threshold = np.atleast_1d(np.array(threshold, float)).ravel()\n threshold_err = ('No component detected using find_outliers when '\n 'using threshold%s %s, consider using a more lenient '\n 'threshold' % (threshold_extra, threshold))\n if len(all_maps) == 0:\n raise RuntimeError(threshold_err)\n paths = [_find_max_corrs(all_maps, target, t) for t in threshold]\n # find iteration with highest avg correlation with target\n new_target, _, _, _ = paths[np.argmax([path[2] for path in paths])]\n\n # second run: use output from first run\n if len(all_maps) == 0 or len(new_target) == 0:\n raise RuntimeError(threshold_err)\n paths = [_find_max_corrs(all_maps, new_target, t) for t in threshold]\n del new_target\n # find iteration with highest avg correlation with target\n _, median_corr, _, max_corrs = paths[\n np.argmax([path[1] for path in paths])]\n\n allmaps, indices, subjs, nones = [list() for _ in range(4)]\n logger.info('Median correlation with constructed map: %0.3f' % median_corr)\n del median_corr\n if plot is True:\n logger.info('Displaying selected ICs per subject.')\n\n for ii, (ica, max_corr) in enumerate(zip(icas, max_corrs)):\n if len(max_corr) > 0:\n if isinstance(max_corr[0], np.ndarray):\n max_corr = max_corr[0]\n if label is not None:\n ica.labels_[label] = list(set(list(max_corr) +\n ica.labels_.get(label, list())))\n if plot is True:\n allmaps.extend(ica.get_components()[:, max_corr].T)\n subjs.extend([ii] * len(max_corr))\n indices.extend(max_corr)\n else:\n if (label is not None) and (label not in ica.labels_):\n ica.labels_[label] = list()\n nones.append(ii)\n\n if len(nones) == 0:\n logger.info('At least 1 IC detected for each subject.')\n else:\n logger.info('No maps selected for subject%s %s, '\n 'consider a more liberal threshold.'\n % (_pl(nones), nones))\n\n if plot is True:\n labelled_ics = _plot_corrmap(allmaps, subjs, indices, ch_type, ica,\n label, outlines=outlines, cmap=cmap,\n contours=contours, layout=layout,\n show=show)\n return template_fig, labelled_ics\n else:\n return None\n"
] | [
[
"numpy.dot",
"scipy.linalg.pinv",
"numpy.sqrt",
"numpy.asarray",
"sklearn.decomposition.FastICA",
"numpy.concatenate",
"numpy.where",
"numpy.hstack",
"numpy.unique",
"numpy.arange",
"numpy.eye",
"numpy.std",
"numpy.argmax",
"numpy.zeros",
"numpy.nonzero",
"numpy.median",
"numpy.corrcoef",
"numpy.array",
"numpy.sum",
"numpy.abs",
"numpy.ones",
"numpy.sign",
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
}
] |
PSSF23/graspologic | [
"d5ae48d0481b6a60fa580158c2e9bae9cc506a9d",
"d5ae48d0481b6a60fa580158c2e9bae9cc506a9d",
"d5ae48d0481b6a60fa580158c2e9bae9cc506a9d",
"d5ae48d0481b6a60fa580158c2e9bae9cc506a9d"
] | [
"graspologic/align/seedless_procrustes.py",
"graspologic/cluster/gclust.py",
"graspologic/simulations/simulations_corr.py",
"tests/cluster/test_autogmm.py"
] | [
"# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nimport ot\nimport numpy as np\nfrom sklearn.utils import check_array\n\nfrom .base import BaseAlign\nfrom .sign_flips import SignFlips\nfrom .orthogonal_procrustes import OrthogonalProcrustes\n\n\nclass SeedlessProcrustes(BaseAlign):\n \"\"\"\n Matches two datasets using an orthogonal matrix. Unlike\n :class:`~graspologic.align.OrthogonalProcrustes`, this does not require a\n matching between entries. It can even be used in the settings where the two\n datasets do not have the same number of entries.\n\n In graph setting, it is used to align the embeddings of two different\n graphs, when it requires some simultaneous inference task and no 1-1\n matching between the vertices of the two graphs can be established, for\n example, inside of the test for the equivalence of the latent distributions\n (see: :class:`~graspologic.inference.LatentDistributionTest`).\n\n Parameters\n ----------\n optimal_transport_lambda : float (default=0.1), optional\n Regularization term of the Sinkhorn optimal transport algorithm.\n\n optimal_transport_eps : float (default=0.01), optional\n Tolerance parameter for the each Sinkhorn optimal transport algorithm.\n I.e. tolerance for each \"E-step\".\n\n optimal_transport_num_reps : int (default=1000), optional\n Number of repetitions in each iteration of the iterative optimal\n transport problem. I.e. maximum number of repetitions in each \"E-step\".\n\n iterative_num_reps : int (default=100), optional\n Number of reps in each iteration of the iterative optimal transport\n problem. I.e. maxumum number of total iterations the whole \"EM\"\n algorithm.\n\n init : string, {'2d' (default), 'sign_flips', 'custom'}, optional\n\n - '2d'\n Uses :math:`2^d` different restarts, where :math:`d` is the\n dimension of the datasets. In particular, tries all matrices that\n are simultaneously diagonal and orthogonal. In other words, these\n are diagonal matrices with all entries on the diagonal being either\n +1 or -1. This is motivated by the fact that spectral graph\n embeddings have two types of orthogonal non-identifiability, one of\n which is captured by the orthogonal diagonal matrices. The final\n result is picked based on the final values of the objective\n function. For more on this, see [2]_.\n - 'sign_flips'\n Initial alignment done by making the median value in each dimension\n have the same sign. The motivation is similar to that in '2d',\n except this is a heuristic that can save time, but can sometimes\n yield suboptimal results.\n - 'custom'\n Expects either an initial guess for :attr:`Q_` or an initial guess\n for :attr:`P_`, but not both. See ``initial_Q`` and ``initial_P``,\n respectively. If neither is provided, initializes ``initial_Q`` to an\n identity with an appropriate number of dimensions.\n\n initial_Q : np.ndarray, shape (d, d) or None, optional (default=None)\n An initial guess for the alignment matrix, :attr:`Q_`, if such exists.\n Only one of ``initial_Q``, ``initial_P`` can be provided at the same time,\n and only if ``init`` argument is set to 'custom'. If None, and\n ``initial_P`` is also None - initializes ``initial_Q`` to identity matrix.\n Must be an orthogonal matrix, if provided.\n\n initial_P : np.ndarray, shape (n, m) or None, optional (default=None)\n Initial guess for the optimal transport matrix, :attr:`P_`, if such\n exists. 
Only one of ``initial_Q``, ``initial_P`` can be provided at the\n same time, and only if ``init`` argument is set to 'custom'. If None, and\n ``initial_Q`` is also None - initializes ``initial_Q`` to identity matrix.\n Must be a soft assignment matrix if provided (rows sum up to 1/n, cols\n sum up to 1/m.)\n\n Attributes\n ----------\n Q_ : array, size (d, d)\n Final orthogonal matrix, used to modify ``X``.\n\n P_ : array, size (n, m) where n and m are the sizes of two datasets\n Final matrix of optimal transports, represent soft matching weights\n from points in one dataset to the other, normalized such that all rows\n sum to 1/n and all columns sum to 1/m.\n\n score_ : float\n Final value of the objective function: :math:`|| X Q - P Y ||_F`\n Lower means the datasets have been matched together better.\n\n selected_initial_Q_ : array, size (d, d)\n Initial orthogonal matrix which was used as the initialization.\n If ``init`` was set to '2d' or 'sign_flips', then it is the adaptively\n selected matrix.\n If ``init`` was set to 'custom', and ``initial_Q`` was provided, then equal\n to that. If it was not provided, but ``initial_P`` was, then it is the\n matrix after the first procrustes performed. If neither was provided,\n then it is the identity matrix.\n\n References\n ----------\n .. [1] Agterberg, J.\n # TODO Cite the Seedless Procrustes preprint whenever available.\n\n .. [2] Agterberg, J., Tang, M., Priebe., C. E. (2020).\n \"On Two Distinct Sources of Nonidentifiability in Latent Position Random Graph Models\"\n arXiv:2003.14250\n\n Notes\n -----\n In essence, the goal of this procedure is to simultaneously obtain a, not\n necessarily 1-to-1, correspondence between the vertices of the two data\n sets, and an orthogonal alignment between two datasets. If the two datasets\n are represented with matrices :math:`X \\in M_{n, d}` and\n :math:`Y \\in M_{m, d}`, then the correspondence is a matrix\n :math:`P \\in M_{n, m}` that is soft assignment matrix (that is, its rows\n sum to :math:`1/n`, and columns sum to :math:`1/m`) and the orthogonal\n alignment is an orthogonal matrix :math:`Q \\in M_{d, d}` (an orthogonal\n matrix is any matrix that satisfies :math:`Q^T Q = Q Q^T = I`). The global\n objective function is :math:`|| X Q - P Y ||_F`.\n\n Note that both :math:`X` and :math:`PY` are matrices in :math:`M_{n, d}`.\n Thus, if one knew :math:`P`, it would be simple to obtain an estimate for\n :math:`Q`, using the regular orthogonal procrustes. On the other hand, if\n :math:`Q` was known, then :math:`XQ` and :math:`Y` could be thought of\n distributions over a finite number of masses, each with weight :math:`1/n`\n or :math:`1/m`, respectively. These distributions could be \"matched\" via\n solving an optimal transport problem.\n\n However, both :math:`Q` and :math:`P` are simultaneously unknown here. So\n the algorithm performs a sequence of alternating steps, obtaining\n iteratively improving estimates of :math:`Q` and :math:`P`, similarly to an\n expectation-maximization (EM) procedure. It is not known whether this\n procedure is formally an EM, but the analogy can be drawn as follows: after\n obtaining an initial guess of of :math:`\\hat{Q}_{0}`, obtaining an\n assignment matrix :math:`\\hat{P}_{i+1} | \\hat{Q}_{i}` (\"E-step\") is done by\n solving an optimal transport problem via Sinkhorn algorithm, whereas\n obtaining an orthogonal alignment matrix :math:`Q_{i+1} | P_{i}` (\"M-step\")\n is done via regular orthogonal procurstes. 
These alternating steps are\n performed until ``iterative_num_reps`` is reached.\n\n For more on how the initial guess can be performed, see ``init``.\n\n \"\"\"\n\n def __init__(\n self,\n optimal_transport_lambda=0.1,\n optimal_transport_eps=0.01,\n optimal_transport_num_reps=1000,\n iterative_num_reps=100,\n init=\"2d\",\n initial_Q=None,\n initial_P=None,\n ):\n # check optimal_transport_lambda argument\n if type(optimal_transport_lambda) is not float:\n msg = \"Optimal_transport_lambda must be a float, not {}\".format(\n type(optimal_transport_lambda)\n )\n raise TypeError(msg)\n if optimal_transport_lambda < 0:\n msg = \"{} is an invalud value of the optimal_transport_lambda, must be non-negative\".format(\n optimal_transport_lambda\n )\n raise ValueError(msg)\n # check optimal_transport_lambda argument\n if type(optimal_transport_eps) is not float:\n msg = \"Optimal_transport_eps must be a float, not {}\".format(\n type(optimal_transport_eps)\n )\n raise TypeError(msg)\n if optimal_transport_eps <= 0:\n msg = \"{} is an invalid value of the optimal transport eps, must be postitive\".format(\n optimal_transport_eps\n )\n raise ValueError(msg)\n # check optimal_transport_num_reps argument\n if type(optimal_transport_num_reps) is not int:\n msg = \"Optimal_transport_num_reps must be a int, not {}\".format(\n type(optimal_transport_num_reps)\n )\n raise TypeError(msg)\n if optimal_transport_num_reps < 1:\n msg = \"{} is an invalid number of repetitions, must be non-negative\".format(\n iterative_num_reps\n )\n raise ValueError(msg)\n # check iterative_num_reps argument\n if type(iterative_num_reps) is not int:\n msg = \"Iterative_num_reps must be a int, not {}\".format(\n type(iterative_num_reps)\n )\n raise TypeError(msg)\n if iterative_num_reps < 0:\n msg = \"{} is an invalid number of repetitions, must be non-negative\".format(\n iterative_num_reps\n )\n raise ValueError(msg)\n # check init argument\n if type(init) is not str:\n msg = \"Init must be a str, not {}\".format(type(init))\n raise TypeError(msg)\n inits_supported = [\"2d\", \"sign_flips\", \"custom\"]\n if init not in inits_supported:\n msg = \"Supported inits are {}\".format(inits_supported)\n raise ValueError(msg)\n # check that initial_Q and intial_P aren't provided when shouldn't be\n if initial_Q is not None and init != \"custom\":\n msg = \"Initial_Q can only be provided if init is set to custom\"\n raise ValueError(msg)\n if initial_P is not None and init != \"custom\":\n msg = \"Initial_P can only be provided if init is set to custom\"\n raise ValueError(msg)\n if initial_Q is not None and initial_P is not None:\n msg = \"Initial_Q and initial_P cannot be provided simultaneously\"\n raise ValueError(msg)\n # check initial_Q argument\n if initial_Q is not None:\n if not isinstance(initial_Q, np.ndarray):\n msg = f\"Initial_Q must be np.ndarray or None, not {type(initial_Q)}\"\n raise TypeError(msg)\n initial_Q = check_array(initial_Q, copy=True)\n if initial_Q.shape[0] != initial_Q.shape[1]:\n msg = \"Initial_Q must be a square orthogonal matrix\"\n raise ValueError(msg)\n if not np.allclose(initial_Q.T @ initial_Q, np.eye(initial_Q.shape[0])):\n msg = \"Initial_Q must be a square orthogonal matrix\"\n raise ValueError(msg)\n # check initial_P argument\n if initial_P is not None:\n if not isinstance(initial_P, np.ndarray):\n msg = f\"Initial_P must be np.ndarray or None, not {type(initial_P)}\"\n raise TypeError(msg)\n initial_P = check_array(initial_P, copy=True)\n n, m = initial_P.shape\n if not (\n 
np.allclose(initial_P.sum(axis=0), np.ones(m) / m)\n and np.allclose(initial_P.sum(axis=1), np.ones(n) / n)\n ):\n msg = (\n \"Initial_P must be a soft assignment matrix \"\n \"(rows add up to (1/number of cols) \"\n \"and columns add up to (1/number of rows))\"\n )\n raise ValueError(msg)\n\n super().__init__()\n\n self.optimal_transport_eps = optimal_transport_eps\n self.optimal_transport_num_reps = optimal_transport_num_reps\n self.optimal_transport_lambda = optimal_transport_lambda\n self.iterative_num_reps = iterative_num_reps\n self.init = init\n self.initial_Q = initial_Q\n self.initial_P = initial_P\n\n def _optimal_transport(self, X, Y, Q):\n # \"E step\" of the SeedlessProcrustes.\n n, d = X.shape\n m, _ = Y.shape\n # initialize probability mass arrays & the cost matrix ; run sinkhorn\n probability_mass_X = np.ones(n) / n\n probability_mass_Y = np.ones(m) / m\n cost_matrix = (\n np.linalg.norm((X @ Q).reshape(n, 1, d) - Y.reshape(1, m, d), axis=2) ** 2\n )\n P = ot.sinkhorn(\n a=probability_mass_X,\n b=probability_mass_Y,\n M=cost_matrix,\n reg=self.optimal_transport_lambda,\n numItermax=self.optimal_transport_eps,\n stopThr=self.optimal_transport_eps,\n )\n return P\n\n def _procrustes(self, X, Y, P):\n # \"M step\" of the SeedlessProcurstes.\n aligner = OrthogonalProcrustes()\n Q = aligner.fit(X, P @ Y).Q_\n return Q\n\n def _iterative_ot(self, X, Y, Q):\n # this P is not used. it is set to default in case numreps=0\n P = np.ones((X.shape[0], Y.shape[0])) / (X.shape[0] * Y.shape[0])\n for i in range(self.iterative_num_reps):\n P = self._optimal_transport(X, Y, Q)\n Q = self._procrustes(X, Y, P)\n return P, Q\n\n def _compute_objective(self, X, Y, Q=None, P=None):\n if Q is None:\n Q = self.Q_\n if P is None:\n P = self.P_\n return np.linalg.norm(X @ Q - P @ Y, ord=\"fro\")\n\n def fit(self, X, Y):\n \"\"\"\n Uses the two datasets to learn the matrix `self.Q_` that aligns the\n first dataset with the second.\n\n Parameters\n ----------\n X : np.ndarray, shape (n, d)\n Dataset to be mapped to ``Y``, must have same number of dimensions\n (axis 1) as ``Y``.\n\n Y : np.ndarray, shape (m, d)\n Target dataset, must have same number of dimensions (axis 1) as ``X``.\n\n Returns\n -------\n self : returns an instance of self\n \"\"\"\n X, Y = self._check_datasets(X, Y)\n n, d = X.shape\n m, _ = Y.shape\n\n if self.init == \"2d\":\n P_matrices = np.zeros((2 ** d, n, m))\n Q_matrices = np.zeros((2 ** d, d, d))\n objectives = np.zeros(2 ** d)\n # try 2^d different initializations\n for i in range(2 ** d):\n initial_Q = _sign_flip_matrix_from_int(i, d)\n P_matrices[i], Q_matrices[i] = P, Q = self._iterative_ot(\n X, Y, initial_Q\n )\n objectives[i] = self._compute_objective(X, Y, Q, P)\n # pick the best one, using the objective function value\n best = np.argmin(objectives)\n self.selected_initial_Q_ = _sign_flip_matrix_from_int(best, d)\n self.P_, self.Q_ = P_matrices[best], Q_matrices[best]\n elif self.init == \"sign_flips\":\n aligner = SignFlips()\n self.selected_initial_Q_ = aligner.fit(X, Y).Q_\n self.P_, self.Q_ = self._iterative_ot(X, Y, self.selected_initial_Q_)\n else:\n # determine initial Q if \"custom\"\n if self.initial_Q is not None:\n self.selected_initial_Q_ = self.initial_Q\n elif self.initial_P is not None:\n # use initial P, if provided\n self.selected_initial_Q_ = self._procrustes(X, Y, self.initial_P)\n else:\n # set to initial Q to identity if neither Q nor P provided\n self.selected_initial_Q_ = np.eye(d)\n self.P_, self.Q_ = self._iterative_ot(X, Y, 
self.selected_initial_Q_)\n self.score_ = self._compute_objective(X, Y)\n\n return self\n\n\ndef _sign_flip_matrix_from_int(val_int, d):\n val_bin = bin(val_int)[2:]\n val_bin = \"0\" * (d - len(val_bin)) + val_bin\n return np.diag(np.array([(float(i) - 0.5) * -2 for i in val_bin]))\n",
"# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import adjusted_rand_score\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.model_selection import ParameterGrid\n\nfrom .base import BaseCluster\n\n\nclass GaussianCluster(BaseCluster):\n r\"\"\"\n Gaussian Mixture Model (GMM)\n\n Representation of a Gaussian mixture model probability distribution.\n This class allows to estimate the parameters of a Gaussian mixture\n distribution. It computes all possible models from one component to\n ``max_components``. The best model is given by the lowest BIC score.\n\n Parameters\n ----------\n min_components : int, default=2.\n The minimum number of mixture components to consider (unless\n ``max_components`` is None, in which case this is the maximum number of\n components to consider). If ``max_componens`` is not None, ``min_components``\n must be less than or equal to ``max_components``.\n\n max_components : int or None, default=None.\n The maximum number of mixture components to consider. Must be greater\n than or equal to ``min_components``.\n\n covariance_type : {'all' (default), 'full', 'tied', 'diag', 'spherical'}, optional\n String or list/array describing the type of covariance parameters to use.\n If a string, it must be one of:\n\n - 'all'\n considers all covariance structures in ['spherical', 'diag', 'tied', 'full']\n - 'full'\n each component has its own general covariance matrix\n - 'tied'\n all components share the same general covariance matrix\n - 'diag'\n each component has its own diagonal covariance matrix\n - 'spherical'\n each component has its own single variance\n\n If a list/array, it must be a list/array of strings containing only\n 'spherical', 'tied', 'diag', and/or 'full'.\n\n tol : float, defaults to 1e-3.\n The convergence threshold. EM iterations will stop when the\n lower bound average gain is below this threshold.\n\n reg_covar : float, defaults to 1e-6.\n Non-negative regularization added to the diagonal of covariance.\n Allows to assure that the covariance matrices are all positive.\n\n max_iter : int, defaults to 100.\n The number of EM iterations to perform.\n\n n_init : int, defaults to 1.\n The number of initializations to perform. The best results are kept.\n\n init_params : {'kmeans', 'random'}, defaults to 'kmeans'.\n The method used to initialize the weights, the means and the\n precisions.\n Must be one of::\n\n 'kmeans' : responsibilities are initialized using kmeans.\n 'random' : responsibilities are initialized randomly.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, ``random_state`` is the seed used by the random number generator;\n If RandomState instance, ``random_state`` is the random number generator;\n If None, the random number generator is the RandomState instance used\n by ``np.random``.\n\n\n Attributes\n ----------\n n_components_ : int\n Optimal number of components based on BIC.\n covariance_type_ : str\n Optimal covariance type based on BIC.\n model_ : GaussianMixture object\n Fitted GaussianMixture object fitted with optimal number of components\n and optimal covariance structure.\n bic_ : pandas.DataFrame\n A pandas DataFrame of BIC values computed for all possible number of clusters\n given by ``range(min_components, max_components + 1)`` and all covariance\n structures given by :attr:`covariance_type`.\n ari_ : pandas.DataFrame\n Only computed when y is given. 
Pandas Dataframe containing ARI values computed\n for all possible number of clusters given by ``range(min_components,\n max_components)`` and all covariance structures given by :attr:`covariance_type`.\n \"\"\"\n\n def __init__(\n self,\n min_components=2,\n max_components=None,\n covariance_type=\"all\",\n tol=1e-3,\n reg_covar=1e-6,\n max_iter=100,\n n_init=1,\n init_params=\"kmeans\",\n random_state=None,\n ):\n if isinstance(min_components, int):\n if min_components <= 0:\n msg = \"min_components must be >= 1.\"\n raise ValueError(msg)\n else:\n msg = \"min_components must be an integer, not {}.\".format(\n type(min_components)\n )\n raise TypeError(msg)\n\n if isinstance(max_components, int):\n if max_components <= 0:\n msg = \"max_components must be >= 1 or None.\"\n raise ValueError(msg)\n elif min_components > max_components:\n msg = \"min_components must be less than or equal to max_components.\"\n raise ValueError(msg)\n elif max_components is not None:\n msg = \"max_components must be an integer or None, not {}.\".format(\n type(max_components)\n )\n raise TypeError(msg)\n\n if isinstance(covariance_type, (np.ndarray, list)):\n covariance_type = np.unique(covariance_type)\n elif isinstance(covariance_type, str):\n if covariance_type == \"all\":\n covariance_type = [\"spherical\", \"diag\", \"tied\", \"full\"]\n else:\n covariance_type = [covariance_type]\n else:\n msg = \"covariance_type must be a numpy array, a list, or \"\n msg += \"string, not {}\".format(type(covariance_type))\n raise TypeError(msg)\n\n for cov in covariance_type:\n if cov not in [\"spherical\", \"diag\", \"tied\", \"full\"]:\n msg = (\n \"covariance structure must be one of \"\n + '[\"spherical\", \"diag\", \"tied\", \"full\"]'\n )\n msg += \" not {}\".format(cov)\n raise ValueError(msg)\n\n new_covariance_type = []\n for cov in [\"spherical\", \"diag\", \"tied\", \"full\"]:\n if cov in covariance_type:\n new_covariance_type.append(cov)\n\n self.min_components = min_components\n self.max_components = max_components\n self.covariance_type = new_covariance_type\n self.tol = tol\n self.reg_covar = reg_covar\n self.max_iter = max_iter\n self.n_init = n_init\n self.init_params = init_params\n self.random_state = random_state\n\n def fit(self, X, y=None):\n \"\"\"\n Fits gaussian mixure model to the data.\n Estimate model parameters with the EM algorithm.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n List of n_features-dimensional data points. Each row\n corresponds to a single data point.\n\n y : array-like, shape (n_samples,), optional (default=None)\n List of labels for X if available. 
Used to compute\n ARI scores.\n\n Returns\n -------\n self\n \"\"\"\n\n # Deal with number of clusters\n if self.max_components is None:\n lower_ncomponents = 1\n upper_ncomponents = self.min_components\n else:\n lower_ncomponents = self.min_components\n upper_ncomponents = self.max_components\n\n n_mixture_components = upper_ncomponents - lower_ncomponents + 1\n\n if upper_ncomponents > X.shape[0]:\n if self.max_components is None:\n msg = \"if max_components is None then min_components must be >= \"\n msg += \"n_samples, but min_components = {}, n_samples = {}\".format(\n upper_ncomponents, X.shape[0]\n )\n else:\n msg = \"max_components must be >= n_samples, but max_components = \"\n msg += \"{}, n_samples = {}\".format(upper_ncomponents, X.shape[0])\n raise ValueError(msg)\n elif lower_ncomponents > X.shape[0]:\n msg = \"min_components must be <= n_samples, but min_components = \"\n msg += \"{}, n_samples = {}\".format(upper_ncomponents, X.shape[0])\n raise ValueError(msg)\n\n # Get parameters\n random_state = self.random_state\n\n param_grid = dict(\n covariance_type=self.covariance_type,\n n_components=range(lower_ncomponents, upper_ncomponents + 1),\n tol=[self.tol],\n reg_covar=[self.reg_covar],\n max_iter=[self.max_iter],\n n_init=[self.n_init],\n init_params=[self.init_params],\n random_state=[random_state],\n )\n\n param_grid = list(ParameterGrid(param_grid))\n\n models = [[] for _ in range(n_mixture_components)]\n bics = [[] for _ in range(n_mixture_components)]\n aris = [[] for _ in range(n_mixture_components)]\n\n for i, params in enumerate(param_grid):\n model = GaussianMixture(**params)\n model.fit(X)\n models[i % n_mixture_components].append(model)\n bics[i % n_mixture_components].append(model.bic(X))\n if y is not None:\n predictions = model.predict(X)\n aris[i % n_mixture_components].append(\n adjusted_rand_score(y, predictions)\n )\n\n self.bic_ = pd.DataFrame(\n bics,\n index=np.arange(lower_ncomponents, upper_ncomponents + 1),\n columns=self.covariance_type,\n )\n\n if y is not None:\n self.ari_ = pd.DataFrame(\n aris,\n index=np.arange(lower_ncomponents, upper_ncomponents + 1),\n columns=self.covariance_type,\n )\n else:\n self.ari_ = None\n\n # Get the best cov type and its index within the dataframe\n best_covariance = self.bic_.min(axis=0).idxmin()\n best_covariance_idx = self.covariance_type.index(best_covariance)\n\n # Get the index best component for best_covariance\n best_component = self.bic_.idxmin()[best_covariance]\n\n self.n_components_ = best_component\n self.covariance_type_ = best_covariance\n self.model_ = models[best_component - self.min_components][best_covariance_idx]\n\n return self\n",
"# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nimport numpy as np\nfrom graspologic.simulations import sample_edges\n\n\ndef check_dirloop(directed, loops):\n if type(directed) is not bool:\n raise TypeError(\"directed is not of type bool.\")\n if type(loops) is not bool:\n raise TypeError(\"loops is not of type bool.\")\n\n\ndef check_r(r):\n if not np.issubdtype(type(r), np.floating):\n raise TypeError(\"r is not of type float.\")\n elif r < -1 or r > 1:\n msg = \"r must between -1 and 1.\"\n raise ValueError(msg)\n\n\ndef check_rel_er(p, r):\n if p + r * (1 - p) < 0:\n msg = \"p + r * (1 - p) should be bigger than 0\"\n raise ValueError(msg)\n\n if p * (1 - r) < 0:\n msg = \"p * (1 - r) should be bigger than 0\"\n raise ValueError(msg)\n\n\ndef check_rel_sbm(p, r):\n for i in range(np.array(p).shape[0]):\n for j in range(np.array(p).shape[1]):\n if p[i][j] + r * (1 - p[i][j]) < 0:\n msg = \"p + r * (1 - p) should be bigger than 0\"\n raise ValueError(msg)\n\n elif p[i][j] * (1 - r) < 0:\n msg = \"p * (1 - r) should be bigger than 0\"\n raise ValueError(msg)\n\n\ndef sample_edges_corr(P, R, directed=False, loops=False):\n \"\"\"\n Generate a pair of correlated graphs with Bernoulli distribution.\n Both G1 and G2 are binary matrices.\n\n Parameters\n ----------\n P: np.ndarray, shape (n_vertices, n_vertices)\n Matrix of probabilities (between 0 and 1) for a random graph.\n\n R: np.ndarray, shape (n_vertices, n_vertices)\n Matrix of correlation (between 0 and 1) between graph pairs.\n\n directed: boolean, optional (default=False)\n If False, output adjacency matrix will be symmetric. Otherwise, output adjacency\n matrix will be asymmetric.\n\n loops: boolean, optional (default=False)\n If False, no edges will be sampled in the diagonal. Otherwise, edges\n are sampled in the diagonal.\n\n References\n ----------\n .. [1] Vince Lyzinski, et al. 
\"Seeded Graph Matching for Correlated Erdos-R\u0013enyi Graphs\",\n Journal of Machine Learning Research 15, 2014\n\n Returns\n -------\n G1: ndarray (n_vertices, n_vertices)\n Adjacency matrix the same size as P representing a random graph.\n\n G2: ndarray (n_vertices, n_vertices)\n Adjacency matrix the same size as P representing a random graph.\n\n Examples\n --------\n >>> np.random.seed(1)\n >>> p = 0.5\n >>> r = 0.3\n >>> R = r * np.ones((5, 5))\n >>> P = p * np.ones((5, 5))\n\n To sample a correlated graph pair based on P and R matrices:\n\n >>> sample_edges_corr(P, R, directed = False, loops = False)\n (array([[0., 1., 0., 0., 0.],\n [1., 0., 0., 0., 0.],\n [0., 0., 0., 0., 1.],\n [0., 0., 0., 0., 1.],\n [0., 0., 1., 1., 0.]]), array([[0., 1., 0., 0., 0.],\n [1., 0., 1., 0., 1.],\n [0., 1., 0., 1., 1.],\n [0., 0., 1., 0., 1.],\n [0., 1., 1., 1., 0.]]))\n \"\"\"\n # test input\n # check P\n if type(P) is not np.ndarray:\n raise TypeError(\"P must be numpy.ndarray\")\n if len(P.shape) != 2:\n raise ValueError(\"P must have dimension 2 (n_vertices, n_vertices)\")\n if P.shape[0] != P.shape[1]:\n raise ValueError(\"P must be a square matrix\")\n\n # check R\n if type(R) is not np.ndarray:\n raise TypeError(\"R must be numpy.ndarray\")\n if len(R.shape) != 2:\n raise ValueError(\"R must have dimension 2 (n_vertices, n_vertices)\")\n if R.shape[0] != P.shape[1]:\n raise ValueError(\"R must be a square matrix\")\n\n # check directed and loops\n check_dirloop(directed, loops)\n\n G1 = sample_edges(P, directed=directed, loops=loops)\n P2 = G1.copy()\n P2 = np.where(P2 == 1, P + R * (1 - P), P * (1 - R))\n G2 = sample_edges(P2, directed=directed, loops=loops)\n return G1, G2\n\n\ndef er_corr(n, p, r, directed=False, loops=False):\n \"\"\"\n Generate a pair of correlated graphs with specified edge probability\n Both G1 and G2 are binary matrices.\n\n Parameters\n ----------\n n: int\n Number of vertices\n\n p: float\n Probability of an edge existing between two vertices, between 0 and 1.\n\n r: float\n The value of the correlation between the same vertices in two graphs.\n\n directed: boolean, optional (default=False)\n If False, output adjacency matrix will be symmetric. Otherwise, output adjacency\n matrix will be asymmetric.\n\n loops: boolean, optional (default=False)\n If False, no edges will be sampled in the diagonal. 
Otherwise, edges\n are sampled in the diagonal.\n\n Returns\n -------\n G1: ndarray (n_vertices, n_vertices)\n Adjacency matrix the same size as P representing a random graph.\n\n G2: ndarray (n_vertices, n_vertices)\n Adjacency matrix the same size as P representing a random graph.\n\n Examples\n --------\n >>> np.random.seed(2)\n >>> p = 0.5\n >>> r = 0.3\n >>> n = 5\n\n To sample a correlated ER graph pair based on n, p and r:\n\n >>> er_corr(n, p, r, directed=False, loops=False)\n (array([[0., 0., 1., 0., 0.],\n [0., 0., 0., 1., 0.],\n [1., 0., 0., 1., 1.],\n [0., 1., 1., 0., 1.],\n [0., 0., 1., 1., 0.]]), array([[0., 1., 1., 1., 0.],\n [1., 0., 0., 1., 0.],\n [1., 0., 0., 1., 1.],\n [1., 1., 1., 0., 1.],\n [0., 0., 1., 1., 0.]]))\n \"\"\"\n # test input\n # check n\n if not np.issubdtype(type(n), np.integer):\n raise TypeError(\"n is not of type int.\")\n elif n <= 0:\n msg = \"n must be > 0.\"\n raise ValueError(msg)\n\n # check p\n if not np.issubdtype(type(p), np.floating):\n raise TypeError(\"r is not of type float.\")\n elif p < 0 or p > 1:\n msg = \"p must between 0 and 1.\"\n raise ValueError(msg)\n\n # check r\n check_r(r)\n\n # check the relation between r and p\n check_rel_er(p, r)\n\n # check directed and loops\n check_dirloop(directed, loops)\n\n P = p * np.ones((n, n))\n R = r * np.ones((n, n))\n G1, G2 = sample_edges_corr(P, R, directed=directed, loops=loops)\n return G1, G2\n\n\ndef sbm_corr(n, p, r, directed=False, loops=False):\n \"\"\"\n Generate a pair of correlated graphs with specified edge probability\n Both G1 and G2 are binary matrices.\n\n Parameters\n ----------\n n: list of int, shape (n_communities)\n Number of vertices in each community. Communities are assigned n[0], n[1], ...\n\n p: array-like, shape (n_communities, n_communities)\n Probability of an edge between each of the communities, where ``p[i, j]`` indicates\n the probability of a connection between edges in communities ``[i, j]``.\n ``0 < p[i, j] < 1`` for all ``i, j``.\n\n r: float\n Probability of the correlation between the same vertices in two graphs.\n\n directed: boolean, optional (default=False)\n If False, output adjacency matrix will be symmetric. Otherwise, output adjacency\n matrix will be asymmetric.\n\n loops: boolean, optional (default=False)\n If False, no edges will be sampled in the diagonal. 
Otherwise, edges\n are sampled in the diagonal.\n\n Returns\n -------\n G1: ndarray (n_vertices, n_vertices)\n Adjacency matrix the same size as P representing a random graph.\n\n G2: ndarray (n_vertices, n_vertices)\n Adjacency matrix the same size as P representing a random graph.\n\n Examples\n --------\n >>> np.random.seed(3)\n >>> n = [3, 3]\n >>> p = [[0.5, 0.1], [0.1, 0.5]]\n >>> r = 0.3\n\n To sample a correlated SBM graph pair based on n, p and r:\n\n >>> sbm_corr(n, p, r, directed=False, loops=False)\n (array([[0., 1., 0., 0., 0., 0.],\n [1., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 1.],\n [0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 1., 0., 0.]]), array([[0., 1., 0., 0., 0., 0.],\n [1., 0., 0., 1., 1., 0.],\n [0., 0., 0., 0., 0., 0.],\n [0., 1., 0., 0., 0., 1.],\n [0., 1., 0., 0., 0., 0.],\n [0., 0., 0., 1., 0., 0.]]))\n \"\"\"\n # test input\n # Check n\n if not isinstance(n, (list, np.ndarray)):\n msg = \"n must be a list or np.array, not {}.\".format(type(n))\n raise TypeError(msg)\n else:\n n = np.array(n)\n if not np.issubdtype(n.dtype, np.integer):\n msg = \"There are non-integer elements in n\"\n raise ValueError(msg)\n\n # Check p\n if not isinstance(p, (list, np.ndarray)):\n msg = \"p must be a list or np.array, not {}.\".format(type(p))\n raise TypeError(msg)\n else:\n p = np.array(p)\n if not np.issubdtype(p.dtype, np.number):\n msg = \"There are non-numeric elements in p\"\n raise ValueError(msg)\n elif p.shape != (n.size, n.size):\n msg = \"p is must have shape len(n) x len(n), not {}\".format(p.shape)\n raise ValueError(msg)\n elif np.any(p < 0) or np.any(p > 1):\n msg = \"Values in p must be in between 0 and 1.\"\n raise ValueError(msg)\n\n # check r\n check_r(r)\n\n # check the relation between r and p\n check_rel_sbm(p, r)\n\n # check directed and loops\n check_dirloop(directed, loops)\n\n P = np.zeros((np.sum(n), np.sum(n)))\n block_indices = np.insert(np.cumsum(np.array(n)), 0, 0)\n for i in range(np.array(p).shape[0]): # for each row\n for j in range(np.array(p).shape[1]): # for each column\n P[\n block_indices[i] : block_indices[i + 1],\n block_indices[j] : block_indices[j + 1],\n ] = p[i][j]\n R = r * np.ones((np.sum(n), np.sum(n)))\n G1, G2 = sample_edges_corr(P, R, directed=directed, loops=loops)\n return G1, G2\n",
"# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_equal\nfrom sklearn.exceptions import NotFittedError\n\nfrom graspologic.cluster.autogmm import AutoGMMCluster\nfrom graspologic.embed.ase import AdjacencySpectralEmbed\nfrom graspologic.simulations.simulations import sbm\n\n\ndef test_inputs():\n # Generate random data\n X = np.random.normal(0, 1, size=(100, 3))\n\n # min_components < 1\n with pytest.raises(ValueError):\n AutoGMM = AutoGMMCluster(min_components=0)\n\n # min_components integer\n with pytest.raises(TypeError):\n AutoGMM = AutoGMMCluster(min_components=\"1\")\n\n # max_components < min_components\n with pytest.raises(ValueError):\n AutoGMM = AutoGMMCluster(min_components=1, max_components=0)\n\n # max_components integer\n with pytest.raises(TypeError):\n AutoGMM = AutoGMMCluster(min_components=1, max_components=\"1\")\n\n # affinity is not an array, string or list\n with pytest.raises(TypeError):\n AutoGMM = AutoGMMCluster(min_components=1, affinity=1)\n\n # affinity is not in ['euclidean', 'manhattan', 'cosine', 'none']\n with pytest.raises(ValueError):\n AutoGMM = AutoGMMCluster(min_components=1, affinity=\"graspologic\")\n\n # linkage is not an array, string or list\n with pytest.raises(TypeError):\n AutoGMM = AutoGMMCluster(min_components=1, linkage=1)\n\n # linkage is not in ['single', 'average', 'complete', 'ward']\n with pytest.raises(ValueError):\n AutoGMM = AutoGMMCluster(min_components=1, linkage=\"graspologic\")\n\n # euclidean is not an affinity option when ward is a linkage option\n with pytest.raises(ValueError):\n AutoGMM = AutoGMMCluster(min_components=1, affinity=\"manhattan\", linkage=\"ward\")\n\n # covariance type is not an array, string or list\n with pytest.raises(TypeError):\n AutoGMM = AutoGMMCluster(min_components=1, covariance_type=1)\n\n # covariance type is not in ['spherical', 'diag', 'tied', 'full']\n with pytest.raises(ValueError):\n AutoGMM = AutoGMMCluster(min_components=1, covariance_type=\"graspologic\")\n\n # min_cluster > n_samples when max_cluster is None\n with pytest.raises(ValueError):\n AutoGMM = AutoGMMCluster(1000)\n AutoGMM.fit(X)\n\n with pytest.raises(ValueError):\n AutoGMM = AutoGMMCluster(1000)\n AutoGMM.fit_predict(X)\n\n # max_cluster > n_samples when max_cluster is not None\n with pytest.raises(ValueError):\n AutoGMM = AutoGMMCluster(10, 1001)\n AutoGMM.fit(X)\n\n with pytest.raises(ValueError):\n AutoGMM = AutoGMMCluster(10, 1001)\n AutoGMM.fit_predict(X)\n\n # min_cluster > n_samples when max_cluster is None\n with pytest.raises(ValueError):\n AutoGMM = AutoGMMCluster(1000)\n AutoGMM.fit(X)\n\n with pytest.raises(ValueError):\n AutoGMM = AutoGMMCluster(10, 1001)\n AutoGMM.fit_predict(X)\n\n # min_cluster > n_samples when max_cluster is not None\n with pytest.raises(ValueError):\n AutoGMM = AutoGMMCluster(1000, 1001)\n AutoGMM.fit(X)\n\n with pytest.raises(ValueError):\n AutoGMM = AutoGMMCluster(1000, 1001)\n AutoGMM.fit_predict(X)\n\n # label_init is not a 1-D array\n with pytest.raises(TypeError):\n AutoGMM = AutoGMMCluster(label_init=np.zeros([100, 2]))\n\n # label_init is not 1-D array, a list or None.\n with pytest.raises(TypeError):\n AutoGMM = AutoGMMCluster(label_init=\"label\")\n\n # label_init length is not equal to n_samples\n with pytest.raises(ValueError):\n AutoGMM = AutoGMMCluster(label_init=np.zeros([50, 1]))\n AutoGMM.fit(X)\n\n with pytest.raises(ValueError):\n AutoGMM = 
AutoGMMCluster(label_init=np.zeros([50, 1]))\n AutoGMM.fit_predict(X)\n\n with pytest.raises(TypeError):\n AutoGMM = AutoGMMCluster(label_init=np.zeros([100, 2]), max_iter=-2)\n\n # criter = cic\n with pytest.raises(ValueError):\n AutoGMM = AutoGMMCluster(selection_criteria=\"cic\")\n\n\ndef test_labels_init():\n X = np.random.normal(0, 1, size=(5, 3))\n\n with pytest.raises(ValueError):\n AutoGMM = AutoGMMCluster(\n min_components=1, max_components=1, label_init=np.array([0, 0, 0, 0, 1])\n )\n AutoGMM.fit_predict(X)\n\n with pytest.raises(ValueError):\n AutoGMM = AutoGMMCluster(\n min_components=1, max_components=2, label_init=np.array([0, 0, 0, 0, 1])\n )\n AutoGMM.fit_predict(X)\n\n with pytest.raises(ValueError):\n AutoGMM = AutoGMMCluster(\n min_components=2, max_components=3, label_init=np.array([0, 0, 0, 0, 1])\n )\n AutoGMM.fit_predict(X)\n\n AutoGMM = AutoGMMCluster(\n min_components=2, max_components=2, label_init=np.array([0, 0, 0, 0, 1])\n )\n AutoGMM.fit_predict(X)\n\n\ndef test_predict_without_fit():\n # Generate random data\n X = np.random.normal(0, 1, size=(100, 3))\n\n with pytest.raises(NotFittedError):\n AutoGMM = AutoGMMCluster(min_components=2)\n AutoGMM.predict(X)\n\n\ndef test_cosine_on_0():\n X = np.array([[0, 1, 0], [1, 0, 1], [0, 0, 0], [1, 1, 0], [0, 0, 1]])\n\n with pytest.raises(ValueError):\n AutoGMM = AutoGMMCluster(min_components=3, affinity=\"all\")\n AutoGMM.fit(X)\n\n\ndef test_cosine_with_0():\n X = np.array(\n [\n [0, 1, 0],\n [1, 0, 1],\n [0, 0, 0],\n [1, 1, 0],\n [0, 0, 1],\n [0, 1, 1],\n [1, 1, 1],\n [1, 0, 0],\n [0, 1, 1],\n [1, 1, 0],\n [0, 1, 0],\n ]\n )\n\n with pytest.warns(UserWarning):\n AutoGMM = AutoGMMCluster(min_components=2, affinity=\"all\")\n AutoGMM.fit(X)\n\n\ndef test_no_y():\n np.random.seed(1)\n\n n = 100\n d = 3\n\n X1 = np.random.normal(2, 0.5, size=(n, d))\n X2 = np.random.normal(-2, 0.5, size=(n, d))\n X = np.vstack((X1, X2))\n\n AutoGMM = AutoGMMCluster(max_components=5)\n AutoGMM.fit(X)\n\n assert_equal(AutoGMM.n_components_, 2)\n\n\ndef test_two_class():\n \"\"\"\n Easily separable two gaussian problem.\n \"\"\"\n np.random.seed(1)\n\n n = 100\n d = 3\n\n X1 = np.random.normal(2, 0.5, size=(n, d))\n X2 = np.random.normal(-2, 0.5, size=(n, d))\n X = np.vstack((X1, X2))\n y = np.repeat([0, 1], n)\n\n AutoGMM = AutoGMMCluster(max_components=5)\n AutoGMM.fit(X, y)\n\n n_components = AutoGMM.n_components_\n\n # Assert that the two cluster model is the best\n assert_equal(n_components, 2)\n\n # Asser that we get perfect clustering\n assert_allclose(AutoGMM.ari_, 1)\n\n\ndef test_two_class_parallel():\n \"\"\"\n Easily separable two gaussian problem.\n \"\"\"\n np.random.seed(1)\n\n n = 100\n d = 3\n\n X1 = np.random.normal(2, 0.5, size=(n, d))\n X2 = np.random.normal(-2, 0.5, size=(n, d))\n X = np.vstack((X1, X2))\n y = np.repeat([0, 1], n)\n\n AutoGMM = AutoGMMCluster(max_components=5, n_jobs=2)\n AutoGMM.fit(X, y)\n\n n_components = AutoGMM.n_components_\n\n # Assert that the two cluster model is the best\n assert_equal(n_components, 2)\n\n # Asser that we get perfect clustering\n assert_allclose(AutoGMM.ari_, 1)\n\n\ndef test_two_class_aic():\n \"\"\"\n Easily separable two gaussian problem.\n \"\"\"\n np.random.seed(1)\n\n n = 100\n d = 3\n\n X1 = np.random.normal(2, 0.5, size=(n, d))\n X2 = np.random.normal(-2, 0.5, size=(n, d))\n X = np.vstack((X1, X2))\n y = np.repeat([0, 1], n)\n\n AutoGMM = AutoGMMCluster(max_components=5, selection_criteria=\"aic\")\n AutoGMM.fit(X, y)\n\n n_components = AutoGMM.n_components_\n\n # AIC 
gets the number of components wrong\n assert_equal(n_components >= 1, True)\n assert_equal(n_components <= 5, True)\n\n # Assert that the ari value is valid\n assert_equal(AutoGMM.ari_ >= -1, True)\n assert_equal(AutoGMM.ari_ <= 1, True)\n\n\ndef test_five_class():\n \"\"\"\n Easily separable five gaussian problem.\n \"\"\"\n np.random.seed(1)\n\n n = 100\n mus = [[i * 5, 0] for i in range(5)]\n cov = np.eye(2) # balls\n\n X = np.vstack([np.random.multivariate_normal(mu, cov, n) for mu in mus])\n\n AutoGMM = AutoGMMCluster(min_components=3, max_components=10, covariance_type=\"all\")\n AutoGMM.fit(X)\n\n assert_equal(AutoGMM.n_components_, 5)\n\n\ndef test_five_class_aic():\n \"\"\"\n Easily separable five gaussian problem.\n \"\"\"\n np.random.seed(1)\n\n n = 100\n mus = [[i * 5, 0] for i in range(5)]\n cov = np.eye(2) # balls\n\n X = np.vstack([np.random.multivariate_normal(mu, cov, n) for mu in mus])\n\n AutoGMM = AutoGMMCluster(\n min_components=3,\n max_components=10,\n covariance_type=\"all\",\n selection_criteria=\"aic\",\n )\n AutoGMM.fit(X)\n\n # AIC fails often so there is no assertion here\n assert_equal(AutoGMM.n_components_ >= 3, True)\n assert_equal(AutoGMM.n_components_ <= 10, True)\n\n\ndef test_ase_three_blocks():\n \"\"\"\n Expect 3 clusters from a 3 block model\n \"\"\"\n np.random.seed(1)\n\n # Generate adjacency and labels\n n = 50\n n_communites = [n, n, n]\n p = np.array([[0.8, 0.3, 0.2], [0.3, 0.8, 0.3], [0.2, 0.3, 0.8]])\n y = np.repeat([1, 2, 3], repeats=n)\n\n A = sbm(n=n_communites, p=p)\n\n # Embed to get latent positions\n ase = AdjacencySpectralEmbed(n_components=5)\n X_hat = ase.fit_transform(A)\n\n # Compute clusters\n AutoGMM = AutoGMMCluster(max_components=10)\n AutoGMM.fit(X_hat, y)\n\n n_components = AutoGMM.n_components_\n\n # Assert that the three cluster model is the best\n assert_equal(n_components, 3)\n\n # Asser that we get perfect clustering\n assert_allclose(AutoGMM.ari_, 1)\n\n\ndef test_covariances():\n \"\"\"\n Easily separable two gaussian problem.\n \"\"\"\n np.random.seed(1)\n\n n = 100\n mu1 = [-10, 0]\n mu2 = [10, 0]\n\n # Spherical\n cov1 = 2 * np.eye(2)\n cov2 = 2 * np.eye(2)\n\n X1 = np.random.multivariate_normal(mu1, cov1, n)\n X2 = np.random.multivariate_normal(mu2, cov2, n)\n\n X = np.concatenate((X1, X2))\n\n AutoGMM = AutoGMMCluster(min_components=2, covariance_type=\"all\")\n AutoGMM.fit(X)\n assert_equal(AutoGMM.covariance_type_, \"spherical\")\n\n # Diagonal\n np.random.seed(10)\n cov1 = np.diag([1, 1])\n cov2 = np.diag([2, 1])\n\n X1 = np.random.multivariate_normal(mu1, cov1, n)\n X2 = np.random.multivariate_normal(mu2, cov2, n)\n\n X = np.concatenate((X1, X2))\n\n AutoGMM = AutoGMMCluster(max_components=2, covariance_type=\"all\")\n AutoGMM.fit(X)\n assert_equal(AutoGMM.covariance_type_, \"diag\")\n\n # Tied\n cov1 = np.array([[2, 1], [1, 2]])\n cov2 = np.array([[2, 1], [1, 2]])\n\n X1 = np.random.multivariate_normal(mu1, cov1, n)\n X2 = np.random.multivariate_normal(mu2, cov2, n)\n\n X = np.concatenate((X1, X2))\n\n AutoGMM = AutoGMMCluster(max_components=2, covariance_type=\"all\")\n AutoGMM.fit(X)\n assert_equal(AutoGMM.covariance_type_, \"tied\")\n\n # Full\n cov1 = np.array([[2, -1], [-1, 2]])\n cov2 = np.array([[2, 1], [1, 2]])\n\n X1 = np.random.multivariate_normal(mu1, cov1, n)\n X2 = np.random.multivariate_normal(mu2, cov2, n)\n\n X = np.concatenate((X1, X2))\n\n AutoGMM = AutoGMMCluster(max_components=2, covariance_type=\"all\")\n AutoGMM.fit(X)\n assert_equal(AutoGMM.covariance_type_, \"full\")\n"
] | [
[
"sklearn.utils.check_array",
"numpy.eye",
"numpy.linalg.norm",
"numpy.ones",
"numpy.argmin",
"numpy.zeros"
],
[
"numpy.unique",
"numpy.arange",
"sklearn.metrics.adjusted_rand_score",
"sklearn.model_selection.ParameterGrid",
"sklearn.mixture.GaussianMixture"
],
[
"numpy.issubdtype",
"numpy.ones",
"numpy.any",
"numpy.array",
"numpy.where",
"numpy.sum"
],
[
"numpy.diag",
"numpy.testing.assert_equal",
"numpy.random.seed",
"numpy.random.multivariate_normal",
"numpy.eye",
"numpy.concatenate",
"numpy.random.normal",
"numpy.testing.assert_allclose",
"numpy.repeat",
"numpy.array",
"numpy.zeros",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
felja633/DARE | [
"b112d24da8108c440cc8417ee1ae5cdeef24181e",
"b112d24da8108c440cc8417ee1ae5cdeef24181e"
] | [
"src/color_feature_extraction.py",
"src/psreg.py"
] | [
"import colorsys\nimport numpy as np\nfrom time import time\n\nclass ChannelEncoder:\n def __init__(self,\n nchans=None,\n bounds=None,\n mflag=None,\n cscale=1.0):\n \"\"\"\n\n \"\"\"\n self.mflag = mflag\n self.nchans = nchans\n self.bounds = bounds\n self.cscale = cscale\n self.bfuncwidth = 1.5\n self.d = self.cscale * self.bfuncwidth\n\n if mflag == 0:\n self.fpos = (self.bounds[1] + self.bounds[0] * self.nchans - self.d * (self.bounds[0] + self.bounds[1])) / (self.nchans + 1 - 2 * self.d)\n self.ssc = (self.bounds[1] - self.bounds[0]) / (self.nchans + 1 - 2 * self.d)\n else:\n self.ssc = (self.bounds[1] - self.bounds[0]) / self.nchans\n self.fpos = self.bounds[0]\n\n def basis_cos2(self, x):\n\n c = np.cos(np.pi / 3 * x)\n val = c * c * (np.abs(x) < self.bfuncwidth)\n return val\n\n def basis_bs2(self, x):\n y = (np.abs(x) < 1.0/2.0) * (3.0/4.0 - np.abs(x)**2) + (np.abs(x) >= 1.0/2.0) * (np.abs(x) <= 3.0/2.0) * ((3.0/2.0 - abs(x))**2)/2.0\n return y\n\n ### Encode a value to a channel representation\n def encode(self, x):\n\n #cc = np.zeros((len(x), self.nchans))\n val = (x - self.fpos) / self.ssc + 1\n cpos = np.arange(self.nchans) + 1\n \n cpos = cpos.reshape(1, self.nchans)\n val = val.reshape(len(val),1)\n \n \n if self.mflag:\n ndist = self.nchans / 2.0 - np.abs(np.mod(cpos - val, self.nchans) - self.nchans / 2.0)\n else:\n ndist = np.abs(cpos - val)\n\n \n return self.basis_bs2(ndist)\n\n\ndef generate_1d_channels(feature_map, nch, max_v, min_v, modulo):\n \n not_mod = (1-modulo)\n num_ext_channels = nch + 2*not_mod\n che = ChannelEncoder(num_ext_channels, [min_v, max_v], modulo)\n return che.encode(feature_map) \n\ndef uniform_channel_coding(feature_map, num_channels, modulo):\n\n ### Do this per point...\n cc1 = generate_1d_channels(feature_map[0,:], num_channels, 1.0, 0.0, modulo[0])\n cc2 = generate_1d_channels(feature_map[1,:], num_channels, 1.0, 0.0, modulo[1])\n cc3 = generate_1d_channels(feature_map[2,:], num_channels, 1.0, 0.0, modulo[2])\n\n nmodulo = [1 - m for m in modulo]\n nch1 = num_channels + 2 * nmodulo[0]\n nch2 = num_channels + 2 * nmodulo[1]\n nch3 = num_channels + 2 * nmodulo[2]\n nch = [nch1,nch2,nch3]\n num_points = len(cc1)\n ### compute outer products of channels\n cc1cc2kron = cc2.reshape((len(cc2),nch2, 1)) * cc1.reshape((num_points, 1, nch1))\n tmp = cc1cc2kron.reshape((num_points, 1, nch2, nch1))\n channels = cc3.reshape((num_points, nch3, 1, 1)) * tmp\n\n weights = np.ones((channels.shape[0],num_channels,num_channels,num_channels)) * num_channels * 6.0/5.0\n weights[:,nmodulo[2]:weights.shape[1]-nmodulo[2], nmodulo[1]:weights.shape[2]-nmodulo[1], nmodulo[0]:weights.shape[3]-nmodulo[0]] = num_channels \n channels = channels[:, nmodulo[2]:channels.shape[1]-nmodulo[2], nmodulo[1]:channels.shape[2]-nmodulo[1], nmodulo[0]:channels.shape[3]-nmodulo[0]]\n\n channels = channels * weights * 19.200233330189796\n\n flatt_channels = channels.reshape((channels.shape[0], num_channels**3))\n \n return flatt_channels\n\ndef channel_color_coding_rgb(feature_data, num_channels):\n\n modulo = [0, 0, 0]\n channel_map = np.ndarray(len(feature_data), dtype=object)\n\n for i, feature_map in enumerate(feature_data):\n feature_map = feature_map/255.0\n \n channel_map[i] = uniform_channel_coding(feature_map, num_channels, modulo)\n\n return channel_map\n\ndef channel_color_coding_hsv(feature_data, num_channels):\n\n\n modulo = [1, 0, 0]\n channel_map = np.ndarray(len(feature_data), dtype=object)\n\n for i, feature_map in enumerate(feature_data):\n feature_map = 
feature_map/255.0\n \n feature_map = [colorsys.rgb_to_hsv(r, g, b) for (r, g, b) in feature_map.transpose()]\n channel_map[i] = uniform_channel_coding(np.array(feature_map).transpose(), num_channels, modulo).astype('float32')\n\n return channel_map\n\ndef get_gamma_color_distr(num_features, K):\n\n color_distr = np.random.gamma(1.0,1.0,size=(num_features,K))\n color_distr_norm = np.sum(color_distr, axis=0)\n color_distr[:, color_distr_norm == 0] = 1.0 # If all are zeros, set to uniform\n color_distr = color_distr / np.sum(color_distr, axis=0)\n return color_distr.astype('float32')\n\ndef get_default_color_distr(num_features, K):\n color_distr = 1.0/num_features * np.ones((num_features, K))\n return color_distr.astype('float32')\n",
"##############################################\n# Density Adaptative Point Set Registration #\n##############################################\nimport sys\nimport numpy as np\nfrom numpy.linalg import svd, det\nfrom time import time\nfrom . import observation_weights\nfrom . import point_cloud_plotting\n\ndef list_prod(X):\n if len(X)==1:\n return X[0]\n elif len(X) == 0:\n return 1.0\n else:\n return np.prod(np.stack(X, 2), 2)\n\ndef sqe(Y, X):\n d = Y[:, :, None].transpose(1, 2, 0) - X[:, :, None].transpose(2, 1, 0)\n s = np.sum(d * d, axis=2)\n \n return s\n\ndef get_default_cluster_priors(num_clusters, gamma):\n pk = 1 / (num_clusters + gamma) * np.ones((num_clusters, 1), dtype=np.float32)\n return pk.transpose()\n\n\ndef get_randn_cluster_means(point_clouds, num_clusters):\n \"\"\" Create random cluster means, distributed on a sphere.\n The standard deviation of all point-cloud points is the sphere radius.\n :param point_clouds: [ X1, X2, ... ]. Xi = 3 x Ni points [np.array].\n :param num_clusters: Number of clusters to generate\n :return: cluster means, (3, num_clusters) [np.array]\n \"\"\"\n # Sample the the unit sphere and scale with data standard deviation\n X = np.random.randn(3, num_clusters).astype(np.float32)\n X = X / np.linalg.norm(X, axis=0)\n v = np.var(np.concatenate(point_clouds, 1), 1, keepdims=True)\n means = X * np.sqrt(v)\n return means\n\n\ndef get_default_cluster_precisions(point_clouds, cluster_means):\n\n # Minimum coordinates in point clouds and clusters\n min_xyz = [np.min(pcl, 1) for pcl in point_clouds] # list of per-pcl minima\n min_xyz = min_xyz + [np.min(cluster_means, 1)] # append cluster_means minima\n min_xyz = np.min(np.stack(min_xyz), 1) # get joint minimum\n\n # Maximum coordinates in point clouds and clusters\n max_xyz = [np.max(pcl, 1) for pcl in point_clouds]\n max_xyz = max_xyz + [np.max(cluster_means, 1)]\n max_xyz = np.max(np.stack(max_xyz), 1)\n\n q = 1 / sqe(min_xyz[...,np.newaxis], max_xyz[...,np.newaxis])\n\n Q = q * np.ones((cluster_means.shape[1], 1))\n return Q.astype(np.float32)\n\n\ndef get_default_start_poses(point_clouds, cluster_means):\n \"\"\" Create default start poses\n :param cluster_means:\n :param point_clouds:\n :return:\n \"\"\"\n I = np.eye(3, dtype=np.float32) # Identity rotation\n mu = np.mean(cluster_means, 1) # Mean of cluster means\n poses = [(I, mu - np.mean(pcl, 1)) for pcl in point_clouds]\n return poses\n\n\ndef get_default_beta(cluster_precisions, gamma):\n\n h = 2 / np.mean(cluster_precisions)\n beta = gamma / (h * (gamma + 1))\n return float(beta)\n\nclass PSREG:\n\n def __init__(self,\n betas=None,\n epsilon=None,\n cluster_priors=None,\n cluster_means=None,\n cluster_precisions=None,\n feature_distr=None,\n debug=False,\n use_kdtree=False,\n fix_cluster_pos_iter=2):\n \"\"\"\n :param beta:\n :param epsilon:\n :param cluster_priors: (1,K) numpy.array (\\rho_k)\n :param cluster_means: (3,K) numpy.array (X)\n :param cluster_precisions: (3,K) numpy.array (Q)\n \"\"\"\n\n self.betas = betas\n self.epsilon = epsilon\n self.cluster_priors = cluster_priors\n self.cluster_means = cluster_means\n self.cluster_precisions = cluster_precisions\n self.feature_distr = feature_distr\n self.debug = debug\n self.use_kdtree = use_kdtree\n self.fix_cluster_pos_iter = fix_cluster_pos_iter\n\n def register_points(self, point_clouds, feature_likelihoods, num_iters, start_poses, show_progress=False, observation_weight_function=observation_weights.default_uniform, ow_args=()):\n \"\"\"\n :param point_clouds: [ X1, X2, ... ]. 
Xi = (3, Ni) numpy.array\n :param num_iters: Number of iterations to run\n :param start_poses: [ (R1, t1), (R2, t2) ... ]\n Ri = pcl-to-world rotation (3,3) numpy.array,\n ti = pcl-to-world translation vector (3,1) numpy.array\n :return:\n \"\"\"\n N = len(point_clouds)\n\n Vs = point_clouds\n Ps = start_poses\n\n pk = self.cluster_priors\n X = self.cluster_means\n Q = self.cluster_precisions\n fd = self.feature_distr\n ow_reg_factor = 8.0\n\n fts = feature_likelihoods\n\n # Compute the observation weights\n observation_weights = [observation_weight_function(V.transpose(), ow_args) for V in Vs]\n\n TVs = [R @ V + t[..., np.newaxis] for V, (R, t) in zip(Vs, Ps)]\n \n for i in range(len(observation_weights)):\n m = np.sum(observation_weights[i])/observation_weights[i].shape[0]\n observation_weights[i][np.where(observation_weights[i] > m * ow_reg_factor)] = m * ow_reg_factor\n\n ds = [sqe(TV, X) for TV in TVs]\n t_tot = time()\n for i in range(num_iters):\n t0 = time()\n \n a_s, Ls, Rs, ts, TVs, X, Q, den, fd, ds = self._iterate(TVs, X, pk, Q, fd, Vs, fts, ds, observation_weights, i)\n if show_progress:\n print(\"%03d: %.1f ms\" % (i+1, (time() - t0) * 1000))\n\n tot_time = time() - t_tot \n print(\"tot time %03d: %.1f ms\" % (i+1, (tot_time) * 1000))\n\n if self.debug:\n point_cloud_plotting.plotCloudsModel(TVs, X, 56)\n return TVs, X\n\n # (uniform priors so far...)\n def _iterate(self, TVs, X, pk, Q, feature_distr, Vs, features, ds, ows, current_iter):\n \"\"\" Run one cppsr iteraton \"\"\"\n\n M = len(TVs)\n\n a_s = np.ndarray(M, dtype=object)\n Ls = np.ndarray(M, dtype=object)\n Rs = np.ndarray(M, dtype=object)\n ts = np.ndarray(M, dtype=object)\n TV2s = np.ndarray(M, dtype=object)\n ac_den = np.ndarray(M, dtype=object)\n ap = np.ndarray(M, dtype=object)\n num_features = len(feature_distr)\n pyz_feature = np.ndarray((M, num_features), dtype=object)\n \n Qt = Q.transpose()\n for i, (TV, V, d, ow) in enumerate(zip(TVs, Vs, ds, ows)):\n\n # Posteriors\n a = pk * np.power(Qt, 1.5) * np.exp(-0.5 * Qt * d)\n\n ap[i] = a.copy()\n\n if features:\n for j, (fl, fd) in enumerate(zip(features, feature_distr)):\n # the joint feature distribution p(y|z,th)\n pyz_feature[i][j] = fl[i] @ fd \n\n a = list_prod(pyz_feature[i]) * a\n\n \n ac_den[i] = np.sum(a, 1, keepdims=True) + self.betas\n a = a / ac_den[i] # normalize row-wise\n a = a * ow # apply observation weights\n \n L = np.sum(a, 0, keepdims=True).transpose()\n W = (V @ a) * Qt\n\n b = L * Q # weights, b\n mW = np.sum(W, 1, keepdims=True) # mean of W\n mX = X @ b # mean of X\n z = L.transpose() @ Q # sumOfWeights\n P = (X @ W.transpose()) - (mX @ mW.transpose()) / z\n\n # Compute R and t\n uu, _, vv = svd(P)\n \n vv = vv.transpose() # Note: v is transposed compared to matlab's svd()\n S = np.diag([1, 1, det(uu @ vv)]).astype('float32')\n R = uu @ S @ vv.transpose()\n R = R\n t = (mX - R @ mW) / z\n TV = R @ V + t # transform V\n\n a_s[i] = a\n Ls[i] = L\n Rs[i] = R\n ts[i] = t\n TV2s[i] = TV\n\n TVs = TV2s\n\n # Update X\n\n den = Ls[0].copy()\n for L in Ls[1:]:\n den += L\n den = den.transpose()\n \n if self.fix_cluster_pos_iter < current_iter:\n X = TVs[0] @ a_s[0]\n for TV, a in zip(TVs[1:], a_s[1:]):\n X += TV @ a\n X = X / den\n # Update Q\n \n ds2 = [sqe(TV, X) for TV in TVs]\n wn = np.sum(a_s[0] * ds2[0], 0, keepdims=True)\n\n for distances, a in zip(ds2[1:], a_s[1:]):\n wn += np.sum(a * distances, 0, keepdims=True)\n\n Q = (3 * den / (wn + 3 * den * self.epsilon)).transpose()\n\n if features:\n for j, fd in enumerate(feature_distr):\n 
ac_sum = np.zeros(fd.shape)\n indlist = np.arange(0, num_features)\n \n # Update feature distributions\n for i, (TV, V, ow) in enumerate(zip(TVs, Vs, ows)):\n normed = features[j][i] / ac_den[i]\n ac_sum = ac_sum + normed.transpose(1, 0) @ (ow * ap[i] * list_prod(pyz_feature[i][indlist != j]))\n\n fd = np.multiply(fd, ac_sum)\n fd = fd / (np.sum(fd, axis=0) + 0.0000001)\n feature_distr[j] = fd\n \n return a_s, Ls, Rs, ts, TVs, X, Q, den, feature_distr, ds2\n\n"
] | [
[
"numpy.abs",
"numpy.arange",
"numpy.cos",
"numpy.ones",
"numpy.random.gamma",
"numpy.mod",
"numpy.array",
"numpy.sum"
],
[
"numpy.sqrt",
"numpy.ndarray",
"numpy.concatenate",
"numpy.max",
"numpy.mean",
"numpy.random.randn",
"numpy.exp",
"numpy.where",
"numpy.linalg.svd",
"numpy.arange",
"numpy.eye",
"numpy.stack",
"numpy.linalg.det",
"numpy.zeros",
"numpy.multiply",
"numpy.min",
"numpy.power",
"numpy.sum",
"numpy.linalg.norm",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
oscarjalnefjord/bruker2nifti | [
"3c9ab1a7770435e12a438d3833d54ca40cf16a35"
] | [
"bruker2nifti/_cores.py"
] | [
"import os\nimport nibabel as nib\nimport sys\nimport numpy as np\nimport warnings\n\nfrom os.path import join as jph\n\nfrom bruker2nifti._getters import get_list_scans, nifti_getter\nfrom bruker2nifti._utils import (\n bruker_read_files,\n normalise_b_vect,\n from_dict_to_txt_sorted,\n set_new_data,\n apply_reorientation_to_b_vects,\n obtain_b_vectors_orient_matrix,\n)\n\n\ndef scan2struct(\n pfo_scan,\n correct_slope=True,\n correct_offset=True,\n sample_upside_down=False,\n nifti_version=1,\n qform_code=1,\n sform_code=2,\n get_acqp=False,\n get_method=False,\n get_reco=False,\n frame_body_as_frame_head=False,\n keep_same_det=True,\n consider_subject_position=False,\n):\n \"\"\"\n The core method of the converter has 2 parts.\n 1) parsing the Bruker scan folder structure into an internal dictionary called struct.\n 2) writing the information parsed in struct into folders.\n ----\n scan2struct is the first part of the bridge. Info required to fill nifti header are in the visu_pars file.\n The user may want to parse as well acqp, method (must when EpiDti) and reco parameter files.\n Data are parsed in the intermediate dictionary struct containing the final scan(s) converted in nibabel\n image, with additional infos.\n :param pfo_scan: path to folder containing the scan\n :param correct_slope: [True] if you want to correct the slope of the values.\n :param correct_offset: [True] if you want to correct the offset of the values.\n :param sample_upside_down: [False] if you want to have the sample rotated 180 around the Anterior-Posterior axis.\n :param nifti_version: [1] output nifti version can be version 1 or version 2 (see nibabel documentation)\n :param qform_code: [1] qform of the final nifti image\n :param sform_code: [2] sform of the final nifti image\n :param get_acqp: [False] if you want to parse the information in the acqp parameter file of the bruker raw data\n :param get_method: [False] if you want to parse the information in the method file. Forced to True when\n dealing with diffusion weighted images.\n :param get_reco: [False] if you want to parse the information in the reco parameter file.\n :param frame_body_as_frame_head: e.g. true if monkey, false if rat.\n :param keep_same_det: impose to have in the nifti affine matrix, the same determinat as in the bruker parameter.\n :param consider_subject_position : visu_pars SubjPosition can be 'Head_Prone' or 'Head_Supine'. While it may\n make sense in most cases to take this value into account, in some other it may not, as it is\n tuned to switch from radiological to neurological coordinate systems in a work-around.\n If the subject is Prone and the technician wants to have the coordinates\n in neurological he/she can consciously set the variable vc_subject_position to 'Head_Supine'.\n :return: output_data data structure containing the nibabel image(s) {nib_list, visu_pars_list, acqp, method, reco}\n \"\"\"\n\n if not os.path.isdir(pfo_scan):\n raise IOError(\"Input folder does not exists.\")\n\n # Get system endian_nes\n system_endian_nes = sys.byteorder\n\n # Get sub-scans series in the same experiment.\n list_sub_scans = get_list_scans(jph(pfo_scan, \"pdata\"))\n\n if not list_sub_scans:\n warn_msg = (\n \"\\nNo sub scan in the folder structure: \\n{}. 
\\nAre you sure the input folder contains a \"\n \"proper Bruker scan?\\n\".format(jph(pfo_scan, \"pdata\"))\n )\n warnings.warn(warn_msg)\n return None\n\n nib_scans_list = []\n visu_pars_list = []\n\n for id_sub_scan in list_sub_scans:\n\n visu_pars = bruker_read_files(\"visu_pars\", pfo_scan, sub_scan_num=id_sub_scan)\n\n if visu_pars == {}:\n warn_msg = (\n \"\\nNo 'visu_pars' data found here: \\n{}. \\nAre you sure the input folder contains a \"\n \"proper Bruker scan?\\n\".format(jph(pfo_scan, \"pdata\", id_sub_scan))\n )\n warnings.warn(warn_msg)\n return None\n\n # In some cases we cannot deal with, VisuPars['VisuCoreSize'] can be a float. No conversion in this case.\n if not (\n isinstance(visu_pars[\"VisuCoreSize\"], np.ndarray)\n or isinstance(visu_pars[\"VisuCoreSize\"], list)\n ):\n warn_msg = (\n \"\\nWarning, VisuCoreSize in VisuPars parameter file {} \\n\"\n \"is not a list or a vector in. The study cannot be converted.\"\n \" \\n\".format(jph(pfo_scan, \"pdata\", id_sub_scan))\n )\n warnings.warn(warn_msg)\n return None\n\n # Get data endian_nes - default big!!\n if visu_pars[\"VisuCoreByteOrder\"] == \"littleEndian\":\n data_endian_ness = \"little\"\n elif visu_pars[\"VisuCoreByteOrder\"] == \"bigEndian\":\n data_endian_ness = \"big\"\n else:\n data_endian_ness = \"big\"\n\n # Get datatype\n if visu_pars[\"VisuCoreWordType\"] == \"_32BIT_SGN_INT\":\n dt = np.int32\n elif visu_pars[\"VisuCoreWordType\"] == \"_16BIT_SGN_INT\":\n dt = np.int16\n elif visu_pars[\"VisuCoreWordType\"] == \"_8BIT_UNSGN_INT\":\n dt = np.uint8\n elif visu_pars[\"VisuCoreWordType\"] == \"_32BIT_FLOAT\":\n dt = np.float32\n else:\n raise IOError(\"Unknown data type for VisuPars VisuCoreWordType\")\n\n # GET IMAGE VOLUME\n if os.path.exists(jph(pfo_scan, \"pdata\", id_sub_scan, \"2dseq\")):\n img_data_vol = np.copy(\n np.fromfile(jph(pfo_scan, \"pdata\", id_sub_scan, \"2dseq\"), dtype=dt)\n )\n else:\n warn_msg = (\n \"\\nNo '2dseq' data found here: \\n{}. \\nAre you sure the input folder contains a \"\n \"proper Bruker scan?\\n\".format(jph(pfo_scan, \"pdata\", id_sub_scan))\n )\n warnings.warn(warn_msg)\n return None\n\n if not data_endian_ness == system_endian_nes:\n img_data_vol.byteswap(True)\n\n if \"VisuAcqSequenceName\" in visu_pars.keys():\n visu_pars_acq_sequence_name = visu_pars[\"VisuAcqSequenceName\"]\n else:\n visu_pars_acq_sequence_name = \"\"\n\n is_dwi = \"dtiepi\" in visu_pars_acq_sequence_name.lower()\n\n if is_dwi:\n # Force to not correcting the slope, if true. Diffusion weighted images must be slope corrected before the\n # DTI analysis. They will be to heavy otherwise.\n correct_slope = False\n correct_offset = False\n # Force method to be parsed. Useful infos in this file to process the DWI.\n get_method = True\n\n # ------------------------------------------------------ #\n # ------ Generate the nifti image using visu_pars. ----- #\n # ------------------------------------------------------ #\n\n nib_im = nifti_getter(\n img_data_vol,\n visu_pars,\n correct_slope,\n correct_offset,\n sample_upside_down,\n nifti_version,\n qform_code,\n sform_code,\n frame_body_as_frame_head=frame_body_as_frame_head,\n keep_same_det=keep_same_det,\n consider_subject_position=consider_subject_position,\n )\n # ------------------------------------------------------ #\n # ------------------------------------------------------ #\n\n nib_scans_list.append(nib_im)\n visu_pars_list.append(visu_pars)\n\n # -- Get additional data\n\n # Get information from method, if it exists. 
Parse Method parameter and erase the dictionary if unwanted\n method = bruker_read_files(\"method\", pfo_scan)\n\n if method == {}:\n print(\"Warning: No 'method' file to parse.\")\n if \"Method\" in method.keys():\n acquisition_method = (\n method[\"Method\"].replace(\"<\", \"\").replace(\">\", \"\").split(\":\")[-1]\n )\n else:\n acquisition_method = \"\"\n\n if not get_method:\n method = {}\n\n # Get information from acqp, reco, if they exist.\n acqp = {}\n reco = {}\n\n if get_acqp:\n acqp = bruker_read_files(\"acqp\", pfo_scan)\n if acqp == {}:\n print(\"Warning: No 'acqp' file to parse.\")\n\n if get_reco:\n reco = bruker_read_files(\"reco\", pfo_scan)\n if reco == {}:\n print(\"Warning: No 'method' file to parse.\")\n\n # -- Return data structure\n struct_scan = {\n \"nib_scans_list\": nib_scans_list,\n \"visu_pars_list\": visu_pars_list,\n \"acqp\": acqp,\n \"reco\": reco,\n \"method\": method,\n \"acquisition_method\": acquisition_method,\n }\n\n return struct_scan\n\n\ndef write_struct(\n bruker_struct,\n pfo_output,\n fin_scan=\"\",\n save_human_readable=True,\n save_b0_if_dwi=True,\n verbose=1,\n frame_body_as_frame_head=False,\n keep_same_det=True,\n consider_subject_position=False,\n):\n \"\"\"\n The core method of the converter has 2 parts.\n 1) parsing the Bruker scan folder structure into an internal dictionary called struct.\n 2) writing the information parsed in struct into folders.\n -------\n write_struct is the second part of the bridge -\n :param bruker_struct: output of scan2struct\n :param pfo_output: path-to-folder where the converted structure will be saved.\n :param fin_scan: filename of the scan\n :param save_human_readable: output data will be saved in .txt other than in numpy format.\n :param save_b0_if_dwi: save the first time-point if the data is a DWI.\n :param verbose:\n :param frame_body_as_frame_head: according to the animal. If True monkey, if False rat-rabbit\n :param keep_same_det: force the initial determinant to be the same as the final one\n :param consider_subject_position: Attribute manually set, or left blank, by the lab experts. False by default\n :return: save the bruker_struct parsed in scan2struct in the specified folder, with the specified parameters.\n \"\"\"\n\n if not os.path.isdir(pfo_output):\n raise IOError(\"Output folder does not exist.\")\n\n if bruker_struct is None:\n return\n\n if not len(bruker_struct[\"visu_pars_list\"]) == len(bruker_struct[\"nib_scans_list\"]):\n raise IOError(\n \"Visu pars list and scans list have a different number of elements.\"\n )\n\n if fin_scan is None:\n fin_scan = \"\"\n\n # -- WRITE Additional data shared by all the sub-scans:\n # if the modality is a DtiEpi or Dwimage then save the DW directions, b values and b vectors in separate csv .txt.\n\n is_dwi = (\n \"dtiepi\" in bruker_struct[\"visu_pars_list\"][0][\"VisuAcqSequenceName\"].lower()\n or \"dwi\" in bruker_struct[\"visu_pars_list\"][0][\"VisuAcqSequenceName\"].lower()\n )\n\n if (\n is_dwi\n ): # File method is the same for each sub-scan. 
Cannot embed this in the next for cycle.\n\n # -- Deals with b-vector: normalise, reorient and save in external .npy/txt.\n dw_grad_vec = bruker_struct[\"method\"][\"DwGradVec\"]\n\n assert dw_grad_vec.shape[0] == bruker_struct[\"method\"][\"DwNDiffExp\"]\n\n # get b-vectors re-orientation matrix from visu-pars\n reorientation_matrix = obtain_b_vectors_orient_matrix(\n bruker_struct[\"visu_pars_list\"][0][\"VisuCoreOrientation\"],\n bruker_struct[\"visu_pars_list\"][0][\"VisuSubjectPosition\"],\n frame_body_as_frame_head=frame_body_as_frame_head,\n keep_same_det=keep_same_det,\n consider_subject_position=consider_subject_position,\n )\n\n # apply reorientation\n dw_grad_vec = apply_reorientation_to_b_vects(reorientation_matrix, dw_grad_vec)\n # normalise:\n dw_grad_vec = normalise_b_vect(dw_grad_vec)\n\n np.save(jph(pfo_output, fin_scan + \"_DwGradVec.npy\"), dw_grad_vec)\n\n if save_human_readable:\n np.savetxt(\n jph(pfo_output, fin_scan + \"_DwGradVec.txt\"), dw_grad_vec, fmt=\"%.14f\"\n )\n\n if verbose > 0:\n msg = \"Diffusion weighted directions saved in \" + jph(\n pfo_output, fin_scan + \"_DwDir.npy\"\n )\n print(msg)\n\n b_vals = bruker_struct[\"method\"][\"DwEffBval\"]\n b_vects = bruker_struct[\"method\"][\"DwDir\"]\n\n np.save(jph(pfo_output, fin_scan + \"_DwEffBval.npy\"), b_vals)\n np.save(jph(pfo_output, fin_scan + \"_DwDir.npy\"), b_vects)\n\n if save_human_readable:\n np.savetxt(\n jph(pfo_output, fin_scan + \"_DwEffBval.txt\"), b_vals, fmt=\"%.14f\"\n )\n np.savetxt(jph(pfo_output, fin_scan + \"_DwDir.txt\"), b_vects, fmt=\"%.14f\")\n\n if verbose > 0:\n print(\n \"B-vectors saved in {}\".format(\n jph(pfo_output, fin_scan + \"_DwEffBval.npy\")\n )\n )\n print(\n \"B-values saved in {}\".format(\n jph(pfo_output, fin_scan + \"_DwGradVec.npy\")\n )\n )\n\n # save the dictionary as numpy array containing the corresponding dictionaries\n # TODO use pickle instead of numpy to save the dictionaries(?)\n\n if not bruker_struct[\"acqp\"] == {}:\n np.save(jph(pfo_output, fin_scan + \"_acqp.npy\"), bruker_struct[\"acqp\"])\n if save_human_readable:\n from_dict_to_txt_sorted(\n bruker_struct[\"acqp\"], jph(pfo_output, fin_scan + \"_acqp.txt\")\n )\n if not bruker_struct[\"method\"] == {}:\n np.save(jph(pfo_output, fin_scan + \"_method.npy\"), bruker_struct[\"method\"])\n if save_human_readable:\n from_dict_to_txt_sorted(\n bruker_struct[\"method\"], jph(pfo_output, fin_scan + \"_method.txt\")\n )\n if not bruker_struct[\"reco\"] == {}:\n np.save(jph(pfo_output, fin_scan + \"_reco.npy\"), bruker_struct[\"reco\"])\n if save_human_readable:\n from_dict_to_txt_sorted(\n bruker_struct[\"reco\"], jph(pfo_output, fin_scan + \"_reco.txt\")\n )\n\n # Visu_pars and summary info for each sub-scan:\n summary_info = {}\n\n for i in range(len(bruker_struct[\"visu_pars_list\"])):\n\n if len(bruker_struct[\"nib_scans_list\"]) > 1:\n i_label = \"_subscan_\" + str(i) + \"_\"\n else:\n i_label = \"_\"\n\n # A) Save visu_pars for each sub-scan:\n np.save(\n jph(pfo_output, fin_scan + i_label + \"visu_pars.npy\"),\n bruker_struct[\"visu_pars_list\"][i],\n )\n\n # B) Save single slope data for each sub-scan (from visu_pars):\n np.save(\n jph(pfo_output, fin_scan + i_label + \"slope.npy\"),\n bruker_struct[\"visu_pars_list\"][i][\"VisuCoreDataSlope\"],\n )\n\n # A and B) save them both in .txt if human readable version of data is required.\n if save_human_readable:\n from_dict_to_txt_sorted(\n bruker_struct[\"visu_pars_list\"][i],\n jph(pfo_output, fin_scan + i_label + \"visu_pars.txt\"),\n )\n\n slope = 
bruker_struct[\"visu_pars_list\"][i][\"VisuCoreDataSlope\"]\n if not isinstance(slope, np.ndarray):\n slope = np.atleast_2d(slope)\n np.savetxt(\n jph(pfo_output, fin_scan + i_label + \"slope.txt\"), slope, fmt=\"%.14f\"\n )\n\n # Update summary dictionary:\n summary_info_i = {\n i_label[1:]\n + \"visu_pars['VisuUid']\": bruker_struct[\"visu_pars_list\"][i][\"VisuUid\"],\n i_label[1:]\n + \"visu_pars['VisuCoreDataSlope']\": bruker_struct[\"visu_pars_list\"][i][\n \"VisuCoreDataSlope\"\n ],\n i_label[1:]\n + \"visu_pars['VisuCoreSize']\": bruker_struct[\"visu_pars_list\"][i][\n \"VisuCoreSize\"\n ],\n i_label[1:]\n + \"visu_pars['VisuCoreOrientation']\": bruker_struct[\"visu_pars_list\"][i][\n \"VisuCoreOrientation\"\n ],\n i_label[1:]\n + \"visu_pars['VisuCorePosition']\": bruker_struct[\"visu_pars_list\"][i][\n \"VisuCorePosition\"\n ],\n }\n\n if len(list(bruker_struct[\"visu_pars_list\"][i][\"VisuCoreExtent\"])) == 2:\n # equivalent to struct['method']['SpatDimEnum'] == '2D':\n if \"VisuCoreSlicePacksSlices\" in bruker_struct[\"visu_pars_list\"][i].keys():\n summary_info_i.update(\n {\n i_label[1:]\n + \"visu_pars['VisuCoreSlicePacksSlices']\": bruker_struct[\n \"visu_pars_list\"\n ][i][\"VisuCoreSlicePacksSlices\"]\n }\n )\n\n if (\n len(list(bruker_struct[\"visu_pars_list\"][i][\"VisuCoreExtent\"])) == 3\n and \"VisuCoreDiskSliceOrder\" in bruker_struct[\"visu_pars_list\"][i].keys()\n ):\n # first part equivalent to struct['method']['SpatDimEnum'] == '3D':\n summary_info_i.update(\n {\n i_label[1:]\n + \"visu_pars['VisuCoreDiskSliceOrder']\": bruker_struct[\n \"visu_pars_list\"\n ][i][\"VisuCoreDiskSliceOrder\"]\n }\n )\n\n if \"VisuCreatorVersion\" in bruker_struct[\"visu_pars_list\"][i].keys():\n summary_info_i.update(\n {\n i_label[1:]\n + \"visu_pars['VisuCreatorVersion']\": bruker_struct[\n \"visu_pars_list\"\n ][i][\"VisuCreatorVersion\"]\n }\n )\n\n summary_info.update(summary_info_i)\n\n # WRITE NIFTI IMAGES:\n\n if isinstance(bruker_struct[\"nib_scans_list\"][i], list):\n # the scan had sub-volumes embedded. they are saved separately\n for sub_vol_id, subvol in enumerate(bruker_struct[\"nib_scans_list\"][i]):\n\n if fin_scan == \"\":\n pfi_scan = jph(\n pfo_output,\n \"scan\"\n + i_label[:-1]\n + \"_subvol_\"\n + str(sub_vol_id)\n + \".nii.gz\",\n )\n else:\n pfi_scan = jph(\n pfo_output,\n fin_scan\n + i_label[:-1]\n + \"_subvol_\"\n + str(sub_vol_id)\n + \".nii.gz\",\n )\n\n nib.save(subvol, pfi_scan)\n\n else:\n\n if fin_scan == \"\":\n pfi_scan = jph(pfo_output, \"scan\" + i_label[:-1] + \".nii.gz\")\n else:\n pfi_scan = jph(pfo_output, fin_scan + i_label[:-1] + \".nii.gz\")\n\n nib.save(bruker_struct[\"nib_scans_list\"][i], pfi_scan)\n\n if save_b0_if_dwi and is_dwi:\n # save the b0, first slice alone. 
Optimized if you have\n # NiftiSeg (http://cmictig.cs.ucl.ac.uk/wiki/index.php/NiftySeg) installed\n\n if fin_scan == \"\":\n pfi_scan_b0 = jph(pfo_output, \"scan\" + i_label[:-1] + \"_b0.nii.gz\")\n else:\n pfi_scan_b0 = jph(\n pfo_output, fin_scan + i_label[:-1] + \"_b0.nii.gz\"\n )\n\n nib.save(\n set_new_data(\n bruker_struct[\"nib_scans_list\"][i],\n bruker_struct[\"nib_scans_list\"][i].get_data()[..., 0],\n ),\n pfi_scan_b0,\n )\n if verbose > 0:\n msg = \"b0 scan saved alone in \" + pfi_scan_b0\n print(msg)\n\n # complete the summary info with additional information from other parameter files, if required:\n\n if not bruker_struct[\"acqp\"] == {}:\n\n summary_info_acqp = {\n \"acqp['ACQ_sw_version']\": bruker_struct[\"acqp\"][\"ACQ_sw_version\"],\n \"acqp['NR']\": bruker_struct[\"acqp\"][\"NR\"],\n \"acqp['NI']\": bruker_struct[\"acqp\"][\"NI\"],\n \"acqp['ACQ_n_echo_images']\": bruker_struct[\"acqp\"][\"ACQ_n_echo_images\"],\n \"acqp['ACQ_slice_thick']\": bruker_struct[\"acqp\"][\"ACQ_slice_thick\"],\n }\n summary_info.update(summary_info_acqp)\n\n if not bruker_struct[\"method\"] == {}:\n\n summary_info_method = {\n \"method['SpatDimEnum']\": bruker_struct[\"method\"][\"SpatDimEnum\"],\n \"method['Matrix']\": bruker_struct[\"method\"][\"Matrix\"],\n \"method['SpatResol']\": bruker_struct[\"method\"][\"SpatResol\"],\n \"method['Method']\": bruker_struct[\"method\"][\"Method\"],\n \"method['SPackArrSliceOrient']\": bruker_struct[\"method\"][\n \"SPackArrSliceOrient\"\n ],\n \"method['SPackArrReadOrient']\": bruker_struct[\"method\"][\n \"SPackArrReadOrient\"\n ],\n }\n summary_info.update(summary_info_method)\n\n if not bruker_struct[\"reco\"] == {}:\n\n summary_info_reco = {\n \"reco['RECO_size']\": bruker_struct[\"reco\"][\"RECO_size\"],\n \"reco['RECO_inp_order']\": bruker_struct[\"reco\"][\"RECO_inp_order\"],\n }\n summary_info.update(summary_info_reco)\n\n # Finally summary info with the updated information.\n from_dict_to_txt_sorted(summary_info, jph(pfo_output, fin_scan + \"_summary.txt\"))\n\n # Get the method name in a single .txt file:\n if bruker_struct[\"acquisition_method\"] is not \"\":\n text_file = open(jph(pfo_output, \"acquisition_method.txt\"), \"w+\")\n text_file.write(bruker_struct[\"acquisition_method\"])\n text_file.close()\n"
] | [
[
"numpy.atleast_2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |